author     Vitaly Buka <vitalybuka@google.com>   2024-04-02 14:23:42 -0700
committer  Vitaly Buka <vitalybuka@google.com>   2024-04-02 14:23:42 -0700
commit     2fe88fc8b7a3c27d473b6a172f0dc8aae7be3310 (patch)
tree       4a2ce5eb31e8242dcbb7d7a3de82d3309fdc23c5 /llvm/test
parent     eb6a41808ef4e058a24f9ebc6c85b10c966eb183 (diff)
parent     89271b46761749503dffe94c60b9cbe0bda80284 (diff)
download   llvm-2fe88fc8b7a3c27d473b6a172f0dc8aae7be3310.zip
           llvm-2fe88fc8b7a3c27d473b6a172f0dc8aae7be3310.tar.gz
           llvm-2fe88fc8b7a3c27d473b6a172f0dc8aae7be3310.tar.bz2
[𝘀𝗽𝗿] changes introduced through rebase
Created using spr 1.3.4 [skip ci]
Diffstat (limited to 'llvm/test')
-rw-r--r--llvm/test/Analysis/AliasSet/intrinsics.ll34
-rw-r--r--llvm/test/Analysis/CostModel/ARM/intrinsic-cost-kinds.ll2
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/cast.ll1374
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/int-min-max.ll120
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/reduce-fmaximum.ll91
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/reduce-fminimum.ll52
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/reduce-max.ll140
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/reduce-min.ll140
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/reduce-scalable-fp.ll96
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/reduce-scalable-int.ll108
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/rvv-extractelement.ll84
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/rvv-insertelement.ll84
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll2
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/shuffle-insert_subvector.ll66
-rw-r--r--llvm/test/Analysis/CostModel/X86/intrinsic-cost-kinds.ll2
-rw-r--r--llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll6
-rw-r--r--llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll6
-rw-r--r--llvm/test/Analysis/Lint/crash_empty_iterator.ll22
-rw-r--r--llvm/test/Analysis/MemorySSA/allow-check.ll29
-rw-r--r--llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll70
-rw-r--r--llvm/test/Analysis/ValueTracking/known-non-zero.ll158
-rw-r--r--llvm/test/Analysis/ValueTracking/knownbits-select-from-cond.ll81
-rw-r--r--llvm/test/Assembler/debug-info.ll16
-rw-r--r--llvm/test/Assembler/flags.ll48
-rw-r--r--llvm/test/Assembler/getelementptr.ll24
-rw-r--r--llvm/test/Assembler/inrange-errors.ll46
-rw-r--r--llvm/test/Bindings/OCaml/core.ml2
-rw-r--r--llvm/test/Bindings/OCaml/debuginfo.ml10
-rw-r--r--llvm/test/Bindings/llvm-c/echo.ll14
-rw-r--r--llvm/test/Bitcode/DIExpression-aggresult.ll1
-rw-r--r--llvm/test/Bitcode/compatibility-3.6.ll16
-rw-r--r--llvm/test/Bitcode/compatibility-3.7.ll16
-rw-r--r--llvm/test/Bitcode/compatibility-3.8.ll16
-rw-r--r--llvm/test/Bitcode/compatibility-3.9.ll16
-rw-r--r--llvm/test/Bitcode/compatibility-4.0.ll18
-rw-r--r--llvm/test/Bitcode/compatibility-5.0.ll18
-rw-r--r--llvm/test/Bitcode/compatibility-6.0.ll18
-rw-r--r--llvm/test/Bitcode/compatibility.ll22
-rw-r--r--llvm/test/Bitcode/dbg-record-roundtrip.ll172
-rw-r--r--llvm/test/Bitcode/flags.ll29
-rw-r--r--llvm/test/Bitcode/thinlto-func-summary-vtableref-pgo.ll74
-rw-r--r--llvm/test/Bitcode/thinlto-function-summary.ll6
-rw-r--r--llvm/test/Bitcode/upgrade-dbg-addr.ll1
-rw-r--r--llvm/test/Bitcode/variableArgumentIntrinsic.3.2.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll72
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/combine-extract-vec-elt.mir299
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/combine-overflow.mir178
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll92
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir245
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-nneg-disjoint.ll135
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-trunc.ll90
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unreachable.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-deinterleave2.ll34
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-interleave2.ll30
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-abs.mir28
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir27
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir29
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir36
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-ctpop-no-implicit-float.mir4
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir29
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir49
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir57
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll14
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir12
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir9
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir329
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-addo-zero.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/select.mir319
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/uaddo-8-16-bits.mir52
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-smull.ll53
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll14
-rw-r--r--llvm/test/CodeGen/AArch64/abs.ll26
-rw-r--r--llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll36
-rw-r--r--llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll12
-rw-r--r--llvm/test/CodeGen/AArch64/allow-check.ll30
-rw-r--r--llvm/test/CodeGen/AArch64/and-sink.ll60
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-anyregcc.ll225
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll21
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll34
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-patchpoint.ll139
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-xaluo.ll3
-rw-r--r--llvm/test/CodeGen/AArch64/avoid-zero-copy.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/bitcast.ll211
-rw-r--r--llvm/test/CodeGen/AArch64/bswap.ll26
-rw-r--r--llvm/test/CodeGen/AArch64/clear-dead-implicit-def-impdef.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add.ll81
-rw-r--r--llvm/test/CodeGen/AArch64/dllexport.ll39
-rw-r--r--llvm/test/CodeGen/AArch64/extbinopload.ll31
-rw-r--r--llvm/test/CodeGen/AArch64/extract-vector-elt.ll1114
-rw-r--r--llvm/test/CodeGen/AArch64/extractvector-oob-load.mir7
-rw-r--r--llvm/test/CodeGen/AArch64/fcmp.ll81
-rw-r--r--llvm/test/CodeGen/AArch64/fexplog.ll65
-rw-r--r--llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll79
-rw-r--r--llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll51
-rw-r--r--llvm/test/CodeGen/AArch64/fold-global-offsets.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/fp-conversion-to-tbl.ll5
-rw-r--r--llvm/test/CodeGen/AArch64/fp-intrinsics.ll55
-rw-r--r--llvm/test/CodeGen/AArch64/fpow.ll15
-rw-r--r--llvm/test/CodeGen/AArch64/fptoi.ll301
-rw-r--r--llvm/test/CodeGen/AArch64/fsincos.ll26
-rw-r--r--llvm/test/CodeGen/AArch64/hadd-combine.ll48
-rw-r--r--llvm/test/CodeGen/AArch64/implicit-def-remat-requires-impdef-check.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/implicit-def-with-impdef-greedy-assert.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/insert-subvector.ll150
-rw-r--r--llvm/test/CodeGen/AArch64/isinf.ll22
-rw-r--r--llvm/test/CodeGen/AArch64/itofp.ll270
-rw-r--r--llvm/test/CodeGen/AArch64/llvm.exp10.ll17
-rw-r--r--llvm/test/CodeGen/AArch64/load.ll3
-rw-r--r--llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir1
-rw-r--r--llvm/test/CodeGen/AArch64/misched-bundle.mir195
-rw-r--r--llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll46
-rw-r--r--llvm/test/CodeGen/AArch64/neon-compare-instructions.ll101
-rw-r--r--llvm/test/CodeGen/AArch64/neon-truncstore.ll5
-rw-r--r--llvm/test/CodeGen/AArch64/overflow.ll117
-rw-r--r--llvm/test/CodeGen/AArch64/peephole-movd.mir60
-rw-r--r--llvm/test/CodeGen/AArch64/pr86717.ll22
-rw-r--r--llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/sadd_sat_vec.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/setcc_knownbits.ll93
-rw-r--r--llvm/test/CodeGen/AArch64/sext.ll14
-rw-r--r--llvm/test/CodeGen/AArch64/shift.ll225
-rw-r--r--llvm/test/CodeGen/AArch64/shuffle-tbl34.ll14
-rw-r--r--llvm/test/CodeGen/AArch64/shufflevector.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sme-avoid-coalescing-locally-streaming.ll120
-rw-r--r--llvm/test/CodeGen/AArch64/sme-call-streaming-compatible-to-normal-fn-wihout-sme-attr.ll39
-rw-r--r--llvm/test/CodeGen/AArch64/sme-machine-licm-vg.mir64
-rw-r--r--llvm/test/CodeGen/AArch64/sme-streaming-body-streaming-compatible-interface.ll26
-rw-r--r--llvm/test/CodeGen/AArch64/sme-streaming-body.ll34
-rw-r--r--llvm/test/CodeGen/AArch64/sme-write-vg.ll24
-rw-r--r--llvm/test/CodeGen/AArch64/soft-float-abi.ll161
-rw-r--r--llvm/test/CodeGen/AArch64/srem-vec-crash.ll15
-rw-r--r--llvm/test/CodeGen/AArch64/ssub_sat_vec.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/stack-probing-no-scratch-reg.mir1
-rw-r--r--llvm/test/CodeGen/AArch64/stack-probing-shrink-wrap.mir1
-rw-r--r--llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll12
-rw-r--r--llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/stackmap.ll32
-rw-r--r--llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/tbl-loops.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/trunc-to-tbl.ll28
-rw-r--r--llvm/test/CodeGen/AArch64/uadd_sat_vec.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/usub_sat_vec.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/vcvt-oversize.ll5
-rw-r--r--llvm/test/CodeGen/AArch64/vec-combine-compare-truncate-store.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll22
-rw-r--r--llvm/test/CodeGen/AArch64/vecreduce-add.ll905
-rw-r--r--llvm/test/CodeGen/AArch64/wrong-callee-save-size-after-livedebugvariables.mir1
-rw-r--r--llvm/test/CodeGen/AArch64/xor.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/zext.ll18
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll274
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir15
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir4
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir15
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir45
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir15
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir45
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap.mir4
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.interp.inreg.ll30
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll41
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.format.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.buffer.load.format.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll232
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll147
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll1063
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll408
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll229
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/add_sub_u64_pseudos.mir68
-rw-r--r--llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/allow-check.ll30
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll255
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-break-large-phis.ll51
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll108
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll12
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll44
-rw-r--r--llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll26
-rw-r--r--llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll19
-rw-r--r--llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/av_spill_cross_bb_usage.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/bf16-conversions.ll357
-rw-r--r--llvm/test/CodeGen/AMDGPU/bf16.ll903
-rw-r--r--llvm/test/CodeGen/AMDGPU/clamp.ll64
-rw-r--r--llvm/test/CodeGen/AMDGPU/convergence-tokens.ll55
-rw-r--r--llvm/test/CodeGen/AMDGPU/copy-vgpr-clobber-spill-vgpr.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/div_i128.ll2298
-rw-r--r--llvm/test/CodeGen/AMDGPU/div_v2i128.ll3233
-rw-r--r--llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll103
-rw-r--r--llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll728
-rw-r--r--llvm/test/CodeGen/AMDGPU/fcanonicalize.ll22
-rw-r--r--llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll22
-rw-r--r--llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/fold-restore-undef-use.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/fp-classify.ll60
-rw-r--r--llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll270
-rw-r--r--llvm/test/CodeGen/AMDGPU/fptoi.i128.ll1502
-rw-r--r--llvm/test/CodeGen/AMDGPU/fract-match.ll167
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll867
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll564
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll564
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll669
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll5578
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll3960
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll3960
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll5576
-rw-r--r--llvm/test/CodeGen/AMDGPU/greedy-alloc-fail-sgpr1024-spill.mir1
-rw-r--r--llvm/test/CodeGen/AMDGPU/implicitarg-offset-attributes.ll30
-rw-r--r--llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll18
-rw-r--r--llvm/test/CodeGen/AMDGPU/itofp.i128.ll1618
-rw-r--r--llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/lds-mixed-absolute-addresses-unused.ll26
-rw-r--r--llvm/test/CodeGen/AMDGPU/lds-reject-mixed-absolute-addresses.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/llc-pipeline.ll10
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w32.ll146
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w64.ll146
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.single.2b.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll111
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.inreg.ll30
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll333
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.format.ll317
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.format.ll280
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll122
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll122
-rw-r--r--llvm/test/CodeGen/AMDGPU/lto-lower-module-lds.ll47
-rw-r--r--llvm/test/CodeGen/AMDGPU/mad-mix-lo.ll191
-rw-r--r--llvm/test/CodeGen/AMDGPU/merge-buffer-gfx12.mir1154
-rw-r--r--llvm/test/CodeGen/AMDGPU/merge-buffer.mir1130
-rw-r--r--llvm/test/CodeGen/AMDGPU/merge-tbuffer.mir28
-rw-r--r--llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll18
-rw-r--r--llvm/test/CodeGen/AMDGPU/neighboring-mfma-padding.mir504
-rw-r--r--llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll305
-rw-r--r--llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll11
-rw-r--r--llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll20
-rw-r--r--llvm/test/CodeGen/AMDGPU/preload-kernargs.ll8976
-rw-r--r--llvm/test/CodeGen/AMDGPU/promote-alloca-scoring.ll69
-rw-r--r--llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll18
-rw-r--r--llvm/test/CodeGen/AMDGPU/propagate-waves-per-eu.ll44
-rw-r--r--llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir1
-rw-r--r--llvm/test/CodeGen/AMDGPU/recursive_global_initializer.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/remove-no-kernel-id-attribute.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir1
-rw-r--r--llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll15
-rw-r--r--llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/snippet-copy-bundle-regression.mir1
-rw-r--r--llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll78
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-propagate-attribute.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/vgpr-agpr-limit-gfx90a.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll26
-rw-r--r--llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/virtregrewrite-undef-identity-copy.mir1
-rw-r--r--llvm/test/CodeGen/AMDGPU/wave32.ll34
-rw-r--r--llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/wwm-reserved.ll4
-rw-r--r--llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll15
-rw-r--r--llvm/test/CodeGen/ARM/no-register-coalescing-in-returnsTwice.mir2
-rw-r--r--llvm/test/CodeGen/ARM/select.ll399
-rw-r--r--llvm/test/CodeGen/AVR/bug-81911.ll163
-rw-r--r--llvm/test/CodeGen/BPF/addr-space-globals.ll2
-rw-r--r--llvm/test/CodeGen/BPF/addr-space-globals2.ll4
-rw-r--r--llvm/test/CodeGen/BPF/cttz-ctlz.ll304
-rw-r--r--llvm/test/CodeGen/DirectX/ShaderFlags/double-extensions.ll3
-rw-r--r--llvm/test/CodeGen/DirectX/ShaderFlags/doubles.ll4
-rw-r--r--llvm/test/CodeGen/DirectX/abs-vec.ll34
-rw-r--r--llvm/test/CodeGen/DirectX/abs.ll38
-rw-r--r--llvm/test/CodeGen/DirectX/any.ll113
-rw-r--r--llvm/test/CodeGen/DirectX/ceil.ll20
-rw-r--r--llvm/test/CodeGen/DirectX/ceil_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/clamp-vec.ll74
-rw-r--r--llvm/test/CodeGen/DirectX/clamp.ll94
-rw-r--r--llvm/test/CodeGen/DirectX/cos.ll20
-rw-r--r--llvm/test/CodeGen/DirectX/cos_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/dot2_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/dot3_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/dot4_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/exp-vec.ll17
-rw-r--r--llvm/test/CodeGen/DirectX/exp.ll31
-rw-r--r--llvm/test/CodeGen/DirectX/fabs.ll32
-rw-r--r--llvm/test/CodeGen/DirectX/fdot.ll94
-rw-r--r--llvm/test/CodeGen/DirectX/floor.ll20
-rw-r--r--llvm/test/CodeGen/DirectX/floor_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/fmax.ll31
-rw-r--r--llvm/test/CodeGen/DirectX/fmin.ll31
-rw-r--r--llvm/test/CodeGen/DirectX/idot.ll100
-rw-r--r--llvm/test/CodeGen/DirectX/isinf.ll25
-rw-r--r--llvm/test/CodeGen/DirectX/isinf_error.ll13
-rw-r--r--llvm/test/CodeGen/DirectX/lerp.ll56
-rw-r--r--llvm/test/CodeGen/DirectX/lib_entry.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/log-vec.ll30
-rw-r--r--llvm/test/CodeGen/DirectX/log.ll25
-rw-r--r--llvm/test/CodeGen/DirectX/log10.ll25
-rw-r--r--llvm/test/CodeGen/DirectX/log2.ll20
-rw-r--r--llvm/test/CodeGen/DirectX/log2_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/pow-vec.ll15
-rw-r--r--llvm/test/CodeGen/DirectX/pow.ll29
-rw-r--r--llvm/test/CodeGen/DirectX/rcp.ll52
-rw-r--r--llvm/test/CodeGen/DirectX/reversebits.ll31
-rw-r--r--llvm/test/CodeGen/DirectX/round.ll35
-rw-r--r--llvm/test/CodeGen/DirectX/round_error.ll4
-rw-r--r--llvm/test/CodeGen/DirectX/rsqrt.ll28
-rw-r--r--llvm/test/CodeGen/DirectX/rsqrt_error.ll14
-rw-r--r--llvm/test/CodeGen/DirectX/smax.ll31
-rw-r--r--llvm/test/CodeGen/DirectX/smin.ll31
-rw-r--r--llvm/test/CodeGen/DirectX/sqrt.ll20
-rw-r--r--llvm/test/CodeGen/DirectX/sqrt_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/trunc.ll20
-rw-r--r--llvm/test/CodeGen/DirectX/trunc_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/umax.ll29
-rw-r--r--llvm/test/CodeGen/DirectX/umin.ll31
-rw-r--r--llvm/test/CodeGen/Generic/ForceStackAlign.ll2
-rw-r--r--llvm/test/CodeGen/Generic/allow-check.ll31
-rw-r--r--llvm/test/CodeGen/Generic/builtin-hot.ll19
-rw-r--r--llvm/test/CodeGen/Generic/gc-lowering.ll62
-rw-r--r--llvm/test/CodeGen/Hexagon/addrmode-immop.mir4
-rw-r--r--llvm/test/CodeGen/Hexagon/build-attributes.ll16
-rw-r--r--llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_invalid.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll1
-rw-r--r--llvm/test/CodeGen/Hexagon/livephysregs-regmask-clobber.mir2
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll1
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll1
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll1
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-long-loop.ll1
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll1
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/pmpy.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir2
-rw-r--r--llvm/test/CodeGen/LoongArch/lasx/intrinsic-permi.ll30
-rw-r--r--llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll42
-rw-r--r--llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll42
-rw-r--r--llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir10
-rw-r--r--llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir2
-rw-r--r--llvm/test/CodeGen/MIR/AMDGPU/stack-id-assert.mir2
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/instruction-select/trap.mir (renamed from llvm/test/CodeGen/Mips/GlobalISel/legalizer/trap.mir)9
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir21
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/legalizer/ctpop.mir126
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/legalizer/cttz.mir4
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir34
-rw-r--r--llvm/test/CodeGen/Mips/atomic-min-max.ll56
-rw-r--r--llvm/test/CodeGen/Mips/avoid-zero-copy.mir2
-rw-r--r--llvm/test/CodeGen/Mips/msa/emergency-spill.mir2
-rw-r--r--llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll10
-rw-r--r--llvm/test/CodeGen/Mips/no-unaligned-access-r6.ll69
-rw-r--r--llvm/test/CodeGen/NVPTX/atomics-sm70.ll142
-rw-r--r--llvm/test/CodeGen/NVPTX/atomics.ll7
-rw-r--r--llvm/test/CodeGen/NVPTX/b52037.ll2
-rw-r--r--llvm/test/CodeGen/NVPTX/bswap.ll77
-rw-r--r--llvm/test/CodeGen/NVPTX/common-linkage.ll29
-rw-r--r--llvm/test/CodeGen/NVPTX/dynamic_stackalloc.ll48
-rw-r--r--llvm/test/CodeGen/NVPTX/weak-global.ll9
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-codemodel-attr.ll166
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-overflow-toc-data.py59
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-largeaccess.ll632
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-types.ll1066
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-funcattr.ll105
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-loadaddr.ll222
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-targetattr.ll53
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-xcoff-funcsect-explicitsect.ll142
-rw-r--r--llvm/test/CodeGen/PowerPC/ctrloop-constrained-fp.ll4
-rw-r--r--llvm/test/CodeGen/PowerPC/fp-classify.ll113
-rw-r--r--llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir2
-rw-r--r--llvm/test/CodeGen/PowerPC/rldimi.ll71
-rw-r--r--llvm/test/CodeGen/PowerPC/rlwimi.ll42
-rw-r--r--llvm/test/CodeGen/PowerPC/rlwinm.ll20
-rw-r--r--llvm/test/CodeGen/PowerPC/scalar-double-ldst.ll58
-rw-r--r--llvm/test/CodeGen/PowerPC/scalar-float-ldst.ll58
-rw-r--r--llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll4
-rw-r--r--llvm/test/CodeGen/PowerPC/toc-data-large-array.ll16
-rw-r--r--llvm/test/CodeGen/PowerPC/toc-data-large-array2.ll8
-rw-r--r--llvm/test/CodeGen/PowerPC/toc-data-struct-array.ll110
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir345
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale32.mir300
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale64.mir139
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/trap.mir4
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll948
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir21
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir21
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv32.mir33
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir12
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv32.mir20
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir4
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv32.mir10
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir2
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv32.mir20
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir4
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv32.mir21
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv64.mir21
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir2
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-bitcast.mir356
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-implicit-def.mir410
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-select.mir400
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv32.mir228
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv64.mir110
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir425
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir558
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv32.mir48
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv64.mir25
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll936
-rw-r--r--llvm/test/CodeGen/RISCV/allow-check.ll32
-rw-r--r--llvm/test/CodeGen/RISCV/attributes-module-flag.ll17
-rw-r--r--llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll496
-rw-r--r--llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll102
-rw-r--r--llvm/test/CodeGen/RISCV/double-arith-strict.ll174
-rw-r--r--llvm/test/CodeGen/RISCV/double-arith.ll358
-rw-r--r--llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/double-br-fcmp.ll210
-rw-r--r--llvm/test/CodeGen/RISCV/double-calling-conv.ll45
-rw-r--r--llvm/test/CodeGen/RISCV/double-convert-strict.ll78
-rw-r--r--llvm/test/CodeGen/RISCV/double-convert.ll365
-rw-r--r--llvm/test/CodeGen/RISCV/double-fcmp-strict.ll400
-rw-r--r--llvm/test/CodeGen/RISCV/double-fcmp.ll140
-rw-r--r--llvm/test/CodeGen/RISCV/double-imm.ll32
-rw-r--r--llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll72
-rw-r--r--llvm/test/CodeGen/RISCV/double-intrinsics.ll127
-rw-r--r--llvm/test/CodeGen/RISCV/double-isnan.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/double-maximum-minimum.ll136
-rw-r--r--llvm/test/CodeGen/RISCV/double-mem.ll86
-rw-r--r--llvm/test/CodeGen/RISCV/double-previous-failure.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/double-round-conv-sat.ll1092
-rw-r--r--llvm/test/CodeGen/RISCV/double-round-conv.ll210
-rw-r--r--llvm/test/CodeGen/RISCV/double-select-fcmp.ll237
-rw-r--r--llvm/test/CodeGen/RISCV/double-select-icmp.ll224
-rw-r--r--llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll40
-rw-r--r--llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll32
-rw-r--r--llvm/test/CodeGen/RISCV/float-convert.ll48
-rw-r--r--llvm/test/CodeGen/RISCV/float-round-conv-sat.ll168
-rw-r--r--llvm/test/CodeGen/RISCV/half-convert-strict.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/half-convert.ll107
-rw-r--r--llvm/test/CodeGen/RISCV/half-round-conv-sat.ll336
-rw-r--r--llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/live-sp.mir2
-rw-r--r--llvm/test/CodeGen/RISCV/machine-combiner.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/make-compressible-zbc.mir585
-rw-r--r--llvm/test/CodeGen/RISCV/misched-postra-direction.mir20
-rw-r--r--llvm/test/CodeGen/RISCV/module-elf-flags.ll13
-rw-r--r--llvm/test/CodeGen/RISCV/pr64645.ll26
-rw-r--r--llvm/test/CodeGen/RISCV/rv32xtheadbb.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/rv32zbb.ll447
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll15
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll30
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-typepromotion.ll27
-rw-r--r--llvm/test/CodeGen/RISCV/rv64xtheadbb.ll209
-rw-r--r--llvm/test/CodeGen/RISCV/rv64zba.ll179
-rw-r--r--llvm/test/CodeGen/RISCV/rv64zbb.ll438
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/abd.ll343
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/binop-zext.ll154
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll95
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/calling-conv.ll163
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/compressstore.ll871
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll727
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll1004
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll928
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll1868
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll189
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll53
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll124
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll32
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsll.ll920
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll27
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll19
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll30
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll18
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll38
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll541
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll368
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll320
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir2
-rw-r--r--llvm/test/CodeGen/RISCV/spill-fill-fold.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/stack-inst-compress.mir3
-rw-r--r--llvm/test/CodeGen/RISCV/strip-w-suffix.ll74
-rw-r--r--llvm/test/CodeGen/RISCV/tlsdesc-symbol.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll38
-rw-r--r--llvm/test/CodeGen/RISCV/zdinx-large-spill.mir74
-rw-r--r--llvm/test/CodeGen/SPIRV/ComparePointers.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/ExecutionMode.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/LinkOnceODR.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/LinkOnceODRFun.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/assume.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/capability-kernel.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/empty-logical.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/empty-module.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/empty-opencl32.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/empty-opencl64.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/empty.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/exec_mode_float_control_khr.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/expect.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_arbitrary_precision_integers.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative1.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative2.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative3.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative4.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_two_calls.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_optnone.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_subgroups/cl_intel_sub_groups.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr_spec_const.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_no_integer_wrap_decoration.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_uniform_group_instructions/uniform-group-instructions.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/both-allowed-disallowed-extension-error.ll7
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll9
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions.ll7
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/invalid-extension-list-format.ll6
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/unknown-extension-name.ll6
-rw-r--r--llvm/test/CodeGen/SPIRV/fence.ll54
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveGetLaneIndex.ll68
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll3
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/ceil.ll20
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/cos.ll21
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp.ll21
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp2.ll21
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/floor.ll21
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmad.ll29
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmax.ll29
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmin.ll31
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log.ll21
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll3
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log2.ll21
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/pow.ll21
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/reversebits.ll21
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/round.ll21
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sin.ll21
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smax.ll29
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smin.ll32
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sqrt.ll21
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/trunc.ll21
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umax.ll29
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umin.ll32
-rw-r--r--llvm/test/CodeGen/SPIRV/instructions/atomic.ll28
-rw-r--r--llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll28
-rw-r--r--llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll28
-rw-r--r--llvm/test/CodeGen/SPIRV/instructions/bitwise-i1.ll69
-rw-r--r--llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll12
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll25
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll37
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-store.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-addressspace.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type-deduction-no-bitcast-to-generic.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/nested-struct-opaque-pointers.ll20
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/store-operand-ptr-to-struct.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll10
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/two-bitcast-or-param-users.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/two-subsequent-bitcasts.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/type-deduce-args-rev.ll28
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/type-deduce-args.ll97
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-chain.ll57
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-complex.ll29
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-rev.ll28
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call.ll28
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/typeof-ptr-int.ll29
-rw-r--r--llvm/test/CodeGen/SPIRV/relationals.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll9
-rw-r--r--llvm/test/CodeGen/SPIRV/simple.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchangeExplicit_cl20.ll3
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/BitReversePref.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange_2.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/ConvertPtr.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/DecorationAlignment.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/DecorationMaxByteOffset.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/DivRem.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/ExecutionMode_SPIR_to_SPIRV.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/GlobalFunAnnotate.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_cmpxchg.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_legacy.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_work_item_fence.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/OpenCL/barrier.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/OpenCL/sub_group_mask.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/OpenCL/work_group_barrier.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/atomic_flag.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/atomic_load_store.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/bitcast.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/block_w_struct_return.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/builtin_calls.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_arithmetics.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_opt.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/check_ro_qualifier.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/clk_event_t.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/enqueue_kernel.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/explicit-conversions.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/extract_insert_value.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/fadd.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/fclamp.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/fdiv.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/fmod.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/fmul.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/fneg.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/fp_contract_reassoc_fast_mode.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/frem.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/fsub.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/get_image_num_mip_levels.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/global_block.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/group_ops.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/isequal.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/relationals_double.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/relationals_float.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/relationals_half.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll2
-rw-r--r--llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir2
-rw-r--r--llvm/test/CodeGen/SystemZ/atomic-load-06.ll4
-rw-r--r--llvm/test/CodeGen/SystemZ/atomic-memops-fp128.ll31
-rw-r--r--llvm/test/CodeGen/SystemZ/atomic-memops.ll739
-rw-r--r--llvm/test/CodeGen/SystemZ/atomic-store-06.ll5
-rw-r--r--llvm/test/CodeGen/SystemZ/call-zos-01.ll14
-rw-r--r--llvm/test/CodeGen/SystemZ/call-zos-i128.ll4
-rw-r--r--llvm/test/CodeGen/SystemZ/call-zos-vararg.ll10
-rw-r--r--llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir1
-rw-r--r--llvm/test/CodeGen/SystemZ/cond-move-04.mir3
-rw-r--r--llvm/test/CodeGen/SystemZ/cond-move-08.mir3
-rw-r--r--llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir3
-rw-r--r--llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir7
-rw-r--r--llvm/test/CodeGen/SystemZ/frame-28.mir4
-rw-r--r--llvm/test/CodeGen/SystemZ/frame-adjstack.ll16
-rw-r--r--llvm/test/CodeGen/SystemZ/int-cmp-56.mir4
-rw-r--r--llvm/test/CodeGen/SystemZ/int-usub-12.ll22
-rw-r--r--llvm/test/CodeGen/SystemZ/int-usub-13.ll2
-rw-r--r--llvm/test/CodeGen/SystemZ/readcyclecounter.ll27
-rw-r--r--llvm/test/CodeGen/SystemZ/regcoal-subranges-update.mir2
-rw-r--r--llvm/test/CodeGen/SystemZ/swifterror.ll8
-rw-r--r--llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll16
-rw-r--r--llvm/test/CodeGen/SystemZ/zos-ada-relocations.ll6
-rw-r--r--llvm/test/CodeGen/SystemZ/zos-landingpad.ll2
-rw-r--r--llvm/test/CodeGen/SystemZ/zos-ppa2.ll14
-rw-r--r--llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll14
-rw-r--r--llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll3
-rw-r--r--llvm/test/CodeGen/Thumb2/mve-gather-increment.ll16
-rw-r--r--llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll1
-rw-r--r--llvm/test/CodeGen/Thumb2/mve-laneinterleaving-reduct.ll4
-rw-r--r--llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll3
-rw-r--r--llvm/test/CodeGen/Thumb2/mve-vldst4.ll5
-rw-r--r--llvm/test/CodeGen/Thumb2/mve-vpt-optimisations.mir25
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll4
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj.ll5
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll2
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-em-sjlj-debuginfo.ll35
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-em-sjlj.ll37
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-wasm-ehsjlj.ll6
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-wasm-sjlj.ll22
-rw-r--r--llvm/test/CodeGen/WebAssembly/pr63817.ll15
-rw-r--r--llvm/test/CodeGen/WinCFGuard/cfguard-mingw.ll12
-rw-r--r--llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll8
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/legalize-icmp-vec.mir25
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/x86-select-trap.mir2
-rw-r--r--llvm/test/CodeGen/X86/addcarry.ll23
-rw-r--r--llvm/test/CodeGen/X86/allow-check.ll28
-rw-r--r--llvm/test/CodeGen/X86/apx/add.ll90
-rw-r--r--llvm/test/CodeGen/X86/apx/cfcmov.ll95
-rw-r--r--llvm/test/CodeGen/X86/apx/domain-reassignment.mir929
-rw-r--r--llvm/test/CodeGen/X86/apx/flags-copy-lowering.mir102
-rw-r--r--llvm/test/CodeGen/X86/apx/foldimmediate.mir70
-rw-r--r--llvm/test/CodeGen/X86/apx/inc.ll24
-rw-r--r--llvm/test/CodeGen/X86/apx/shift-eflags.ll22
-rw-r--r--llvm/test/CodeGen/X86/apx/sub.ll80
-rw-r--r--llvm/test/CodeGen/X86/asm-dialect-module.ll10
-rw-r--r--llvm/test/CodeGen/X86/avgceils.ll3821
-rw-r--r--llvm/test/CodeGen/X86/avgceilu.ll2187
-rw-r--r--llvm/test/CodeGen/X86/avgfloors.ll3437
-rw-r--r--llvm/test/CodeGen/X86/avgflooru.ll2629
-rw-r--r--llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll8
-rw-r--r--llvm/test/CodeGen/X86/callbr-asm-kill.mir1
-rw-r--r--llvm/test/CodeGen/X86/cmov.ll139
-rw-r--r--llvm/test/CodeGen/X86/cmp.ll13
-rw-r--r--llvm/test/CodeGen/X86/combine-pavg.ll46
-rw-r--r--llvm/test/CodeGen/X86/combine-sra.ll273
-rw-r--r--llvm/test/CodeGen/X86/dagcombine-shifts.ll127
-rw-r--r--llvm/test/CodeGen/X86/extractelement-load.ll364
-rw-r--r--llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir1
-rw-r--r--llvm/test/CodeGen/X86/heap-alloc-markers.mir1
-rw-r--r--llvm/test/CodeGen/X86/huge-stack-offset.ll4
-rw-r--r--llvm/test/CodeGen/X86/huge-stack-offset2.ll2
-rw-r--r--llvm/test/CodeGen/X86/insertelement-var-index.ll44
-rw-r--r--llvm/test/CodeGen/X86/instr-symbols.mir1
-rw-r--r--llvm/test/CodeGen/X86/int-to-fp-demanded.ll382
-rw-r--r--llvm/test/CodeGen/X86/isel-select-cmov.ll50
-rw-r--r--llvm/test/CodeGen/X86/isel-traps.ll73
-rw-r--r--llvm/test/CodeGen/X86/known-never-zero.ll1831
-rw-r--r--llvm/test/CodeGen/X86/late-remat-update.mir1
-rw-r--r--llvm/test/CodeGen/X86/limit-split-cost.mir1
-rw-r--r--llvm/test/CodeGen/X86/masked_store.ll793
-rw-r--r--llvm/test/CodeGen/X86/oddshuffles.ll25
-rw-r--r--llvm/test/CodeGen/X86/optimize-max-0.ll1
-rw-r--r--llvm/test/CodeGen/X86/pr45378.ll40
-rw-r--r--llvm/test/CodeGen/X86/pr85681.ll41
-rw-r--r--llvm/test/CodeGen/X86/pr86305.ll74
-rw-r--r--llvm/test/CodeGen/X86/pr86880.mir21
-rw-r--r--llvm/test/CodeGen/X86/regalloc-copy-hints.mir1
-rw-r--r--llvm/test/CodeGen/X86/sar_fold.ll41
-rw-r--r--llvm/test/CodeGen/X86/setcc-non-simple-type.ll36
-rw-r--r--llvm/test/CodeGen/X86/shrink_vmul.ll223
-rw-r--r--llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll6
-rw-r--r--llvm/test/CodeGen/X86/stack-protector.ll9
-rw-r--r--llvm/test/CodeGen/X86/statepoint-fastregalloc.mir4
-rw-r--r--llvm/test/CodeGen/X86/statepoint-fixup-undef.mir2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-invoke-ra-hoist-copies.mir2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-invoke-ra-inline-spiller.mir2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-invoke-ra.mir2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-vreg-folding.mir2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-vreg.mir2
-rw-r--r--llvm/test/CodeGen/X86/tls-align.ll2
-rw-r--r--llvm/test/CodeGen/X86/tls-desc.ll199
-rw-r--r--llvm/test/CodeGen/X86/tls-loads-control3.ll5
-rw-r--r--llvm/test/CodeGen/X86/var-permute-128.ll32
-rw-r--r--llvm/test/CodeGen/X86/vec_int_to_fp.ll305
-rw-r--r--llvm/test/CodeGen/X86/vector-half-conversions.ll254
-rw-r--r--llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll2911
-rw-r--r--llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll736
-rw-r--r--llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll6402
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll30
-rw-r--r--llvm/test/CodeGen/X86/vpdpwssd.ll12
-rw-r--r--llvm/test/CodeGen/X86/widen_fadd.ll91
-rw-r--r--llvm/test/CodeGen/X86/widen_fmul.ll91
-rw-r--r--llvm/test/CodeGen/X86/widen_fsub.ll91
-rw-r--r--llvm/test/DebugInfo/AArch64/ptrauth.ll70
-rw-r--r--llvm/test/DebugInfo/ARM/hardware-loop-phi-insertion.ll84
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/memory-operand-folding-tieddef.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/memory-operand-load-folding.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/phi-coalesce-subreg.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/phi-coalescing.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced2.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/phi-regallocd-to-stack.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/phi-through-regalloc.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/stack-coloring-dbg-phi.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/survives-livedebugvars.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/Mips/livedebugvars-stop-trimming-loc.mir2
-rw-r--r--llvm/test/DebugInfo/MIR/X86/debug-loc-0.mir2
-rw-r--r--llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir2
-rw-r--r--llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg.mir2
-rw-r--r--llvm/test/DebugInfo/MIR/X86/livedebugvars-crossbb-interval.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/X86/prolog-epilog-indirection.mir1
-rw-r--r--llvm/test/DebugInfo/NVPTX/no-extra-loc.ll26
-rw-r--r--llvm/test/DebugInfo/X86/dbg-value-funcarg-duplicates.ll65
-rw-r--r--llvm/test/DebugInfo/X86/live-debug-vars-dse.mir2
-rw-r--r--llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir4
-rw-r--r--llvm/test/DebugInfo/X86/prolog-params.mir2
-rw-r--r--llvm/test/DebugInfo/X86/tu-to-non-tu.ll8
-rwxr-xr-xllvm/test/DebugInfo/dpvalue-print-nocrash.ll2
-rw-r--r--llvm/test/DebugInfo/print-non-instruction-debug-info.ll4
-rw-r--r--llvm/test/ExecutionEngine/JITLink/AArch64/ELF_section_start_and_stop_symbols.s43
-rw-r--r--llvm/test/ExecutionEngine/JITLink/AArch64/MachO_section_start_and_stop_symbols.s30
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/globals-access.ll46
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll5
-rw-r--r--llvm/test/Instrumentation/InstrProfiling/Coro/coro-split-musttail6.ll1
-rw-r--r--llvm/test/Instrumentation/InstrProfiling/Coro/coro-split-musttail7.ll1
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll48
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll2
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll48
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll2
-rw-r--r--llvm/test/Instrumentation/ThreadSanitizer/atomic.ll20
-rw-r--r--llvm/test/LTO/AArch64/link-branch-target-enforcement.ll1
-rw-r--r--llvm/test/LTO/AArch64/link-sign-return-address.ll43
-rw-r--r--llvm/test/Linker/link-arm-and-thumb.ll7
-rw-r--r--llvm/test/MC/AArch64/coff-relocations.s12
-rw-r--r--llvm/test/MC/AArch64/constant-pool-sizes.s25
-rw-r--r--llvm/test/MC/AMDGPU/gfx11_asm_sop1.s3
-rw-r--r--llvm/test/MC/AMDGPU/gfx11_asm_vinterp.s278
-rw-r--r--llvm/test/MC/AMDGPU/gfx12_asm_sop1.s5
-rw-r--r--llvm/test/MC/AMDGPU/gfx12_asm_vbuffer_mubuf.s44
-rw-r--r--llvm/test/MC/AMDGPU/hsa-amdgpu-exprs.s27
-rw-r--r--llvm/test/MC/AMDGPU/hsa-gfx12-v4.s6
-rw-r--r--llvm/test/MC/AMDGPU/hsa-sym-expr-failure.s281
-rw-r--r--llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx10.s190
-rw-r--r--llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx11.s186
-rw-r--r--llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx12.s184
-rw-r--r--llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx7.s168
-rw-r--r--llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx8.s171
-rw-r--r--llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx90a.s148
-rw-r--r--llvm/test/MC/AMDGPU/hsa-tg-split.s74
-rw-r--r--llvm/test/MC/AMDGPU/mcexpr_amd.s130
-rw-r--r--llvm/test/MC/AMDGPU/mcexpr_amd_err.s53
-rw-r--r--llvm/test/MC/AMDGPU/vinterp-fake16.s182
-rw-r--r--llvm/test/MC/ARM/arm-branch-errors.s4
-rw-r--r--llvm/test/MC/ARM/arm11-hint-instr.s8
-rw-r--r--llvm/test/MC/ARM/basic-arm-instructions.s12
-rw-r--r--llvm/test/MC/ARM/cde-fp-vec.s22
-rw-r--r--llvm/test/MC/ARM/cde-vec-pred.s2
-rw-r--r--llvm/test/MC/ARM/cps.s2
-rw-r--r--llvm/test/MC/ARM/diagnostics.s2
-rw-r--r--llvm/test/MC/ARM/directive-arch_extension-crypto.s15
-rw-r--r--llvm/test/MC/ARM/invalid-fp-armv8.s4
-rw-r--r--llvm/test/MC/ARM/load-store-acquire-release-v8-thumb.s8
-rw-r--r--llvm/test/MC/ARM/load-store-acquire-release-v8.s17
-rw-r--r--llvm/test/MC/ARM/lsl-zero-errors.s8
-rw-r--r--llvm/test/MC/ARM/mve-load-store.s912
-rw-r--r--llvm/test/MC/ARM/mve-misc.s59
-rw-r--r--llvm/test/MC/ARM/neon-complex.s24
-rw-r--r--llvm/test/MC/ARM/no-mve.s6
-rw-r--r--llvm/test/MC/ARM/not-armv4.s14
-rw-r--r--llvm/test/MC/ARM/register-token-source-loc.s3
-rw-r--r--llvm/test/MC/ARM/tMOVSr.s5
-rw-r--r--llvm/test/MC/ARM/thumb-diagnostics.s33
-rw-r--r--llvm/test/MC/ARM/thumb-mov.s16
-rw-r--r--llvm/test/MC/ARM/thumb2-diagnostics.s4
-rw-r--r--llvm/test/MC/ARM/vfp4.s4
-rw-r--r--llvm/test/MC/BPF/insn-unit.s2
-rw-r--r--llvm/test/MC/COFF/dwarf5lineinfo.s13
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_sop1.txt3
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vinterp.txt251
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_features.txt9
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sop1.txt5
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vinterp.txt251
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/vinterp-fake16.txt252
-rw-r--r--llvm/test/MC/Disassembler/X86/apx/cfcmov.txt842
-rw-r--r--llvm/test/MC/Disassembler/X86/apx/cmov.txt386
-rw-r--r--llvm/test/MC/Disassembler/X86/apx/evex-format.txt32
-rw-r--r--llvm/test/MC/Disassembler/X86/apx/imulzu.txt50
-rw-r--r--llvm/test/MC/Disassembler/X86/apx/reverse-encoding.txt14
-rw-r--r--llvm/test/MC/GOFF/ppa1.ll10
-rw-r--r--llvm/test/MC/Hexagon/directive-attribute-err.s24
-rw-r--r--llvm/test/MC/Hexagon/directive-attribute.s41
-rw-r--r--llvm/test/MC/Hexagon/hexagon_attributes.s94
-rw-r--r--llvm/test/MC/LoongArch/Macros/macros-li-bad.s6
-rw-r--r--llvm/test/MC/RISCV/rv32-machine-csr-names.s56
-rw-r--r--llvm/test/MC/RISCV/rv32zcmp-invalid.s12
-rw-r--r--llvm/test/MC/RISCV/rv64zcmp-invalid.s12
-rw-r--r--llvm/test/MC/RISCV/rvv/zvkned-invalid.s23
-rw-r--r--llvm/test/MC/RISCV/rvv/zvknh-invalid.s26
-rw-r--r--llvm/test/MC/RISCV/rvv/zvksed-invalid.s6
-rw-r--r--llvm/test/MC/RISCV/rvv/zvksh-invalid.s10
-rw-r--r--llvm/test/MC/RISCV/rvv/zvksh.s7
-rw-r--r--llvm/test/MC/RISCV/xsifive-invalid.s20
-rw-r--r--llvm/test/MC/RISCV/xsifive-valid.s36
-rw-r--r--llvm/test/MC/WebAssembly/module-asm.ll25
-rw-r--r--llvm/test/MC/X86/apx/cfcmov-att.s841
-rw-r--r--llvm/test/MC/X86/apx/cfcmov-intel.s841
-rw-r--r--llvm/test/MC/X86/apx/cmov-att.s293
-rw-r--r--llvm/test/MC/X86/apx/cmov-intel.s290
-rw-r--r--llvm/test/MC/X86/apx/evex-format-att.s28
-rw-r--r--llvm/test/MC/X86/apx/evex-format-intel.s28
-rw-r--r--llvm/test/MC/X86/apx/imulzu-att.s41
-rw-r--r--llvm/test/MC/X86/apx/imulzu-intel.s38
-rw-r--r--llvm/test/MachineVerifier/test_adjustsstack.mir26
-rw-r--r--llvm/test/MachineVerifier/test_g_splat_vector.mir4
-rw-r--r--llvm/test/MachineVerifier/test_g_ubsantrap.mir18
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-amplification.yaml97
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-compute.yaml95
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-domain.yaml105
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-geometry.yaml105
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-hull.yaml107
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-mesh.yaml109
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-pixel.yaml99
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-vertex.yaml97
-rw-r--r--llvm/test/Other/optimize-inrange-gep.ll18
-rw-r--r--llvm/test/TableGen/ConcatenatedSubregs.td9
-rw-r--r--llvm/test/TableGen/ConstraintChecking3.td2
-rw-r--r--llvm/test/TableGen/ConstraintChecking8.td34
-rw-r--r--llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td3
-rw-r--r--llvm/test/TableGen/HwModeEncodeDecode2.td9
-rw-r--r--llvm/test/TableGen/HwModeEncodeDecode3.td71
-rw-r--r--llvm/test/TableGen/HwModeSubRegs.td75
-rw-r--r--llvm/test/TableGen/MacroFusion.td132
-rw-r--r--llvm/test/TableGen/x86-fold-tables.inc9
-rw-r--r--llvm/test/ThinLTO/X86/Inputs/devirt_single_hybrid_bar.ll2
-rw-r--r--llvm/test/ThinLTO/X86/devirt_after_filtering_unreachable.ll2
-rw-r--r--llvm/test/ThinLTO/X86/devirt_external_comdat_same_guid.ll2
-rw-r--r--llvm/test/ThinLTO/X86/devirt_local_same_guid.ll2
-rw-r--r--llvm/test/ThinLTO/X86/lower_type_test_phi.ll4
-rw-r--r--llvm/test/ThinLTO/X86/nodevirt-nonpromoted-typeid.ll2
-rw-r--r--llvm/test/ThinLTO/X86/pseudo-probe-desc-import.ll4
-rw-r--r--llvm/test/ThinLTO/X86/type_test_noindircall.ll4
-rw-r--r--llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll24
-rw-r--r--llvm/test/Transforms/Attributor/align.ll163
-rw-r--r--llvm/test/Transforms/Attributor/nocapture-1.ll12
-rw-r--r--llvm/test/Transforms/Attributor/nofpclass-implied-by-fcmp.ll12
-rw-r--r--llvm/test/Transforms/Attributor/nofpclass.ll2
-rw-r--r--llvm/test/Transforms/CodeGenPrepare/AArch64/fpclass-test.ll134
-rw-r--r--llvm/test/Transforms/CodeGenPrepare/RISCV/fpclass-test.ll134
-rw-r--r--llvm/test/Transforms/CodeGenPrepare/X86/fpclass-test.ll178
-rw-r--r--llvm/test/Transforms/ConstantHoisting/AArch64/large-immediate.ll37
-rw-r--r--llvm/test/Transforms/Coroutines/coro-split-musttail6.ll1
-rw-r--r--llvm/test/Transforms/Coroutines/coro-split-musttail7.ll13
-rw-r--r--llvm/test/Transforms/CorrelatedValuePropagation/basic.ll56
-rw-r--r--llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll20
-rw-r--r--llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll2
-rw-r--r--llvm/test/Transforms/DFAJumpThreading/unpredictable-heuristic.ll124
-rw-r--r--llvm/test/Transforms/ExpandLargeDivRem/X86/vector.ll536
-rw-r--r--llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptosi129.ll16
-rw-r--r--llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptoui129.ll16
-rw-r--r--llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-si129tofp.ll30
-rw-r--r--llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-ui129tofp.ll30
-rw-r--r--llvm/test/Transforms/Float2Int/basic.ll317
-rw-r--r--llvm/test/Transforms/Float2Int/pr79158.ll73
-rw-r--r--llvm/test/Transforms/FunctionAttrs/noundef.ll14
-rw-r--r--llvm/test/Transforms/GVNHoist/hoist-merge-geps.ll63
-rw-r--r--llvm/test/Transforms/GlobalDCE/virtual-functions-base-call.ll4
-rw-r--r--llvm/test/Transforms/GlobalDCE/virtual-functions-base-pointer-call.ll4
-rw-r--r--llvm/test/Transforms/GlobalDCE/virtual-functions-derived-call.ll4
-rw-r--r--llvm/test/Transforms/GlobalDCE/virtual-functions-derived-pointer-call.ll4
-rw-r--r--llvm/test/Transforms/GlobalDCE/virtual-functions-novfe.ll4
-rw-r--r--llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-post-lto.ll6
-rw-r--r--llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-pre-lto.ll6
-rw-r--r--llvm/test/Transforms/GlobalDCE/virtual-functions.ll4
-rw-r--r--llvm/test/Transforms/GlobalDCE/vtable-rtti.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/inalloca-varargs.ll2
-rw-r--r--llvm/test/Transforms/GlobalSplit/basic.ll20
-rw-r--r--llvm/test/Transforms/GlobalSplit/non-beneficial.ll2
-rw-r--r--llvm/test/Transforms/GlobalSplit/nonlocal.ll2
-rw-r--r--llvm/test/Transforms/HotColdSplit/outline-disjoint-diamonds.ll4
-rw-r--r--llvm/test/Transforms/IRCE/compound-loop-bound.ll85
-rw-r--r--llvm/test/Transforms/IROutliner/illegal-vaarg.ll12
-rw-r--r--llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/outlining-no-return-functions.ll6
-rw-r--r--llvm/test/Transforms/IndVarSimplify/iv-widen-elim-ext.ll52
-rw-r--r--llvm/test/Transforms/Inline/RISCV/inline-target-features.ll34
-rw-r--r--llvm/test/Transforms/Inline/RISCV/lit.local.cfg2
-rw-r--r--llvm/test/Transforms/Inline/devirtualize-4.ll6
-rw-r--r--llvm/test/Transforms/Inline/update_invoke_prof.ll64
-rw-r--r--llvm/test/Transforms/Inline/update_value_profile.ll81
-rw-r--r--llvm/test/Transforms/InstCombine/X86/x86-avx512-inseltpoison.ll140
-rw-r--r--llvm/test/Transforms/InstCombine/X86/x86-avx512.ll140
-rw-r--r--llvm/test/Transforms/InstCombine/add.ll76
-rw-r--r--llvm/test/Transforms/InstCombine/allow-checks.ll44
-rw-r--r--llvm/test/Transforms/InstCombine/and-or-implied-cond-not.ll69
-rw-r--r--llvm/test/Transforms/InstCombine/apint-shl-trunc.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/assume.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/binop-itofp.ll122
-rw-r--r--llvm/test/Transforms/InstCombine/builtin-hot.ll25
-rw-r--r--llvm/test/Transforms/InstCombine/cast.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/catchswitch-phi.ll19
-rw-r--r--llvm/test/Transforms/InstCombine/copysign-fneg-fabs.ll79
-rw-r--r--llvm/test/Transforms/InstCombine/div.ll22
-rw-r--r--llvm/test/Transforms/InstCombine/extract-select-agg.ll83
-rw-r--r--llvm/test/Transforms/InstCombine/fcmp.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/fmul.ll103
-rw-r--r--llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll56
-rw-r--r--llvm/test/Transforms/InstCombine/fpcast.ll85
-rw-r--r--llvm/test/Transforms/InstCombine/freeze.ll19
-rw-r--r--llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll269
-rw-r--r--llvm/test/Transforms/InstCombine/icmp-mul-and.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/icmp-mul-zext.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll93
-rw-r--r--llvm/test/Transforms/InstCombine/intrinsic-select.ll40
-rw-r--r--llvm/test/Transforms/InstCombine/known-bits.ll51
-rw-r--r--llvm/test/Transforms/InstCombine/mul-masked-bits.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/mul.ll96
-rw-r--r--llvm/test/Transforms/InstCombine/not.ll203
-rw-r--r--llvm/test/Transforms/InstCombine/phi.ll21
-rw-r--r--llvm/test/Transforms/InstCombine/powi.ll154
-rw-r--r--llvm/test/Transforms/InstCombine/pr63791.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/ptr-int-cast.ll3
-rw-r--r--llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll3
-rw-r--r--llvm/test/Transforms/InstCombine/reduction-xor-sext-zext-i1.ll18
-rw-r--r--llvm/test/Transforms/InstCombine/sadd-with-overflow.ll32
-rw-r--r--llvm/test/Transforms/InstCombine/scalarization.ll11
-rw-r--r--llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll196
-rw-r--r--llvm/test/Transforms/InstCombine/select.ll62
-rw-r--r--llvm/test/Transforms/InstCombine/select_meta.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/shift-add.ll29
-rw-r--r--llvm/test/Transforms/InstCombine/shift-cttz-ctlz.ll93
-rw-r--r--llvm/test/Transforms/InstCombine/shift.ll7
-rw-r--r--llvm/test/Transforms/InstCombine/shuffle_select-inseltpoison.ll17
-rw-r--r--llvm/test/Transforms/InstCombine/shuffle_select.ll17
-rw-r--r--llvm/test/Transforms/InstCombine/sub-xor-cmp.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/trunc.ll37
-rw-r--r--llvm/test/Transforms/InstCombine/uadd-with-overflow.ll23
-rw-r--r--llvm/test/Transforms/InstCombine/zext.ll31
-rw-r--r--llvm/test/Transforms/InstSimplify/ConstProp/gep.ll12
-rw-r--r--llvm/test/Transforms/InstSimplify/ConstProp/icmp-global.ll8
-rw-r--r--llvm/test/Transforms/InstSimplify/floating-point-arithmetic.ll44
-rw-r--r--llvm/test/Transforms/InstSimplify/icmp-constant.ll128
-rw-r--r--llvm/test/Transforms/InstSimplify/shift-knownbits.ll139
-rw-r--r--llvm/test/Transforms/Internalize/vcall-visibility.ll2
-rw-r--r--llvm/test/Transforms/LICM/expr-reassociate-int.ll59
-rw-r--r--llvm/test/Transforms/LoopIdiom/AArch64/byte-compare-index.ll31
-rw-r--r--llvm/test/Transforms/LoopLoadElim/versioning-scev-invalidation.ll4
-rw-r--r--llvm/test/Transforms/LoopRotate/dbgvalue.ll2
-rw-r--r--llvm/test/Transforms/LoopRotate/update-branch-weights.ll42
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll100
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll15
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll63
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll148
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll74
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll42
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll49
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/uniform-args-call-variants.ll13
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll132
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll154
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/interleave-opaque-pointers.ll19
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/pr81872.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/small-size.ll236
-rw-r--r--llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll18
-rw-r--r--llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll40
-rw-r--r--llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll261
-rw-r--r--llvm/test/Transforms/LoopVectorize/pointer-induction-unroll.ll61
-rw-r--r--llvm/test/Transforms/LoopVectorize/pointer-induction.ll99
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll58
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-inloop.ll17
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll25
-rw-r--r--llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll8
-rw-r--r--llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll484
-rw-r--r--llvm/test/Transforms/MergeFunc/constexpr.ll8
-rw-r--r--llvm/test/Transforms/NewGVN/2007-07-25-DominatedLoop.ll59
-rw-r--r--llvm/test/Transforms/NewGVN/2007-07-25-InfiniteLoop.ll19
-rw-r--r--llvm/test/Transforms/NewGVN/2007-07-25-Loop.ll11
-rw-r--r--llvm/test/Transforms/NewGVN/2007-07-25-NestedLoop.ll33
-rw-r--r--llvm/test/Transforms/NewGVN/2007-07-25-SinglePredecessor.ll35
-rw-r--r--llvm/test/Transforms/NewGVN/2007-07-26-NonRedundant.ll11
-rw-r--r--llvm/test/Transforms/NewGVN/2007-07-26-PhiErasure.ll2
-rw-r--r--llvm/test/Transforms/NewGVN/2007-07-30-PredIDom.ll221
-rw-r--r--llvm/test/Transforms/NewGVN/2007-07-31-RedundantPhi.ll30
-rw-r--r--llvm/test/Transforms/NewGVN/2008-02-13-NewPHI.ll21
-rw-r--r--llvm/test/Transforms/NewGVN/2008-07-02-Unreachable.ll52
-rw-r--r--llvm/test/Transforms/NewGVN/2008-12-09-SelfRemove.ll49
-rw-r--r--llvm/test/Transforms/NewGVN/2008-12-12-RLE-Crash.ll33
-rw-r--r--llvm/test/Transforms/NewGVN/2008-12-14-rle-reanalyze.ll13
-rw-r--r--llvm/test/Transforms/NewGVN/2008-12-15-CacheVisited.ll23
-rw-r--r--llvm/test/Transforms/NewGVN/2009-01-21-SortInvalidation.ll51
-rw-r--r--llvm/test/Transforms/NewGVN/2009-01-22-SortInvalidation.ll161
-rw-r--r--llvm/test/Transforms/NewGVN/2009-03-10-PREOnVoid.ll67
-rw-r--r--llvm/test/Transforms/NewGVN/2009-07-13-MemDepSortFail.ll67
-rw-r--r--llvm/test/Transforms/NewGVN/2009-11-12-MemDepMallocBitCast.ll11
-rw-r--r--llvm/test/Transforms/NewGVN/2010-03-31-RedundantPHIs.ll27
-rw-r--r--llvm/test/Transforms/NewGVN/2010-05-08-OneBit.ll13
-rw-r--r--llvm/test/Transforms/NewGVN/2010-11-13-Simplify.ll10
-rw-r--r--llvm/test/Transforms/NewGVN/2011-04-27-phioperands.ll27
-rw-r--r--llvm/test/Transforms/NewGVN/2011-07-07-MatchIntrinsicExtract.ll91
-rw-r--r--llvm/test/Transforms/NewGVN/2011-09-07-TypeIdFor.ll60
-rw-r--r--llvm/test/Transforms/NewGVN/2012-05-22-PreCrash.ll1
-rw-r--r--llvm/test/Transforms/NewGVN/2016-08-30-MaskedScatterGather-xfail-inseltpoison.ll20
-rw-r--r--llvm/test/Transforms/NewGVN/MemdepMiscompile.ll39
-rw-r--r--llvm/test/Transforms/NewGVN/addrspacecast.ll12
-rw-r--r--llvm/test/Transforms/NewGVN/basic-cyclic-opt.ll2
-rw-r--r--llvm/test/Transforms/NewGVN/basic-undef-test.ll12
-rw-r--r--llvm/test/Transforms/NewGVN/br-identical.ll25
-rw-r--r--llvm/test/Transforms/NewGVN/calloc-load-removal.ll8
-rw-r--r--llvm/test/Transforms/NewGVN/calls-readonly.ll32
-rw-r--r--llvm/test/Transforms/NewGVN/completeness.ll62
-rw-r--r--llvm/test/Transforms/NewGVN/cond_br.ll35
-rw-r--r--llvm/test/Transforms/NewGVN/condprop.ll10
-rw-r--r--llvm/test/Transforms/NewGVN/crash-no-aa.ll1
-rw-r--r--llvm/test/Transforms/NewGVN/crash-usecounts.ll1
-rw-r--r--llvm/test/Transforms/NewGVN/crash.ll7
-rw-r--r--llvm/test/Transforms/NewGVN/cyclic-phi-handling.ll12
-rw-r--r--llvm/test/Transforms/NewGVN/dbg-redundant-load.ll35
-rw-r--r--llvm/test/Transforms/NewGVN/edge.ll168
-rw-r--r--llvm/test/Transforms/NewGVN/eliminate-callsite-inline.ll11
-rw-r--r--llvm/test/Transforms/NewGVN/equivalent-phi.ll14
-rw-r--r--llvm/test/Transforms/NewGVN/fold-const-expr.ll14
-rw-r--r--llvm/test/Transforms/NewGVN/fpmath.ll53
-rw-r--r--llvm/test/Transforms/NewGVN/funclet.ll30
-rw-r--r--llvm/test/Transforms/NewGVN/int_sideeffect.ll37
-rw-r--r--llvm/test/Transforms/NewGVN/invariant.group.ll521
-rw-r--r--llvm/test/Transforms/NewGVN/invariant.start.ll41
-rw-r--r--llvm/test/Transforms/NewGVN/lifetime-simple.ll13
-rw-r--r--llvm/test/Transforms/NewGVN/load-constant-mem.ll8
-rw-r--r--llvm/test/Transforms/NewGVN/load-from-unreachable-predecessor.ll14
-rw-r--r--llvm/test/Transforms/NewGVN/loadforward.ll4
-rw-r--r--llvm/test/Transforms/NewGVN/malloc-load-removal.ll43
-rw-r--r--llvm/test/Transforms/NewGVN/memory-handling.ll134
-rw-r--r--llvm/test/Transforms/NewGVN/metadata-nonnull.ll2
-rw-r--r--llvm/test/Transforms/NewGVN/metadata-simplify.ll24
-rw-r--r--llvm/test/Transforms/NewGVN/noalias.ll38
-rw-r--r--llvm/test/Transforms/NewGVN/nomemlocation.ll16
-rw-r--r--llvm/test/Transforms/NewGVN/non-integral-pointers.ll42
-rw-r--r--llvm/test/Transforms/NewGVN/null-aliases-nothing.ll24
-rw-r--r--llvm/test/Transforms/NewGVN/phi-edge-handling.ll4
-rw-r--r--llvm/test/Transforms/NewGVN/phi-of-ops-simplified-to-existing-value-then-changes-again.ll4
-rw-r--r--llvm/test/Transforms/NewGVN/phi-translate-partial-alias.ll18
-rw-r--r--llvm/test/Transforms/NewGVN/pr17732.ll9
-rw-r--r--llvm/test/Transforms/NewGVN/pr17852.ll1
-rw-r--r--llvm/test/Transforms/NewGVN/pr24397.ll1
-rw-r--r--llvm/test/Transforms/NewGVN/pr24426.ll9
-rw-r--r--llvm/test/Transforms/NewGVN/pr25440.ll83
-rw-r--r--llvm/test/Transforms/NewGVN/pr28562.ll9
-rw-r--r--llvm/test/Transforms/NewGVN/pr31472.ll6
-rw-r--r--llvm/test/Transforms/NewGVN/pr31483.ll26
-rw-r--r--llvm/test/Transforms/NewGVN/pr31491.ll10
-rw-r--r--llvm/test/Transforms/NewGVN/pr31501.ll32
-rw-r--r--llvm/test/Transforms/NewGVN/pr31573.ll2
-rw-r--r--llvm/test/Transforms/NewGVN/pr31594.ll10
-rw-r--r--llvm/test/Transforms/NewGVN/pr31613.ll6
-rw-r--r--llvm/test/Transforms/NewGVN/pr31682.ll2
-rw-r--r--llvm/test/Transforms/NewGVN/pr31758.ll2
-rw-r--r--llvm/test/Transforms/NewGVN/pr32607.ll2
-rw-r--r--llvm/test/Transforms/NewGVN/pr32836.ll8
-rw-r--r--llvm/test/Transforms/NewGVN/pr32838.ll8
-rw-r--r--llvm/test/Transforms/NewGVN/pr32845.ll2
-rw-r--r--llvm/test/Transforms/NewGVN/pr32852.ll16
-rw-r--r--llvm/test/Transforms/NewGVN/pr32897.ll2
-rw-r--r--llvm/test/Transforms/NewGVN/pr32934.ll62
-rw-r--r--llvm/test/Transforms/NewGVN/pr32945.ll21
-rw-r--r--llvm/test/Transforms/NewGVN/pr32952.ll30
-rw-r--r--llvm/test/Transforms/NewGVN/pr33014.ll47
-rw-r--r--llvm/test/Transforms/NewGVN/pr33086.ll47
-rw-r--r--llvm/test/Transforms/NewGVN/pr33116.ll2
-rw-r--r--llvm/test/Transforms/NewGVN/pr33187.ll6
-rw-r--r--llvm/test/Transforms/NewGVN/pr33196.ll57
-rw-r--r--llvm/test/Transforms/NewGVN/pr33204.ll6
-rw-r--r--llvm/test/Transforms/NewGVN/pr33305.ll30
-rw-r--r--llvm/test/Transforms/NewGVN/pr33367.ll24
-rw-r--r--llvm/test/Transforms/NewGVN/pr34452.ll2
-rw-r--r--llvm/test/Transforms/NewGVN/pr42422-phi-of-ops.ll4
-rw-r--r--llvm/test/Transforms/NewGVN/pr43441.ll30
-rw-r--r--llvm/test/Transforms/NewGVN/pre-compare.ll23
-rw-r--r--llvm/test/Transforms/NewGVN/preserve-metadata-for-predicate-replacements.ll2
-rw-r--r--llvm/test/Transforms/NewGVN/readattrs.ll10
-rw-r--r--llvm/test/Transforms/NewGVN/rle-nonlocal.ll6
-rw-r--r--llvm/test/Transforms/NewGVN/rle.ll37
-rw-r--r--llvm/test/Transforms/NewGVN/simp-to-self.ll19
-rw-r--r--llvm/test/Transforms/NewGVN/stale-loop-info.ll25
-rw-r--r--llvm/test/Transforms/NewGVN/tbaa.ll113
-rw-r--r--llvm/test/Transforms/NewGVN/unreachable_block_infinite_loop.ll7
-rw-r--r--llvm/test/Transforms/NewGVN/verify-memoryphi.ll19
-rw-r--r--llvm/test/Transforms/NewGVN/volatile-nonvolatile.ll47
-rw-r--r--llvm/test/Transforms/PGOProfile/memop_profile_funclet_wasm.ll48
-rw-r--r--llvm/test/Transforms/PGOProfile/vtable_prof_unsupported.ll34
-rw-r--r--llvm/test/Transforms/PGOProfile/vtable_profile.ll98
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll8
-rw-r--r--llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll2
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/merge-functions.ll3
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/pr67803.ll8
-rw-r--r--llvm/test/Transforms/Reassociate/vaarg_movable.ll4
-rw-r--r--llvm/test/Transforms/SCCP/add-nuw-nsw-flags.ll29
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/ext-trunc.ll9
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/gather-buildvector-with-minbitwidth-user.ll89
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/gather-with-minbith-user.ll90
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr2.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/horizontal.ll20
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/reduce-add-i64.ll20
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/slp-frem.ll55
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/user-node-not-in-bitwidths.ll83
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll220
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/init-ext-node-not-truncable.ll29
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/mixed-extracts-types.ll35
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/partial-vec-invalid-cost.ll49
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/reduction-extension-after-bitwidth.ll33
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll7
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/small-tree-not-schedulable-bv-node.ll263
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/trunc-to-large-than-bw.ll40
-rw-r--r--llvm/test/Transforms/SLPVectorizer/SystemZ/ext-not-resized-op-resized.ll48
-rw-r--r--llvm/test/Transforms/SLPVectorizer/SystemZ/minbitwidth-root-trunc.ll40
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/PR35777.ll9
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/call-arg-reduced-by-minbitwidth.ll82
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/cmp-after-intrinsic-call-minbitwidth.ll40
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/ext-int-reduced-not-operand.ll34
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/gather-nodes-different-bb.ll24
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/int-bitcast-minbitwidth.ll9
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-icmp-to-trunc.ll75
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-multiuse-with-insertelement.ll17
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-node-but-not-operands.ll61
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-transformed-operand.ll21
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-user-not-min.ll49
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/minimum-sizes.ll43
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/phi-node-bitwidt-op-not.ll95
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/phi-undef-input.ll24
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/reorder-possible-strided-node.ll110
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll25
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/reorder_phi.ll26
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/resched.ll32
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/reused-reductions-with-minbitwidth.ll10
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/same-scalar-in-same-phi-extract.ll76
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/sext-inseltpoison.ll22
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/sext.ll22
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/store-abs-minbitwidth.ll70
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/store-insertelement-minbitwidth.ll22
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll40
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/zext-inseltpoison.ll9
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/zext.ll9
-rw-r--r--llvm/test/Transforms/SLPVectorizer/alt-cmp-vectorize.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/orig-btiwidth-les-projected.ll22
-rw-r--r--llvm/test/Transforms/SampleProfile/Inputs/csspgo-import-list-preinliner.prof14
-rw-r--r--llvm/test/Transforms/SampleProfile/Inputs/pseudo-probe-callee-profile-mismatch.prof16
-rw-r--r--llvm/test/Transforms/SampleProfile/csspgo-import-list-preinliner.ll50
-rw-r--r--llvm/test/Transforms/SampleProfile/csspgo-profile-checksum-mismatch-attr.ll67
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-callee-profile-mismatch.ll63
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-eh.ll2
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-invoke.ll155
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch-error.ll7
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-lto.ll4
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll4
-rw-r--r--llvm/test/Transforms/SampleProfile/remarks-hotness.ll4
-rw-r--r--llvm/test/Transforms/SimplifyCFG/HoistCode.ll34
-rw-r--r--llvm/test/Transforms/SpeculativeExecution/PR46267.ll2
-rw-r--r--llvm/test/Transforms/TailCallElim/debugloc.ll4
-rw-r--r--llvm/test/Transforms/ThinLTOBitcodeWriter/pr33536.ll2
-rw-r--r--llvm/test/Transforms/VectorCombine/X86/shuffle-inseltpoison.ll6
-rw-r--r--llvm/test/Transforms/VectorCombine/X86/shuffle.ll6
-rw-r--r--llvm/test/Verifier/intrinsic-cmp.ll22
-rw-r--r--llvm/test/Verifier/tbaa-struct.ll40
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/lanai_isel.ll.expected6
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.expected4
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.globals.expected4
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.expected4
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.globals.expected4
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.generated.expected4
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.generated.globals.expected4
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.nogenerated.expected4
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.nogenerated.globals.expected4
-rw-r--r--llvm/test/tools/dsymutil/ARM/obfuscated.test200
-rw-r--r--llvm/test/tools/dsymutil/Inputs/obfuscated.2.arm64bin10339 -> 0 bytes
-rw-r--r--llvm/test/tools/dsymutil/Inputs/obfuscated.2.map22
-rw-r--r--llvm/test/tools/dsymutil/Inputs/obfuscated.arm64bin10434 -> 0 bytes
-rw-r--r--llvm/test/tools/dsymutil/Inputs/obfuscated.map17
-rw-r--r--llvm/test/tools/dsymutil/cmdline.test1
-rw-r--r--llvm/test/tools/dxil-dis/debug-info.ll2
-rw-r--r--llvm/test/tools/llc/new-pm/machine-function-properties.mir12
-rw-r--r--llvm/test/tools/llvm-ar/coff-symtab.test91
-rw-r--r--llvm/test/tools/llvm-ar/ecsymbols.ll19
-rw-r--r--llvm/test/tools/llvm-ar/ecsymbols.yaml84
-rw-r--r--llvm/test/tools/llvm-ar/no-symtab.yaml32
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/DWARF/dw-at-specification.test2
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/DWARF/pr-57040-ignored-DW_FORM_implicit_const.test2
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-compare-logical-elements.test106
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-print-basic-details.test120
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-select-logical-elements.test76
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/02-wasm-logical-lines.test74
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/03-wasm-incorrect-lexical-scope-typedef.test135
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/04-wasm-missing-nested-enumerators.test130
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/05-wasm-incorrect-lexical-scope-variable.test114
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/06-wasm-full-logical-view.test158
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/definitions.h30
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/hello-world-clang.s286
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/hello-world.cpp7
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-43860-clang.s457
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-43860.cpp15
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-44884-clang.s488
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-44884.cpp14
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-46466-clang.s259
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-46466.cpp11
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/test-clang.s366
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/test.cpp9
-rw-r--r--llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/README.txt28
-rw-r--r--llvm/test/tools/llvm-lib/arm64ec-implib.test106
-rw-r--r--llvm/test/tools/llvm-lib/empty.test27
-rw-r--r--llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s18
-rw-r--r--llvm/test/tools/llvm-mca/AArch64/Neoverse/V2-sve-instructions.s14
-rw-r--r--llvm/test/tools/llvm-mca/ARM/cortex-a57-basic-instructions.s2
-rw-r--r--llvm/test/tools/llvm-mca/RISCV/SiFive7/gpr-bypass.s14
-rw-r--r--llvm/test/tools/llvm-mca/RISCV/SiFive7/vector-integer-arithmetic.s70
-rw-r--r--llvm/test/tools/llvm-mca/X86/Broadwell/resources-avx1.s34
-rw-r--r--llvm/test/tools/llvm-mca/X86/Broadwell/resources-sse2.s34
-rw-r--r--llvm/test/tools/llvm-mca/X86/Haswell/resources-avx1.s34
-rw-r--r--llvm/test/tools/llvm-mca/X86/Haswell/resources-sse2.s34
-rw-r--r--llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx1.s34
-rw-r--r--llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512bwvl.s38
-rw-r--r--llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-sse2.s34
-rw-r--r--llvm/test/tools/llvm-objcopy/ELF/Inputs/compress-debug-sections.yaml4
-rw-r--r--llvm/test/tools/llvm-objcopy/ELF/compress-debug-sections-zlib.test2
-rw-r--r--llvm/test/tools/llvm-objcopy/ELF/compress-debug-sections-zstd.test2
-rw-r--r--llvm/test/tools/llvm-objcopy/ELF/decompress-sections.test36
-rw-r--r--llvm/test/tools/llvm-objcopy/ELF/discard-locals-rel.test4
-rw-r--r--llvm/test/tools/llvm-objcopy/ELF/skip-symbol.test100
-rw-r--r--llvm/test/tools/llvm-objcopy/ELF/strip-reloc-symbol.test4
-rwxr-xr-xllvm/test/tools/llvm-objdump/MachO/AArch64/Inputs/rel-method-lists-arm64.dylibbin0 -> 3206 bytes
-rwxr-xr-xllvm/test/tools/llvm-objdump/MachO/AArch64/Inputs/rel-method-lists-arm64_32.dylibbin0 -> 2716 bytes
-rw-r--r--llvm/test/tools/llvm-objdump/MachO/AArch64/macho-relative-method-lists.test86
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/vtable-value-prof.proftext74
-rw-r--r--llvm/test/tools/llvm-profdata/vtable-value-prof.test83
-rw-r--r--llvm/test/tools/llvm-profgen/Inputs/coff-profile.exebin0 -> 1629184 bytes
-rw-r--r--llvm/test/tools/llvm-profgen/Inputs/coff-profile.perfscript13
-rw-r--r--llvm/test/tools/llvm-profgen/coff-profile.test79
-rw-r--r--llvm/test/tools/llvm-readobj/ELF/decompress-zlib-unsupported.test1
-rw-r--r--llvm/test/tools/llvm-readobj/ELF/decompress-zlib.test2
-rw-r--r--llvm/test/tools/llvm-readobj/ELF/decompress-zstd-unsupported.test1
-rw-r--r--llvm/test/tools/llvm-readobj/ELF/hex-dump-multi.s6
-rw-r--r--llvm/test/tools/llvm-readobj/ELF/hex-dump.test9
-rw-r--r--llvm/test/tools/llvm-readobj/ELF/machine-specific-section-types.test23
-rw-r--r--llvm/test/tools/llvm-readobj/ELF/reloc-types-aarch64.test4
-rw-r--r--llvm/test/tools/llvm-readobj/ELF/string-dump-multi.s6
-rw-r--r--llvm/test/tools/llvm-readobj/ELF/string-dump.test6
-rw-r--r--llvm/test/tools/llvm-reduce/remove-dp-values.ll2
1335 files changed, 112076 insertions, 39272 deletions
diff --git a/llvm/test/Analysis/AliasSet/intrinsics.ll b/llvm/test/Analysis/AliasSet/intrinsics.ll
index aeb5424..0dc802c 100644
--- a/llvm/test/Analysis/AliasSet/intrinsics.ll
+++ b/llvm/test/Analysis/AliasSet/intrinsics.ll
@@ -1,9 +1,8 @@
-; RUN: opt -passes=print-alias-sets -S -o - < %s 2>&1 | FileCheck %s
+; RUN: opt -passes=print-alias-sets -S -o - < %s 2>&1 | FileCheck %s --implicit-check-not="Unknown instructions"

; CHECK: Alias sets for function 'test1':
; CHECK: Alias Set Tracker: 2 alias sets for 2 pointer values.
; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Memory locations: (ptr %a, LocationSize::precise(1))
-; CHECK-NOT: 1 Unknown instruction
; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Memory locations: (ptr %b, LocationSize::precise(1))
define void @test1(i32 %c) {
entry:
@@ -64,7 +63,6 @@ entry:
; CHECK: Alias sets for function 'test5':
; CHECK: Alias Set Tracker: 2 alias sets for 2 pointer values.
; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Memory locations: (ptr %a, LocationSize::precise(1))
-; CHECK-NOT: 1 Unknown instruction
; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Memory locations: (ptr %b, LocationSize::precise(1))
define void @test5() {
entry:
@@ -76,6 +74,36 @@ entry:
ret void
}

+; CHECK: Alias sets for function 'test_runtime':
+; CHECK: Alias Set Tracker: 2 alias sets for 2 pointer values.
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Memory locations: (ptr %a, LocationSize::precise(1))
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Memory locations: (ptr %b, LocationSize::precise(1))
+define i1 @test_runtime() local_unnamed_addr {
+entry:
+ %a = alloca i8, align 1
+ %b = alloca i8, align 1
+ store i8 1, ptr %a, align 1
+ %allow = call i1 @llvm.allow.runtime.check(metadata !"test_check")
+ store i8 1, ptr %b, align 1
+ ret i1 %allow
+}
+
+; CHECK: Alias sets for function 'test_ubsan':
+; CHECK: Alias Set Tracker: 2 alias sets for 2 pointer values.
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Memory locations: (ptr %a, LocationSize::precise(1))
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Memory locations: (ptr %b, LocationSize::precise(1))
+define i1 @test_ubsan() local_unnamed_addr {
+entry:
+ %a = alloca i8, align 1
+ %b = alloca i8, align 1
+ store i8 1, ptr %a, align 1
+ %allow = call i1 @llvm.allow.ubsan.check(i8 7)
+ store i8 1, ptr %b, align 1
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.ubsan.check(i8)
+declare i1 @llvm.allow.runtime.check(metadata)
declare void @llvm.assume(i1)
declare void @llvm.experimental.guard(i1, ...)
declare void @llvm.experimental.noalias.scope.decl(metadata)
diff --git a/llvm/test/Analysis/CostModel/ARM/intrinsic-cost-kinds.ll b/llvm/test/Analysis/CostModel/ARM/intrinsic-cost-kinds.ll
index d58f334..2823ab4 100644
--- a/llvm/test/Analysis/CostModel/ARM/intrinsic-cost-kinds.ll
+++ b/llvm/test/Analysis/CostModel/ARM/intrinsic-cost-kinds.ll
@@ -124,7 +124,7 @@ define void @log2(float %a, <16 x float> %va) {
ret void
}

-define void @constrained_fadd(float %a, <16 x float> %va) {
+define void @constrained_fadd(float %a, <16 x float> %va) strictfp {
; THRU-LABEL: 'constrained_fadd'
; THRU-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
; THRU-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
diff --git a/llvm/test/Analysis/CostModel/RISCV/cast.ll b/llvm/test/Analysis/CostModel/RISCV/cast.ll
index bd26c19..6ddd57a 100644
--- a/llvm/test/Analysis/CostModel/RISCV/cast.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/cast.ll
@@ -16,74 +16,74 @@ define void @sext() {
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i1_v2i64 = sext <2 x i1> undef to <2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i16 = sext <4 x i8> undef to <4 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i32 = sext <4 x i8> undef to <4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i64 = sext <4 x i8> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_v4i64 = sext <4 x i8> undef to <4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i32 = sext <4 x i16> undef to <4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i64 = sext <4 x i16> undef to <4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i32_v4i64 = sext <4 x i32> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i16_v4i64 = sext <4 x i16> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i32_v4i64 = sext <4 x i32> undef to <4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i8 = sext <4 x i1> undef to <4 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i16 = sext <4 x i1> undef to <4 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i32 = sext <4 x i1> undef to <4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i64 = sext <4 x i1> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_v4i64 = sext <4 x i1> undef to <4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i16 = sext <8 x i8> undef to <8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i32 = sext <8 x i8> undef to <8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i64 = sext <8 x i8> undef to <8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i32 = sext <8 x i16> undef to <8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i64 = sext <8 x i16> undef to <8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i32_v8i64 = sext <8 x i32> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_v8i32 = sext <8 x i8> undef to <8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i8_v8i64 = sext <8 x i8> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i16_v8i32 = sext <8 x i16> undef to <8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i16_v8i64 = sext <8 x i16> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i32_v8i64 = sext <8 x i32> undef to <8 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i8 = sext <8 x i1> undef to <8 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i16 = sext <8 x i1> undef to <8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i32 = sext <8 x i1> undef to <8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i64 = sext <8 x i1> undef to <8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i16 = sext <16 x i8> undef to <16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i32 = sext <16 x i8> undef to <16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i64 = sext <16 x i8> undef to <16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i32 = sext <16 x i16> undef to <16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i64 = sext <16 x i16> undef to <16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i32_v16i64 = sext <16 x i32> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_v8i32 = sext <8 x i1> undef to <8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i1_v8i64 = sext <8 x i1> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i8_v16i16 = sext <16 x i8> undef to <16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i8_v16i32 = sext <16 x i8> undef to <16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i8_v16i64 = sext <16 x i8> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i16_v16i32 = sext <16 x i16> undef to <16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i16_v16i64 = sext <16 x i16> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i32_v16i64 = sext <16 x i32> undef to <16 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i8 = sext <16 x i1> undef to <16 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i16 = sext <16 x i1> undef to <16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i32 = sext <16 x i1> undef to <16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i64 = sext <16 x i1> undef to <16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i16 = sext <32 x i8> undef to <32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i32 = sext <32 x i8> undef to <32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i8_v32i64 = sext <32 x i8> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i16_v32i32 = sext <32 x i16> undef to <32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i16_v32i64 = sext <32 x i16> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i32_v32i64 = sext <32 x i32> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i8 = sext <32 x i1> undef to <32 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i16 = sext <32 x i1> undef to <32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i32 = sext <32 x i1> undef to <32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_v32i64 = sext <32 x i1> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v64i8_v64i16 = sext <64 x i8> undef to <64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i8_v64i32 = sext <64 x i8> undef to <64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i8_v64i64 = sext <64 x i8> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i16_v64i32 = sext <64 x i16> undef to <64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i16_v64i64 = sext <64 x i16> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v64i32_v64i64 = sext <64 x i32> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i8 = sext <64 x i1> undef to <64 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i16 = sext <64 x i1> undef to <64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v64i1_v64i32 = sext <64 x i1> undef to <64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v64i1_v64i64 = sext <64 x i1> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v128i8_v128i16 = sext <128 x i8> undef to <128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v128i8_v128i32 = sext <128 x i8> undef to <128 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v128i8_v128i64 = sext <128 x i8> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v128i16_v128i32 = sext <128 x i16> undef to <128 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v128i16_v128i64 = sext <128 x i16> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v128i32_v128i64 = sext <128 x i32> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v128i1_v128i8 = sext <128 x i1> undef to <128 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v128i1_v128i16 = sext <128 x i1> undef to <128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v128i1_v128i32 = sext <128 x i1> undef to <128 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %v128i1_v128i64 = sext <128 x i1> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v256i8_v256i16 = sext <256 x i8> undef to <256 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v256i8_v256i32 = sext <256 x i8> undef to <256 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %v256i8_v256i64 = sext <256 x i8> undef to <256 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v256i16_v256i32 = sext <256 x i16> undef to <256 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %v256i16_v256i64 = sext <256 x i16> undef to <256 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v256i32_v256i64 = sext <256 x i32> undef to <256 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v256i1_v256i8 = sext <256 x i1> undef to <256 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v256i1_v256i16 = sext <256 x i1> undef to <256 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %v256i1_v256i32 = sext <256 x i1> undef to <256 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %v256i1_v256i64 = sext <256 x i1> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_v16i16 = sext <16 x i1> undef to <16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i1_v16i32 = sext <16 x i1> undef to <16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i1_v16i64 = sext <16 x i1> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i8_v32i16 = sext <32 x i8> undef to <32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i8_v32i32 = sext <32 x i8> undef to <32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i8_v32i64 = sext <32 x i8> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i16_v32i32 = sext <32 x i16> undef to <32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i16_v32i64 = sext <32 x i16> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i32_v32i64 = sext <32 x i32> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_v32i8 = sext <32 x i1> undef to <32 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i1_v32i16 = sext <32 x i1> undef to <32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v32i1_v32i32 = sext <32 x i1> undef to <32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v32i1_v32i64 = sext <32 x i1> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i8_v64i16 = sext <64 x i8> undef to <64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i8_v64i32 = sext <64 x i8> undef to <64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i8_v64i64 = sext <64 x i8> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i16_v64i32 = sext <64 x i16> undef to <64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i16_v64i64 = sext <64 x i16> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v64i32_v64i64 = sext <64 x i32> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i1_v64i8 = sext <64 x i1> undef to <64 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v64i1_v64i16 = sext <64 x i1> undef to <64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v64i1_v64i32 = sext <64 x i1> undef to <64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v64i1_v64i64 = sext <64 x i1> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v128i8_v128i16 = sext <128 x i8> undef to <128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v128i8_v128i32 = sext <128 x i8> undef to <128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %v128i8_v128i64 = sext <128 x i8> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v128i16_v128i32 = sext <128 x i16> undef to <128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v128i16_v128i64 = sext <128 x i16> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v128i32_v128i64 = sext <128 x i32> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v128i1_v128i8 = sext <128 x i1> undef to <128 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v128i1_v128i16 = sext <128 x i1> undef to <128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v128i1_v128i32 = sext <128 x i1> undef to <128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 135 for instruction: %v128i1_v128i64 = sext <128 x i1> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v256i8_v256i16 = sext <256 x i8> undef to <256 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v256i8_v256i32 = sext <256 x i8> undef to <256 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 142 for instruction: %v256i8_v256i64 = sext <256 x i8> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v256i16_v256i32 = sext <256 x i16> undef to <256 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 140 for instruction: %v256i16_v256i64 = sext <256 x i16> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %v256i32_v256i64 = sext <256 x i32> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v256i1_v256i8 = sext <256 x i1> undef to <256 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %v256i1_v256i16 = sext <256 x i1> undef to <256 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %v256i1_v256i32 = sext <256 x i1> undef to <256 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 270 for instruction: %v256i1_v256i64 = sext <256 x i1> undef to <256 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i16 = sext <vscale x 1 x i8> undef to <vscale x 1 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i32 = sext <vscale x 1 x i8> undef to <vscale x 1 x i32>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i64 = sext <vscale x 1 x i8> undef to <vscale x 1 x i64>
@@ -96,73 +96,73 @@ define void @sext() {
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv1i1_nxv1i64 = sext <vscale x 1 x i1> undef to <vscale x 1 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i16 = sext <vscale x 2 x i8> undef to <vscale x 2 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i32 = sext <vscale x 2 x i8> undef to <vscale x 2 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i64 = sext <vscale x 2 x i8> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i8_nxv2i64 = sext <vscale x 2 x i8> undef to <vscale x 2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i32 = sext <vscale x 2 x i16> undef to <vscale x 2 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i64 = sext <vscale x 2 x i16> undef to <vscale x 2 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i32_nxv2i64 = sext <vscale x 2 x i32> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i16_nxv2i64 = sext <vscale x 2 x i16> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i32_nxv2i64 = sext <vscale x 2 x i32> undef to <vscale x 2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i8 = sext <vscale x 2 x i1> undef to <vscale x 2 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i16 = sext <vscale x 2 x i1> undef to <vscale x 2 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i32 = sext <vscale x 2 x i1> undef to <vscale x 2 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i64 = sext <vscale x 2 x i1> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_nxv2i64 = sext <vscale x 2 x i1> undef to <vscale x 2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i16 = sext <vscale x 4 x i8> undef to <vscale x 4 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i32 = sext <vscale x 4 x i8> undef to <vscale x 4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i64 = sext <vscale x 4 x i8> undef to <vscale x 4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i32 = sext <vscale x 4 x i16> undef to <vscale x 4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i64 = sext <vscale x 4 x i16> undef to <vscale x 4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i32_nxv4i64 = sext <vscale x 4 x i32> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i8_nxv4i32 = sext <vscale x 4 x i8> undef to <vscale x 4 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i8_nxv4i64 = sext <vscale x 4 x i8> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i16_nxv4i32 = sext <vscale x 4 x i16> undef to <vscale x 4 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i16_nxv4i64 = sext <vscale x 4 x i16> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i32_nxv4i64 = sext <vscale x 4 x i32> undef to <vscale x 4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i8 = sext <vscale x 4 x i1> undef to <vscale x 4 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i16 = sext <vscale x 4 x i1> undef to <vscale x 4 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i32 = sext <vscale x 4 x i1> undef to <vscale x 4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i64 = sext <vscale x 4 x i1> undef to <vscale x 4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i16 = sext <vscale x 8 x i8> undef to <vscale x 8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i32 = sext <vscale x 8 x i8> undef to <vscale x 8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i64 = sext <vscale x 8 x i8> undef to <vscale x 8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i32 = sext <vscale x 8 x i16> undef to <vscale x 8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i64 = sext <vscale x 8 x i16> undef to <vscale x 8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i32_nxv8i64 = sext <vscale x 8 x i32> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_nxv4i32 = sext <vscale x 4 x i1> undef to <vscale x 4 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv4i1_nxv4i64 = sext <vscale x 4 x i1> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i8_nxv8i16 = sext <vscale x 8 x i8> undef to <vscale x 8 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i8_nxv8i32 = sext <vscale x 8 x i8> undef to <vscale x 8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i8_nxv8i64 = sext <vscale x 8 x i8> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i16_nxv8i32 = sext <vscale x 8 x i16> undef to <vscale x 8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i16_nxv8i64 = sext <vscale x 8 x i16> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i32_nxv8i64 = sext <vscale x 8 x i32> undef to <vscale x 8 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i8 = sext <vscale x 8 x i1> undef to <vscale x 8 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i16 = sext <vscale x 8 x i1> undef to <vscale x 8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i32 = sext <vscale x 8 x i1> undef to <vscale x 8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i64 = sext <vscale x 8 x i1> undef to <vscale x 8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i16 = sext <vscale x 16 x i8> undef to <vscale x 16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i32 = sext <vscale x 16 x i8> undef to <vscale x 16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i8_nxv16i64 = sext <vscale x 16 x i8> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i16_nxv16i32 = sext <vscale x 16 x i16> undef to <vscale x 16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i16_nxv16i64 = sext <vscale x 16 x i16> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i32_nxv16i64 = sext <vscale x 16 x i32> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i8 = sext <vscale x 16 x i1> undef to <vscale x 16 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i16 = sext <vscale x 16 x i1> undef to <vscale x 16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i32 = sext <vscale x 16 x i1> undef to <vscale x 16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_nxv16i64 = sext <vscale x 16 x i1> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv32i8_nxv32i16 = sext <vscale x 32 x i8> undef to <vscale x 32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i8_nxv32i32 = sext <vscale x 32 x i8> undef to <vscale x 32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i8_nxv32i64 = sext <vscale x 32 x i8> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i16_nxv32i32 = sext <vscale x 32 x i16> undef to <vscale x 32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i16_nxv32i64 = sext <vscale x 32 x i16> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i32_nxv32i64 = sext <vscale x 32 x i32> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i8 = sext <vscale x 32 x i1> undef to <vscale x 32 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i16 = sext <vscale x 32 x i1> undef to <vscale x 32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_nxv32i32 = sext <vscale x 32 x i1> undef to <vscale x 32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i1_nxv32i64 = sext <vscale x 32 x i1> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv64i8_nxv64i16 = sext <vscale x 64 x i8> undef to <vscale x 64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv64i8_nxv64i32 = sext <vscale x 64 x i8> undef to <vscale x 64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_nxv8i16 = sext <vscale x 8 x i1> undef to <vscale x 8 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i1_nxv8i32 = sext <vscale x 8 x i1> undef to <vscale x 8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv8i1_nxv8i64 = sext <vscale x 8 x i1> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i8_nxv16i16 = sext <vscale x 16 x i8> undef to <vscale x 16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i8_nxv16i32 = sext <vscale x 16 x i8> undef to <vscale x 16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i8_nxv16i64 = sext <vscale x 16 x i8> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i16_nxv16i32 = sext <vscale x 16 x i16> undef to <vscale x 16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i16_nxv16i64 = sext <vscale x 16 x i16> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i32_nxv16i64 = sext <vscale x 16 x i32> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_nxv16i8 = sext <vscale x 16 x i1> undef to <vscale x 16 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i1_nxv16i16 = sext <vscale x 16 x i1> undef to <vscale x 16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv16i1_nxv16i32 = sext <vscale x 16 x i1> undef to <vscale x 16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv16i1_nxv16i64 = sext <vscale x 16 x i1> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i8_nxv32i16 = sext <vscale x 32 x i8> undef to <vscale x 32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i8_nxv32i32 = sext <vscale x 32 x i8> undef to <vscale x 32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i8_nxv32i64 = sext <vscale x 32 x i8> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i16_nxv32i32 = sext <vscale x 32 x i16> undef to <vscale x 32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i16_nxv32i64 = sext <vscale x 32 x i16> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv32i32_nxv32i64 = sext <vscale x 32 x i32> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i1_nxv32i8 = sext <vscale x 32 x i1> undef to <vscale x 32 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv32i1_nxv32i16 = sext <vscale x 32 x i1> undef to <vscale x 32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv32i1_nxv32i32 = sext <vscale x 32 x i1> undef to <vscale x 32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv32i1_nxv32i64 = sext <vscale x 32 x i1> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv64i8_nxv64i16 = sext <vscale x 64 x i8> undef to <vscale x 64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv64i8_nxv64i32 = sext <vscale x 64 x i8> undef to <vscale x 64 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv64i8_nxv64i64 = sext <vscale x 64 x i8> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv64i16_nxv64i32 = sext <vscale x 64 x i16> undef to <vscale x 64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %nxv64i16_nxv64i64 = sext <vscale x 64 x i16> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %nxv64i32_nxv64i64 = sext <vscale x 64 x i32> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv64i1_nxv64i8 = sext <vscale x 64 x i1> undef to <vscale x 64 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv64i1_nxv64i16 = sext <vscale x 64 x i1> undef to <vscale x 64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv64i1_nxv64i32 = sext <vscale x 64 x i1> undef to <vscale x 64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv64i16_nxv64i32 = sext <vscale x 64 x i16> undef to <vscale x 64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %nxv64i16_nxv64i64 = sext <vscale x 64 x i16> undef to <vscale x 64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 69 for instruction: %nxv64i32_nxv64i64 = sext <vscale x 64 x i32> undef to <vscale x 64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv64i1_nxv64i8 = sext <vscale x 64 x i1> undef to <vscale x 64 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv64i1_nxv64i16 = sext <vscale x 64 x i1> undef to <vscale x 64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv64i1_nxv64i32 = sext <vscale x 64 x i1> undef to <vscale x 64 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv64i1_nxv64i64 = sext <vscale x 64 x i1> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv128i8_nxv128i16 = sext <vscale x 128 x i8> undef to <vscale x 128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %nxv128i8_nxv128i32 = sext <vscale x 128 x i8> undef to <vscale x 128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv128i8_nxv128i16 = sext <vscale x 128 x i8> undef to <vscale x 128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %nxv128i8_nxv128i32 = sext <vscale x 128 x i8> undef to <vscale x 128 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i8_nxv128i128 = sext <vscale x 128 x i8> undef to <vscale x 128 x i128>
-; RV32-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv128i16_nxv128i32 = sext <vscale x 128 x i16> undef to <vscale x 128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %nxv128i16_nxv128i32 = sext <vscale x 128 x i16> undef to <vscale x 128 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i16_nxv128i128 = sext <vscale x 128 x i16> undef to <vscale x 128 x i128>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i32_nxv128i128 = sext <vscale x 128 x i32> undef to <vscale x 128 x i128>
-; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv128i1_nxv128i8 = sext <vscale x 128 x i1> undef to <vscale x 128 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv128i1_nxv128i16 = sext <vscale x 128 x i1> undef to <vscale x 128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %nxv128i1_nxv128i32 = sext <vscale x 128 x i1> undef to <vscale x 128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %nxv128i1_nxv128i8 = sext <vscale x 128 x i1> undef to <vscale x 128 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %nxv128i1_nxv128i16 = sext <vscale x 128 x i1> undef to <vscale x 128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %nxv128i1_nxv128i32 = sext <vscale x 128 x i1> undef to <vscale x 128 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i1_nxv128i128 = sext <vscale x 128 x i1> undef to <vscale x 128 x i128>
; RV32-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
@@ -179,74 +179,74 @@ define void @sext() {
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i1_v2i64 = sext <2 x i1> undef to <2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i16 = sext <4 x i8> undef to <4 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i32 = sext <4 x i8> undef to <4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i64 = sext <4 x i8> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_v4i64 = sext <4 x i8> undef to <4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i32 = sext <4 x i16> undef to <4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i64 = sext <4 x i16> undef to <4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i32_v4i64 = sext <4 x i32> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i16_v4i64 = sext <4 x i16> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i32_v4i64 = sext <4 x i32> undef to <4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i8 = sext <4 x i1> undef to <4 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i16 = sext <4 x i1> undef to <4 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i32 = sext <4 x i1> undef to <4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i64 = sext <4 x i1> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_v4i64 = sext <4 x i1> undef to <4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i16 = sext <8 x i8> undef to <8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i32 = sext <8 x i8> undef to <8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i64 = sext <8 x i8> undef to <8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i32 = sext <8 x i16> undef to <8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i64 = sext <8 x i16> undef to <8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i32_v8i64 = sext <8 x i32> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_v8i32 = sext <8 x i8> undef to <8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i8_v8i64 = sext <8 x i8> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i16_v8i32 = sext <8 x i16> undef to <8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i16_v8i64 = sext <8 x i16> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i32_v8i64 = sext <8 x i32> undef to <8 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i8 = sext <8 x i1> undef to <8 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i16 = sext <8 x i1> undef to <8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i32 = sext <8 x i1> undef to <8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i64 = sext <8 x i1> undef to <8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i16 = sext <16 x i8> undef to <16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i32 = sext <16 x i8> undef to <16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i64 = sext <16 x i8> undef to <16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i32 = sext <16 x i16> undef to <16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i64 = sext <16 x i16> undef to <16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i32_v16i64 = sext <16 x i32> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_v8i32 = sext <8 x i1> undef to <8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i1_v8i64 = sext <8 x i1> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i8_v16i16 = sext <16 x i8> undef to <16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i8_v16i32 = sext <16 x i8> undef to <16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i8_v16i64 = sext <16 x i8> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i16_v16i32 = sext <16 x i16> undef to <16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i16_v16i64 = sext <16 x i16> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i32_v16i64 = sext <16 x i32> undef to <16 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i8 = sext <16 x i1> undef to <16 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i16 = sext <16 x i1> undef to <16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i32 = sext <16 x i1> undef to <16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i64 = sext <16 x i1> undef to <16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i16 = sext <32 x i8> undef to <32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i32 = sext <32 x i8> undef to <32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i8_v32i64 = sext <32 x i8> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i16_v32i32 = sext <32 x i16> undef to <32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i16_v32i64 = sext <32 x i16> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i32_v32i64 = sext <32 x i32> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i8 = sext <32 x i1> undef to <32 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i16 = sext <32 x i1> undef to <32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i32 = sext <32 x i1> undef to <32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_v32i64 = sext <32 x i1> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v64i8_v64i16 = sext <64 x i8> undef to <64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i8_v64i32 = sext <64 x i8> undef to <64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i8_v64i64 = sext <64 x i8> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i16_v64i32 = sext <64 x i16> undef to <64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i16_v64i64 = sext <64 x i16> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v64i32_v64i64 = sext <64 x i32> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i8 = sext <64 x i1> undef to <64 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i16 = sext <64 x i1> undef to <64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v64i1_v64i32 = sext <64 x i1> undef to <64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v64i1_v64i64 = sext <64 x i1> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v128i8_v128i16 = sext <128 x i8> undef to <128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v128i8_v128i32 = sext <128 x i8> undef to <128 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v128i8_v128i64 = sext <128 x i8> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v128i16_v128i32 = sext <128 x i16> undef to <128 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v128i16_v128i64 = sext <128 x i16> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v128i32_v128i64 = sext <128 x i32> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v128i1_v128i8 = sext <128 x i1> undef to <128 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v128i1_v128i16 = sext <128 x i1> undef to <128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v128i1_v128i32 = sext <128 x i1> undef to <128 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %v128i1_v128i64 = sext <128 x i1> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v256i8_v256i16 = sext <256 x i8> undef to <256 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v256i8_v256i32 = sext <256 x i8> undef to <256 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %v256i8_v256i64 = sext <256 x i8> undef to <256 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v256i16_v256i32 = sext <256 x i16> undef to <256 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %v256i16_v256i64 = sext <256 x i16> undef to <256 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v256i32_v256i64 = sext <256 x i32> undef to <256 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v256i1_v256i8 = sext <256 x i1> undef to <256 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v256i1_v256i16 = sext <256 x i1> undef to <256 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %v256i1_v256i32 = sext <256 x i1> undef to <256 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %v256i1_v256i64 = sext <256 x i1> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_v16i16 = sext <16 x i1> undef to <16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i1_v16i32 = sext <16 x i1> undef to <16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i1_v16i64 = sext <16 x i1> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i8_v32i16 = sext <32 x i8> undef to <32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i8_v32i32 = sext <32 x i8> undef to <32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i8_v32i64 = sext <32 x i8> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i16_v32i32 = sext <32 x i16> undef to <32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i16_v32i64 = sext <32 x i16> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i32_v32i64 = sext <32 x i32> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_v32i8 = sext <32 x i1> undef to <32 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i1_v32i16 = sext <32 x i1> undef to <32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v32i1_v32i32 = sext <32 x i1> undef to <32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v32i1_v32i64 = sext <32 x i1> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i8_v64i16 = sext <64 x i8> undef to <64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i8_v64i32 = sext <64 x i8> undef to <64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i8_v64i64 = sext <64 x i8> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i16_v64i32 = sext <64 x i16> undef to <64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i16_v64i64 = sext <64 x i16> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v64i32_v64i64 = sext <64 x i32> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i1_v64i8 = sext <64 x i1> undef to <64 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v64i1_v64i16 = sext <64 x i1> undef to <64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v64i1_v64i32 = sext <64 x i1> undef to <64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v64i1_v64i64 = sext <64 x i1> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v128i8_v128i16 = sext <128 x i8> undef to <128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v128i8_v128i32 = sext <128 x i8> undef to <128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %v128i8_v128i64 = sext <128 x i8> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v128i16_v128i32 = sext <128 x i16> undef to <128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v128i16_v128i64 = sext <128 x i16> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v128i32_v128i64 = sext <128 x i32> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v128i1_v128i8 = sext <128 x i1> undef to <128 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v128i1_v128i16 = sext <128 x i1> undef to <128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v128i1_v128i32 = sext <128 x i1> undef to <128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 135 for instruction: %v128i1_v128i64 = sext <128 x i1> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v256i8_v256i16 = sext <256 x i8> undef to <256 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v256i8_v256i32 = sext <256 x i8> undef to <256 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 142 for instruction: %v256i8_v256i64 = sext <256 x i8> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v256i16_v256i32 = sext <256 x i16> undef to <256 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 140 for instruction: %v256i16_v256i64 = sext <256 x i16> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %v256i32_v256i64 = sext <256 x i32> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v256i1_v256i8 = sext <256 x i1> undef to <256 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %v256i1_v256i16 = sext <256 x i1> undef to <256 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %v256i1_v256i32 = sext <256 x i1> undef to <256 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 270 for instruction: %v256i1_v256i64 = sext <256 x i1> undef to <256 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i16 = sext <vscale x 1 x i8> undef to <vscale x 1 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i32 = sext <vscale x 1 x i8> undef to <vscale x 1 x i32>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i64 = sext <vscale x 1 x i8> undef to <vscale x 1 x i64>
@@ -259,73 +259,73 @@ define void @sext() {
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv1i1_nxv1i64 = sext <vscale x 1 x i1> undef to <vscale x 1 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i16 = sext <vscale x 2 x i8> undef to <vscale x 2 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i32 = sext <vscale x 2 x i8> undef to <vscale x 2 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i64 = sext <vscale x 2 x i8> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i8_nxv2i64 = sext <vscale x 2 x i8> undef to <vscale x 2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i32 = sext <vscale x 2 x i16> undef to <vscale x 2 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i64 = sext <vscale x 2 x i16> undef to <vscale x 2 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i32_nxv2i64 = sext <vscale x 2 x i32> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i16_nxv2i64 = sext <vscale x 2 x i16> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i32_nxv2i64 = sext <vscale x 2 x i32> undef to <vscale x 2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i8 = sext <vscale x 2 x i1> undef to <vscale x 2 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i16 = sext <vscale x 2 x i1> undef to <vscale x 2 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i32 = sext <vscale x 2 x i1> undef to <vscale x 2 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i64 = sext <vscale x 2 x i1> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_nxv2i64 = sext <vscale x 2 x i1> undef to <vscale x 2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i16 = sext <vscale x 4 x i8> undef to <vscale x 4 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i32 = sext <vscale x 4 x i8> undef to <vscale x 4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i64 = sext <vscale x 4 x i8> undef to <vscale x 4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i32 = sext <vscale x 4 x i16> undef to <vscale x 4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i64 = sext <vscale x 4 x i16> undef to <vscale x 4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i32_nxv4i64 = sext <vscale x 4 x i32> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i8_nxv4i32 = sext <vscale x 4 x i8> undef to <vscale x 4 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i8_nxv4i64 = sext <vscale x 4 x i8> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i16_nxv4i32 = sext <vscale x 4 x i16> undef to <vscale x 4 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i16_nxv4i64 = sext <vscale x 4 x i16> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i32_nxv4i64 = sext <vscale x 4 x i32> undef to <vscale x 4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i8 = sext <vscale x 4 x i1> undef to <vscale x 4 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i16 = sext <vscale x 4 x i1> undef to <vscale x 4 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i32 = sext <vscale x 4 x i1> undef to <vscale x 4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i64 = sext <vscale x 4 x i1> undef to <vscale x 4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i16 = sext <vscale x 8 x i8> undef to <vscale x 8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i32 = sext <vscale x 8 x i8> undef to <vscale x 8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i64 = sext <vscale x 8 x i8> undef to <vscale x 8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i32 = sext <vscale x 8 x i16> undef to <vscale x 8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i64 = sext <vscale x 8 x i16> undef to <vscale x 8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i32_nxv8i64 = sext <vscale x 8 x i32> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_nxv4i32 = sext <vscale x 4 x i1> undef to <vscale x 4 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv4i1_nxv4i64 = sext <vscale x 4 x i1> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i8_nxv8i16 = sext <vscale x 8 x i8> undef to <vscale x 8 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i8_nxv8i32 = sext <vscale x 8 x i8> undef to <vscale x 8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i8_nxv8i64 = sext <vscale x 8 x i8> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i16_nxv8i32 = sext <vscale x 8 x i16> undef to <vscale x 8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i16_nxv8i64 = sext <vscale x 8 x i16> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i32_nxv8i64 = sext <vscale x 8 x i32> undef to <vscale x 8 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i8 = sext <vscale x 8 x i1> undef to <vscale x 8 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i16 = sext <vscale x 8 x i1> undef to <vscale x 8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i32 = sext <vscale x 8 x i1> undef to <vscale x 8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i64 = sext <vscale x 8 x i1> undef to <vscale x 8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i16 = sext <vscale x 16 x i8> undef to <vscale x 16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i32 = sext <vscale x 16 x i8> undef to <vscale x 16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i8_nxv16i64 = sext <vscale x 16 x i8> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i16_nxv16i32 = sext <vscale x 16 x i16> undef to <vscale x 16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i16_nxv16i64 = sext <vscale x 16 x i16> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i32_nxv16i64 = sext <vscale x 16 x i32> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i8 = sext <vscale x 16 x i1> undef to <vscale x 16 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i16 = sext <vscale x 16 x i1> undef to <vscale x 16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i32 = sext <vscale x 16 x i1> undef to <vscale x 16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_nxv16i64 = sext <vscale x 16 x i1> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv32i8_nxv32i16 = sext <vscale x 32 x i8> undef to <vscale x 32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i8_nxv32i32 = sext <vscale x 32 x i8> undef to <vscale x 32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i8_nxv32i64 = sext <vscale x 32 x i8> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i16_nxv32i32 = sext <vscale x 32 x i16> undef to <vscale x 32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i16_nxv32i64 = sext <vscale x 32 x i16> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i32_nxv32i64 = sext <vscale x 32 x i32> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i8 = sext <vscale x 32 x i1> undef to <vscale x 32 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i16 = sext <vscale x 32 x i1> undef to <vscale x 32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_nxv32i32 = sext <vscale x 32 x i1> undef to <vscale x 32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i1_nxv32i64 = sext <vscale x 32 x i1> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv64i8_nxv64i16 = sext <vscale x 64 x i8> undef to <vscale x 64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv64i8_nxv64i32 = sext <vscale x 64 x i8> undef to <vscale x 64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %nxv64i8_nxv64i64 = sext <vscale x 64 x i8> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv64i16_nxv64i32 = sext <vscale x 64 x i16> undef to <vscale x 64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %nxv64i16_nxv64i64 = sext <vscale x 64 x i16> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv64i32_nxv64i64 = sext <vscale x 64 x i32> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv64i1_nxv64i8 = sext <vscale x 64 x i1> undef to <vscale x 64 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv64i1_nxv64i16 = sext <vscale x 64 x i1> undef to <vscale x 64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv64i1_nxv64i32 = sext <vscale x 64 x i1> undef to <vscale x 64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %nxv64i1_nxv64i64 = sext <vscale x 64 x i1> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv128i8_nxv128i16 = sext <vscale x 128 x i8> undef to <vscale x 128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %nxv128i8_nxv128i32 = sext <vscale x 128 x i8> undef to <vscale x 128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_nxv8i16 = sext <vscale x 8 x i1> undef to <vscale x 8 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i1_nxv8i32 = sext <vscale x 8 x i1> undef to <vscale x 8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv8i1_nxv8i64 = sext <vscale x 8 x i1> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i8_nxv16i16 = sext <vscale x 16 x i8> undef to <vscale x 16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i8_nxv16i32 = sext <vscale x 16 x i8> undef to <vscale x 16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i8_nxv16i64 = sext <vscale x 16 x i8> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i16_nxv16i32 = sext <vscale x 16 x i16> undef to <vscale x 16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i16_nxv16i64 = sext <vscale x 16 x i16> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i32_nxv16i64 = sext <vscale x 16 x i32> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_nxv16i8 = sext <vscale x 16 x i1> undef to <vscale x 16 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i1_nxv16i16 = sext <vscale x 16 x i1> undef to <vscale x 16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv16i1_nxv16i32 = sext <vscale x 16 x i1> undef to <vscale x 16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv16i1_nxv16i64 = sext <vscale x 16 x i1> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i8_nxv32i16 = sext <vscale x 32 x i8> undef to <vscale x 32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i8_nxv32i32 = sext <vscale x 32 x i8> undef to <vscale x 32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i8_nxv32i64 = sext <vscale x 32 x i8> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i16_nxv32i32 = sext <vscale x 32 x i16> undef to <vscale x 32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i16_nxv32i64 = sext <vscale x 32 x i16> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv32i32_nxv32i64 = sext <vscale x 32 x i32> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i1_nxv32i8 = sext <vscale x 32 x i1> undef to <vscale x 32 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv32i1_nxv32i16 = sext <vscale x 32 x i1> undef to <vscale x 32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv32i1_nxv32i32 = sext <vscale x 32 x i1> undef to <vscale x 32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv32i1_nxv32i64 = sext <vscale x 32 x i1> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv64i8_nxv64i16 = sext <vscale x 64 x i8> undef to <vscale x 64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv64i8_nxv64i32 = sext <vscale x 64 x i8> undef to <vscale x 64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %nxv64i8_nxv64i64 = sext <vscale x 64 x i8> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv64i16_nxv64i32 = sext <vscale x 64 x i16> undef to <vscale x 64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %nxv64i16_nxv64i64 = sext <vscale x 64 x i16> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %nxv64i32_nxv64i64 = sext <vscale x 64 x i32> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv64i1_nxv64i8 = sext <vscale x 64 x i1> undef to <vscale x 64 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv64i1_nxv64i16 = sext <vscale x 64 x i1> undef to <vscale x 64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv64i1_nxv64i32 = sext <vscale x 64 x i1> undef to <vscale x 64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 135 for instruction: %nxv64i1_nxv64i64 = sext <vscale x 64 x i1> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv128i8_nxv128i16 = sext <vscale x 128 x i8> undef to <vscale x 128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %nxv128i8_nxv128i32 = sext <vscale x 128 x i8> undef to <vscale x 128 x i32>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i8_nxv128i128 = sext <vscale x 128 x i8> undef to <vscale x 128 x i128>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv128i16_nxv128i32 = sext <vscale x 128 x i16> undef to <vscale x 128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %nxv128i16_nxv128i32 = sext <vscale x 128 x i16> undef to <vscale x 128 x i32>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i16_nxv128i128 = sext <vscale x 128 x i16> undef to <vscale x 128 x i128>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i32_nxv128i128 = sext <vscale x 128 x i32> undef to <vscale x 128 x i128>
-; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv128i1_nxv128i8 = sext <vscale x 128 x i1> undef to <vscale x 128 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv128i1_nxv128i16 = sext <vscale x 128 x i1> undef to <vscale x 128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %nxv128i1_nxv128i32 = sext <vscale x 128 x i1> undef to <vscale x 128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %nxv128i1_nxv128i8 = sext <vscale x 128 x i1> undef to <vscale x 128 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %nxv128i1_nxv128i16 = sext <vscale x 128 x i1> undef to <vscale x 128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %nxv128i1_nxv128i32 = sext <vscale x 128 x i1> undef to <vscale x 128 x i32>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i1_nxv128i128 = sext <vscale x 128 x i1> undef to <vscale x 128 x i128>
; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
@@ -522,74 +522,74 @@ define void @zext() {
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i1_v2i64 = zext <2 x i1> undef to <2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i16 = zext <4 x i8> undef to <4 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i32 = zext <4 x i8> undef to <4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i64 = zext <4 x i8> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_v4i64 = zext <4 x i8> undef to <4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i32 = zext <4 x i16> undef to <4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i64 = zext <4 x i16> undef to <4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i32_v4i64 = zext <4 x i32> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i16_v4i64 = zext <4 x i16> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i32_v4i64 = zext <4 x i32> undef to <4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i8 = zext <4 x i1> undef to <4 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i16 = zext <4 x i1> undef to <4 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i32 = zext <4 x i1> undef to <4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i64 = zext <4 x i1> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_v4i64 = zext <4 x i1> undef to <4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i16 = zext <8 x i8> undef to <8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i32 = zext <8 x i8> undef to <8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i64 = zext <8 x i8> undef to <8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i32 = zext <8 x i16> undef to <8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i64 = zext <8 x i16> undef to <8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i32_v8i64 = zext <8 x i32> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_v8i32 = zext <8 x i8> undef to <8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i8_v8i64 = zext <8 x i8> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i16_v8i32 = zext <8 x i16> undef to <8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i16_v8i64 = zext <8 x i16> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i32_v8i64 = zext <8 x i32> undef to <8 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i8 = zext <8 x i1> undef to <8 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i16 = zext <8 x i1> undef to <8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i32 = zext <8 x i1> undef to <8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i64 = zext <8 x i1> undef to <8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i16 = zext <16 x i8> undef to <16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i32 = zext <16 x i8> undef to <16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i64 = zext <16 x i8> undef to <16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i32 = zext <16 x i16> undef to <16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i64 = zext <16 x i16> undef to <16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i32_v16i64 = zext <16 x i32> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_v8i32 = zext <8 x i1> undef to <8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i1_v8i64 = zext <8 x i1> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i8_v16i16 = zext <16 x i8> undef to <16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i8_v16i32 = zext <16 x i8> undef to <16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i8_v16i64 = zext <16 x i8> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i16_v16i32 = zext <16 x i16> undef to <16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i16_v16i64 = zext <16 x i16> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i32_v16i64 = zext <16 x i32> undef to <16 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i8 = zext <16 x i1> undef to <16 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i16 = zext <16 x i1> undef to <16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i32 = zext <16 x i1> undef to <16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i64 = zext <16 x i1> undef to <16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i16 = zext <32 x i8> undef to <32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i32 = zext <32 x i8> undef to <32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i8_v32i64 = zext <32 x i8> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i16_v32i32 = zext <32 x i16> undef to <32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i16_v32i64 = zext <32 x i16> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i32_v32i64 = zext <32 x i32> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i8 = zext <32 x i1> undef to <32 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i16 = zext <32 x i1> undef to <32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i32 = zext <32 x i1> undef to <32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_v32i64 = zext <32 x i1> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v64i8_v64i16 = zext <64 x i8> undef to <64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i8_v64i32 = zext <64 x i8> undef to <64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i8_v64i64 = zext <64 x i8> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i16_v64i32 = zext <64 x i16> undef to <64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i16_v64i64 = zext <64 x i16> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v64i32_v64i64 = zext <64 x i32> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i8 = zext <64 x i1> undef to <64 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i16 = zext <64 x i1> undef to <64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v64i1_v64i32 = zext <64 x i1> undef to <64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v64i1_v64i64 = zext <64 x i1> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v128i8_v128i16 = zext <128 x i8> undef to <128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v128i8_v128i32 = zext <128 x i8> undef to <128 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v128i8_v128i64 = zext <128 x i8> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v128i16_v128i32 = zext <128 x i16> undef to <128 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v128i16_v128i64 = zext <128 x i16> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v128i32_v128i64 = zext <128 x i32> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v128i1_v128i8 = zext <128 x i1> undef to <128 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v128i1_v128i16 = zext <128 x i1> undef to <128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v128i1_v128i32 = zext <128 x i1> undef to <128 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %v128i1_v128i64 = zext <128 x i1> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v256i8_v256i16 = zext <256 x i8> undef to <256 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v256i8_v256i32 = zext <256 x i8> undef to <256 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %v256i8_v256i64 = zext <256 x i8> undef to <256 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v256i16_v256i32 = zext <256 x i16> undef to <256 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %v256i16_v256i64 = zext <256 x i16> undef to <256 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v256i32_v256i64 = zext <256 x i32> undef to <256 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v256i1_v256i8 = zext <256 x i1> undef to <256 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v256i1_v256i16 = zext <256 x i1> undef to <256 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %v256i1_v256i32 = zext <256 x i1> undef to <256 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %v256i1_v256i64 = zext <256 x i1> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_v16i16 = zext <16 x i1> undef to <16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i1_v16i32 = zext <16 x i1> undef to <16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i1_v16i64 = zext <16 x i1> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i8_v32i16 = zext <32 x i8> undef to <32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i8_v32i32 = zext <32 x i8> undef to <32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i8_v32i64 = zext <32 x i8> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i16_v32i32 = zext <32 x i16> undef to <32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i16_v32i64 = zext <32 x i16> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i32_v32i64 = zext <32 x i32> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_v32i8 = zext <32 x i1> undef to <32 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i1_v32i16 = zext <32 x i1> undef to <32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v32i1_v32i32 = zext <32 x i1> undef to <32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v32i1_v32i64 = zext <32 x i1> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i8_v64i16 = zext <64 x i8> undef to <64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i8_v64i32 = zext <64 x i8> undef to <64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i8_v64i64 = zext <64 x i8> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i16_v64i32 = zext <64 x i16> undef to <64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i16_v64i64 = zext <64 x i16> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v64i32_v64i64 = zext <64 x i32> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i1_v64i8 = zext <64 x i1> undef to <64 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v64i1_v64i16 = zext <64 x i1> undef to <64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v64i1_v64i32 = zext <64 x i1> undef to <64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v64i1_v64i64 = zext <64 x i1> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v128i8_v128i16 = zext <128 x i8> undef to <128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v128i8_v128i32 = zext <128 x i8> undef to <128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %v128i8_v128i64 = zext <128 x i8> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v128i16_v128i32 = zext <128 x i16> undef to <128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v128i16_v128i64 = zext <128 x i16> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v128i32_v128i64 = zext <128 x i32> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v128i1_v128i8 = zext <128 x i1> undef to <128 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v128i1_v128i16 = zext <128 x i1> undef to <128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v128i1_v128i32 = zext <128 x i1> undef to <128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 135 for instruction: %v128i1_v128i64 = zext <128 x i1> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v256i8_v256i16 = zext <256 x i8> undef to <256 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v256i8_v256i32 = zext <256 x i8> undef to <256 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 142 for instruction: %v256i8_v256i64 = zext <256 x i8> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v256i16_v256i32 = zext <256 x i16> undef to <256 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 140 for instruction: %v256i16_v256i64 = zext <256 x i16> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %v256i32_v256i64 = zext <256 x i32> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v256i1_v256i8 = zext <256 x i1> undef to <256 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %v256i1_v256i16 = zext <256 x i1> undef to <256 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %v256i1_v256i32 = zext <256 x i1> undef to <256 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 270 for instruction: %v256i1_v256i64 = zext <256 x i1> undef to <256 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i16 = zext <vscale x 1 x i8> undef to <vscale x 1 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i32 = zext <vscale x 1 x i8> undef to <vscale x 1 x i32>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i64 = zext <vscale x 1 x i8> undef to <vscale x 1 x i64>
@@ -602,73 +602,73 @@ define void @zext() {
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv1i1_nxv1i64 = zext <vscale x 1 x i1> undef to <vscale x 1 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i16 = zext <vscale x 2 x i8> undef to <vscale x 2 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i32 = zext <vscale x 2 x i8> undef to <vscale x 2 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i64 = zext <vscale x 2 x i8> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i8_nxv2i64 = zext <vscale x 2 x i8> undef to <vscale x 2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i32 = zext <vscale x 2 x i16> undef to <vscale x 2 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i64 = zext <vscale x 2 x i16> undef to <vscale x 2 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i32_nxv2i64 = zext <vscale x 2 x i32> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i16_nxv2i64 = zext <vscale x 2 x i16> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i32_nxv2i64 = zext <vscale x 2 x i32> undef to <vscale x 2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i8 = zext <vscale x 2 x i1> undef to <vscale x 2 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i16 = zext <vscale x 2 x i1> undef to <vscale x 2 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i32 = zext <vscale x 2 x i1> undef to <vscale x 2 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i64 = zext <vscale x 2 x i1> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_nxv2i64 = zext <vscale x 2 x i1> undef to <vscale x 2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i16 = zext <vscale x 4 x i8> undef to <vscale x 4 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i32 = zext <vscale x 4 x i8> undef to <vscale x 4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i64 = zext <vscale x 4 x i8> undef to <vscale x 4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i32 = zext <vscale x 4 x i16> undef to <vscale x 4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i64 = zext <vscale x 4 x i16> undef to <vscale x 4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i32_nxv4i64 = zext <vscale x 4 x i32> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i8_nxv4i32 = zext <vscale x 4 x i8> undef to <vscale x 4 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i8_nxv4i64 = zext <vscale x 4 x i8> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i16_nxv4i32 = zext <vscale x 4 x i16> undef to <vscale x 4 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i16_nxv4i64 = zext <vscale x 4 x i16> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i32_nxv4i64 = zext <vscale x 4 x i32> undef to <vscale x 4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i8 = zext <vscale x 4 x i1> undef to <vscale x 4 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i16 = zext <vscale x 4 x i1> undef to <vscale x 4 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i32 = zext <vscale x 4 x i1> undef to <vscale x 4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i64 = zext <vscale x 4 x i1> undef to <vscale x 4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i16 = zext <vscale x 8 x i8> undef to <vscale x 8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i32 = zext <vscale x 8 x i8> undef to <vscale x 8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i64 = zext <vscale x 8 x i8> undef to <vscale x 8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i32 = zext <vscale x 8 x i16> undef to <vscale x 8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i64 = zext <vscale x 8 x i16> undef to <vscale x 8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i32_nxv8i64 = zext <vscale x 8 x i32> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_nxv4i32 = zext <vscale x 4 x i1> undef to <vscale x 4 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv4i1_nxv4i64 = zext <vscale x 4 x i1> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i8_nxv8i16 = zext <vscale x 8 x i8> undef to <vscale x 8 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i8_nxv8i32 = zext <vscale x 8 x i8> undef to <vscale x 8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i8_nxv8i64 = zext <vscale x 8 x i8> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i16_nxv8i32 = zext <vscale x 8 x i16> undef to <vscale x 8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i16_nxv8i64 = zext <vscale x 8 x i16> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i32_nxv8i64 = zext <vscale x 8 x i32> undef to <vscale x 8 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i8 = zext <vscale x 8 x i1> undef to <vscale x 8 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i16 = zext <vscale x 8 x i1> undef to <vscale x 8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i32 = zext <vscale x 8 x i1> undef to <vscale x 8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i64 = zext <vscale x 8 x i1> undef to <vscale x 8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i16 = zext <vscale x 16 x i8> undef to <vscale x 16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i32 = zext <vscale x 16 x i8> undef to <vscale x 16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i8_nxv16i64 = zext <vscale x 16 x i8> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i16_nxv16i32 = zext <vscale x 16 x i16> undef to <vscale x 16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i16_nxv16i64 = zext <vscale x 16 x i16> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i32_nxv16i64 = zext <vscale x 16 x i32> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i8 = zext <vscale x 16 x i1> undef to <vscale x 16 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i16 = zext <vscale x 16 x i1> undef to <vscale x 16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i32 = zext <vscale x 16 x i1> undef to <vscale x 16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_nxv16i64 = zext <vscale x 16 x i1> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv32i8_nxv32i16 = zext <vscale x 32 x i8> undef to <vscale x 32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i8_nxv32i32 = zext <vscale x 32 x i8> undef to <vscale x 32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i8_nxv32i64 = zext <vscale x 32 x i8> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i16_nxv32i32 = zext <vscale x 32 x i16> undef to <vscale x 32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i16_nxv32i64 = zext <vscale x 32 x i16> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i32_nxv32i64 = zext <vscale x 32 x i32> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i8 = zext <vscale x 32 x i1> undef to <vscale x 32 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i16 = zext <vscale x 32 x i1> undef to <vscale x 32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_nxv32i32 = zext <vscale x 32 x i1> undef to <vscale x 32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i1_nxv32i64 = zext <vscale x 32 x i1> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv64i8_nxv64i16 = zext <vscale x 64 x i8> undef to <vscale x 64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv64i8_nxv64i32 = zext <vscale x 64 x i8> undef to <vscale x 64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_nxv8i16 = zext <vscale x 8 x i1> undef to <vscale x 8 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i1_nxv8i32 = zext <vscale x 8 x i1> undef to <vscale x 8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv8i1_nxv8i64 = zext <vscale x 8 x i1> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i8_nxv16i16 = zext <vscale x 16 x i8> undef to <vscale x 16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i8_nxv16i32 = zext <vscale x 16 x i8> undef to <vscale x 16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i8_nxv16i64 = zext <vscale x 16 x i8> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i16_nxv16i32 = zext <vscale x 16 x i16> undef to <vscale x 16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i16_nxv16i64 = zext <vscale x 16 x i16> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i32_nxv16i64 = zext <vscale x 16 x i32> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_nxv16i8 = zext <vscale x 16 x i1> undef to <vscale x 16 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i1_nxv16i16 = zext <vscale x 16 x i1> undef to <vscale x 16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv16i1_nxv16i32 = zext <vscale x 16 x i1> undef to <vscale x 16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv16i1_nxv16i64 = zext <vscale x 16 x i1> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i8_nxv32i16 = zext <vscale x 32 x i8> undef to <vscale x 32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i8_nxv32i32 = zext <vscale x 32 x i8> undef to <vscale x 32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i8_nxv32i64 = zext <vscale x 32 x i8> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i16_nxv32i32 = zext <vscale x 32 x i16> undef to <vscale x 32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i16_nxv32i64 = zext <vscale x 32 x i16> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv32i32_nxv32i64 = zext <vscale x 32 x i32> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i1_nxv32i8 = zext <vscale x 32 x i1> undef to <vscale x 32 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv32i1_nxv32i16 = zext <vscale x 32 x i1> undef to <vscale x 32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv32i1_nxv32i32 = zext <vscale x 32 x i1> undef to <vscale x 32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv32i1_nxv32i64 = zext <vscale x 32 x i1> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv64i8_nxv64i16 = zext <vscale x 64 x i8> undef to <vscale x 64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv64i8_nxv64i32 = zext <vscale x 64 x i8> undef to <vscale x 64 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv64i8_nxv64i64 = zext <vscale x 64 x i8> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv64i16_nxv64i32 = zext <vscale x 64 x i16> undef to <vscale x 64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %nxv64i16_nxv64i64 = zext <vscale x 64 x i16> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %nxv64i32_nxv64i64 = zext <vscale x 64 x i32> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv64i1_nxv64i8 = zext <vscale x 64 x i1> undef to <vscale x 64 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv64i1_nxv64i16 = zext <vscale x 64 x i1> undef to <vscale x 64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv64i1_nxv64i32 = zext <vscale x 64 x i1> undef to <vscale x 64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv64i16_nxv64i32 = zext <vscale x 64 x i16> undef to <vscale x 64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %nxv64i16_nxv64i64 = zext <vscale x 64 x i16> undef to <vscale x 64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 69 for instruction: %nxv64i32_nxv64i64 = zext <vscale x 64 x i32> undef to <vscale x 64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv64i1_nxv64i8 = zext <vscale x 64 x i1> undef to <vscale x 64 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv64i1_nxv64i16 = zext <vscale x 64 x i1> undef to <vscale x 64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv64i1_nxv64i32 = zext <vscale x 64 x i1> undef to <vscale x 64 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv64i1_nxv64i64 = zext <vscale x 64 x i1> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv128i8_nxv128i16 = zext <vscale x 128 x i8> undef to <vscale x 128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %nxv128i8_nxv128i32 = zext <vscale x 128 x i8> undef to <vscale x 128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv128i8_nxv128i16 = zext <vscale x 128 x i8> undef to <vscale x 128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %nxv128i8_nxv128i32 = zext <vscale x 128 x i8> undef to <vscale x 128 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i8_nxv128i128 = zext <vscale x 128 x i8> undef to <vscale x 128 x i128>
-; RV32-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv128i16_nxv128i32 = zext <vscale x 128 x i16> undef to <vscale x 128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %nxv128i16_nxv128i32 = zext <vscale x 128 x i16> undef to <vscale x 128 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i16_nxv128i128 = zext <vscale x 128 x i16> undef to <vscale x 128 x i128>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i32_nxv128i128 = zext <vscale x 128 x i32> undef to <vscale x 128 x i128>
-; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv128i1_nxv128i8 = zext <vscale x 128 x i1> undef to <vscale x 128 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv128i1_nxv128i16 = zext <vscale x 128 x i1> undef to <vscale x 128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %nxv128i1_nxv128i32 = zext <vscale x 128 x i1> undef to <vscale x 128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %nxv128i1_nxv128i8 = zext <vscale x 128 x i1> undef to <vscale x 128 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %nxv128i1_nxv128i16 = zext <vscale x 128 x i1> undef to <vscale x 128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %nxv128i1_nxv128i32 = zext <vscale x 128 x i1> undef to <vscale x 128 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i1_nxv128i128 = zext <vscale x 128 x i1> undef to <vscale x 128 x i128>
; RV32-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
@@ -685,74 +685,74 @@ define void @zext() {
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i1_v2i64 = zext <2 x i1> undef to <2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i16 = zext <4 x i8> undef to <4 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i32 = zext <4 x i8> undef to <4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i64 = zext <4 x i8> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_v4i64 = zext <4 x i8> undef to <4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i32 = zext <4 x i16> undef to <4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i64 = zext <4 x i16> undef to <4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i32_v4i64 = zext <4 x i32> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i16_v4i64 = zext <4 x i16> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i32_v4i64 = zext <4 x i32> undef to <4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i8 = zext <4 x i1> undef to <4 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i16 = zext <4 x i1> undef to <4 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i32 = zext <4 x i1> undef to <4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i64 = zext <4 x i1> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_v4i64 = zext <4 x i1> undef to <4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i16 = zext <8 x i8> undef to <8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i32 = zext <8 x i8> undef to <8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i64 = zext <8 x i8> undef to <8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i32 = zext <8 x i16> undef to <8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i64 = zext <8 x i16> undef to <8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i32_v8i64 = zext <8 x i32> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_v8i32 = zext <8 x i8> undef to <8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i8_v8i64 = zext <8 x i8> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i16_v8i32 = zext <8 x i16> undef to <8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i16_v8i64 = zext <8 x i16> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i32_v8i64 = zext <8 x i32> undef to <8 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i8 = zext <8 x i1> undef to <8 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i16 = zext <8 x i1> undef to <8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i32 = zext <8 x i1> undef to <8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i64 = zext <8 x i1> undef to <8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i16 = zext <16 x i8> undef to <16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i32 = zext <16 x i8> undef to <16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i64 = zext <16 x i8> undef to <16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i32 = zext <16 x i16> undef to <16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i64 = zext <16 x i16> undef to <16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i32_v16i64 = zext <16 x i32> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_v8i32 = zext <8 x i1> undef to <8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i1_v8i64 = zext <8 x i1> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i8_v16i16 = zext <16 x i8> undef to <16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i8_v16i32 = zext <16 x i8> undef to <16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i8_v16i64 = zext <16 x i8> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i16_v16i32 = zext <16 x i16> undef to <16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i16_v16i64 = zext <16 x i16> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i32_v16i64 = zext <16 x i32> undef to <16 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i8 = zext <16 x i1> undef to <16 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i16 = zext <16 x i1> undef to <16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i32 = zext <16 x i1> undef to <16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i64 = zext <16 x i1> undef to <16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i16 = zext <32 x i8> undef to <32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i32 = zext <32 x i8> undef to <32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i8_v32i64 = zext <32 x i8> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i16_v32i32 = zext <32 x i16> undef to <32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i16_v32i64 = zext <32 x i16> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i32_v32i64 = zext <32 x i32> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i8 = zext <32 x i1> undef to <32 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i16 = zext <32 x i1> undef to <32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i32 = zext <32 x i1> undef to <32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_v32i64 = zext <32 x i1> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v64i8_v64i16 = zext <64 x i8> undef to <64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i8_v64i32 = zext <64 x i8> undef to <64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i8_v64i64 = zext <64 x i8> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i16_v64i32 = zext <64 x i16> undef to <64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i16_v64i64 = zext <64 x i16> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v64i32_v64i64 = zext <64 x i32> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i8 = zext <64 x i1> undef to <64 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i16 = zext <64 x i1> undef to <64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v64i1_v64i32 = zext <64 x i1> undef to <64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v64i1_v64i64 = zext <64 x i1> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v128i8_v128i16 = zext <128 x i8> undef to <128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v128i8_v128i32 = zext <128 x i8> undef to <128 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v128i8_v128i64 = zext <128 x i8> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v128i16_v128i32 = zext <128 x i16> undef to <128 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v128i16_v128i64 = zext <128 x i16> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v128i32_v128i64 = zext <128 x i32> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v128i1_v128i8 = zext <128 x i1> undef to <128 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v128i1_v128i16 = zext <128 x i1> undef to <128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v128i1_v128i32 = zext <128 x i1> undef to <128 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %v128i1_v128i64 = zext <128 x i1> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v256i8_v256i16 = zext <256 x i8> undef to <256 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v256i8_v256i32 = zext <256 x i8> undef to <256 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %v256i8_v256i64 = zext <256 x i8> undef to <256 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v256i16_v256i32 = zext <256 x i16> undef to <256 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %v256i16_v256i64 = zext <256 x i16> undef to <256 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v256i32_v256i64 = zext <256 x i32> undef to <256 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v256i1_v256i8 = zext <256 x i1> undef to <256 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v256i1_v256i16 = zext <256 x i1> undef to <256 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %v256i1_v256i32 = zext <256 x i1> undef to <256 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %v256i1_v256i64 = zext <256 x i1> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_v16i16 = zext <16 x i1> undef to <16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i1_v16i32 = zext <16 x i1> undef to <16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i1_v16i64 = zext <16 x i1> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i8_v32i16 = zext <32 x i8> undef to <32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i8_v32i32 = zext <32 x i8> undef to <32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i8_v32i64 = zext <32 x i8> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i16_v32i32 = zext <32 x i16> undef to <32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i16_v32i64 = zext <32 x i16> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i32_v32i64 = zext <32 x i32> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_v32i8 = zext <32 x i1> undef to <32 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i1_v32i16 = zext <32 x i1> undef to <32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v32i1_v32i32 = zext <32 x i1> undef to <32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v32i1_v32i64 = zext <32 x i1> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i8_v64i16 = zext <64 x i8> undef to <64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i8_v64i32 = zext <64 x i8> undef to <64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i8_v64i64 = zext <64 x i8> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i16_v64i32 = zext <64 x i16> undef to <64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i16_v64i64 = zext <64 x i16> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v64i32_v64i64 = zext <64 x i32> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i1_v64i8 = zext <64 x i1> undef to <64 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v64i1_v64i16 = zext <64 x i1> undef to <64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v64i1_v64i32 = zext <64 x i1> undef to <64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v64i1_v64i64 = zext <64 x i1> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v128i8_v128i16 = zext <128 x i8> undef to <128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v128i8_v128i32 = zext <128 x i8> undef to <128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %v128i8_v128i64 = zext <128 x i8> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v128i16_v128i32 = zext <128 x i16> undef to <128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v128i16_v128i64 = zext <128 x i16> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v128i32_v128i64 = zext <128 x i32> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v128i1_v128i8 = zext <128 x i1> undef to <128 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v128i1_v128i16 = zext <128 x i1> undef to <128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v128i1_v128i32 = zext <128 x i1> undef to <128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 135 for instruction: %v128i1_v128i64 = zext <128 x i1> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v256i8_v256i16 = zext <256 x i8> undef to <256 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v256i8_v256i32 = zext <256 x i8> undef to <256 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 142 for instruction: %v256i8_v256i64 = zext <256 x i8> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v256i16_v256i32 = zext <256 x i16> undef to <256 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 140 for instruction: %v256i16_v256i64 = zext <256 x i16> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %v256i32_v256i64 = zext <256 x i32> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v256i1_v256i8 = zext <256 x i1> undef to <256 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %v256i1_v256i16 = zext <256 x i1> undef to <256 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %v256i1_v256i32 = zext <256 x i1> undef to <256 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 270 for instruction: %v256i1_v256i64 = zext <256 x i1> undef to <256 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i16 = zext <vscale x 1 x i8> undef to <vscale x 1 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i32 = zext <vscale x 1 x i8> undef to <vscale x 1 x i32>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i64 = zext <vscale x 1 x i8> undef to <vscale x 1 x i64>
@@ -765,73 +765,73 @@ define void @zext() {
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv1i1_nxv1i64 = zext <vscale x 1 x i1> undef to <vscale x 1 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i16 = zext <vscale x 2 x i8> undef to <vscale x 2 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i32 = zext <vscale x 2 x i8> undef to <vscale x 2 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i64 = zext <vscale x 2 x i8> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i8_nxv2i64 = zext <vscale x 2 x i8> undef to <vscale x 2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i32 = zext <vscale x 2 x i16> undef to <vscale x 2 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i64 = zext <vscale x 2 x i16> undef to <vscale x 2 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i32_nxv2i64 = zext <vscale x 2 x i32> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i16_nxv2i64 = zext <vscale x 2 x i16> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i32_nxv2i64 = zext <vscale x 2 x i32> undef to <vscale x 2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i8 = zext <vscale x 2 x i1> undef to <vscale x 2 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i16 = zext <vscale x 2 x i1> undef to <vscale x 2 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i32 = zext <vscale x 2 x i1> undef to <vscale x 2 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i64 = zext <vscale x 2 x i1> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_nxv2i64 = zext <vscale x 2 x i1> undef to <vscale x 2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i16 = zext <vscale x 4 x i8> undef to <vscale x 4 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i32 = zext <vscale x 4 x i8> undef to <vscale x 4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i64 = zext <vscale x 4 x i8> undef to <vscale x 4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i32 = zext <vscale x 4 x i16> undef to <vscale x 4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i64 = zext <vscale x 4 x i16> undef to <vscale x 4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i32_nxv4i64 = zext <vscale x 4 x i32> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i8_nxv4i32 = zext <vscale x 4 x i8> undef to <vscale x 4 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i8_nxv4i64 = zext <vscale x 4 x i8> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i16_nxv4i32 = zext <vscale x 4 x i16> undef to <vscale x 4 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i16_nxv4i64 = zext <vscale x 4 x i16> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i32_nxv4i64 = zext <vscale x 4 x i32> undef to <vscale x 4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i8 = zext <vscale x 4 x i1> undef to <vscale x 4 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i16 = zext <vscale x 4 x i1> undef to <vscale x 4 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i32 = zext <vscale x 4 x i1> undef to <vscale x 4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i64 = zext <vscale x 4 x i1> undef to <vscale x 4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i16 = zext <vscale x 8 x i8> undef to <vscale x 8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i32 = zext <vscale x 8 x i8> undef to <vscale x 8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i64 = zext <vscale x 8 x i8> undef to <vscale x 8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i32 = zext <vscale x 8 x i16> undef to <vscale x 8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i64 = zext <vscale x 8 x i16> undef to <vscale x 8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i32_nxv8i64 = zext <vscale x 8 x i32> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_nxv4i32 = zext <vscale x 4 x i1> undef to <vscale x 4 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv4i1_nxv4i64 = zext <vscale x 4 x i1> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i8_nxv8i16 = zext <vscale x 8 x i8> undef to <vscale x 8 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i8_nxv8i32 = zext <vscale x 8 x i8> undef to <vscale x 8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i8_nxv8i64 = zext <vscale x 8 x i8> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i16_nxv8i32 = zext <vscale x 8 x i16> undef to <vscale x 8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i16_nxv8i64 = zext <vscale x 8 x i16> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i32_nxv8i64 = zext <vscale x 8 x i32> undef to <vscale x 8 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i8 = zext <vscale x 8 x i1> undef to <vscale x 8 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i16 = zext <vscale x 8 x i1> undef to <vscale x 8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i32 = zext <vscale x 8 x i1> undef to <vscale x 8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i64 = zext <vscale x 8 x i1> undef to <vscale x 8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i16 = zext <vscale x 16 x i8> undef to <vscale x 16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i32 = zext <vscale x 16 x i8> undef to <vscale x 16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i8_nxv16i64 = zext <vscale x 16 x i8> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i16_nxv16i32 = zext <vscale x 16 x i16> undef to <vscale x 16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i16_nxv16i64 = zext <vscale x 16 x i16> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i32_nxv16i64 = zext <vscale x 16 x i32> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i8 = zext <vscale x 16 x i1> undef to <vscale x 16 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i16 = zext <vscale x 16 x i1> undef to <vscale x 16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i32 = zext <vscale x 16 x i1> undef to <vscale x 16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_nxv16i64 = zext <vscale x 16 x i1> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv32i8_nxv32i16 = zext <vscale x 32 x i8> undef to <vscale x 32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i8_nxv32i32 = zext <vscale x 32 x i8> undef to <vscale x 32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i8_nxv32i64 = zext <vscale x 32 x i8> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i16_nxv32i32 = zext <vscale x 32 x i16> undef to <vscale x 32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i16_nxv32i64 = zext <vscale x 32 x i16> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i32_nxv32i64 = zext <vscale x 32 x i32> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i8 = zext <vscale x 32 x i1> undef to <vscale x 32 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i16 = zext <vscale x 32 x i1> undef to <vscale x 32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_nxv32i32 = zext <vscale x 32 x i1> undef to <vscale x 32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i1_nxv32i64 = zext <vscale x 32 x i1> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv64i8_nxv64i16 = zext <vscale x 64 x i8> undef to <vscale x 64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv64i8_nxv64i32 = zext <vscale x 64 x i8> undef to <vscale x 64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %nxv64i8_nxv64i64 = zext <vscale x 64 x i8> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv64i16_nxv64i32 = zext <vscale x 64 x i16> undef to <vscale x 64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %nxv64i16_nxv64i64 = zext <vscale x 64 x i16> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv64i32_nxv64i64 = zext <vscale x 64 x i32> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv64i1_nxv64i8 = zext <vscale x 64 x i1> undef to <vscale x 64 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv64i1_nxv64i16 = zext <vscale x 64 x i1> undef to <vscale x 64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv64i1_nxv64i32 = zext <vscale x 64 x i1> undef to <vscale x 64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %nxv64i1_nxv64i64 = zext <vscale x 64 x i1> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv128i8_nxv128i16 = zext <vscale x 128 x i8> undef to <vscale x 128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %nxv128i8_nxv128i32 = zext <vscale x 128 x i8> undef to <vscale x 128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_nxv8i16 = zext <vscale x 8 x i1> undef to <vscale x 8 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i1_nxv8i32 = zext <vscale x 8 x i1> undef to <vscale x 8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv8i1_nxv8i64 = zext <vscale x 8 x i1> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i8_nxv16i16 = zext <vscale x 16 x i8> undef to <vscale x 16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i8_nxv16i32 = zext <vscale x 16 x i8> undef to <vscale x 16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i8_nxv16i64 = zext <vscale x 16 x i8> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i16_nxv16i32 = zext <vscale x 16 x i16> undef to <vscale x 16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i16_nxv16i64 = zext <vscale x 16 x i16> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i32_nxv16i64 = zext <vscale x 16 x i32> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_nxv16i8 = zext <vscale x 16 x i1> undef to <vscale x 16 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i1_nxv16i16 = zext <vscale x 16 x i1> undef to <vscale x 16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv16i1_nxv16i32 = zext <vscale x 16 x i1> undef to <vscale x 16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv16i1_nxv16i64 = zext <vscale x 16 x i1> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i8_nxv32i16 = zext <vscale x 32 x i8> undef to <vscale x 32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i8_nxv32i32 = zext <vscale x 32 x i8> undef to <vscale x 32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i8_nxv32i64 = zext <vscale x 32 x i8> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i16_nxv32i32 = zext <vscale x 32 x i16> undef to <vscale x 32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i16_nxv32i64 = zext <vscale x 32 x i16> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv32i32_nxv32i64 = zext <vscale x 32 x i32> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i1_nxv32i8 = zext <vscale x 32 x i1> undef to <vscale x 32 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv32i1_nxv32i16 = zext <vscale x 32 x i1> undef to <vscale x 32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv32i1_nxv32i32 = zext <vscale x 32 x i1> undef to <vscale x 32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv32i1_nxv32i64 = zext <vscale x 32 x i1> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv64i8_nxv64i16 = zext <vscale x 64 x i8> undef to <vscale x 64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv64i8_nxv64i32 = zext <vscale x 64 x i8> undef to <vscale x 64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %nxv64i8_nxv64i64 = zext <vscale x 64 x i8> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv64i16_nxv64i32 = zext <vscale x 64 x i16> undef to <vscale x 64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %nxv64i16_nxv64i64 = zext <vscale x 64 x i16> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %nxv64i32_nxv64i64 = zext <vscale x 64 x i32> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv64i1_nxv64i8 = zext <vscale x 64 x i1> undef to <vscale x 64 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv64i1_nxv64i16 = zext <vscale x 64 x i1> undef to <vscale x 64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv64i1_nxv64i32 = zext <vscale x 64 x i1> undef to <vscale x 64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 135 for instruction: %nxv64i1_nxv64i64 = zext <vscale x 64 x i1> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv128i8_nxv128i16 = zext <vscale x 128 x i8> undef to <vscale x 128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %nxv128i8_nxv128i32 = zext <vscale x 128 x i8> undef to <vscale x 128 x i32>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i8_nxv128i128 = zext <vscale x 128 x i8> undef to <vscale x 128 x i128>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv128i16_nxv128i32 = zext <vscale x 128 x i16> undef to <vscale x 128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %nxv128i16_nxv128i32 = zext <vscale x 128 x i16> undef to <vscale x 128 x i32>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i16_nxv128i128 = zext <vscale x 128 x i16> undef to <vscale x 128 x i128>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i32_nxv128i128 = zext <vscale x 128 x i32> undef to <vscale x 128 x i128>
-; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv128i1_nxv128i8 = zext <vscale x 128 x i1> undef to <vscale x 128 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv128i1_nxv128i16 = zext <vscale x 128 x i1> undef to <vscale x 128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %nxv128i1_nxv128i32 = zext <vscale x 128 x i1> undef to <vscale x 128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %nxv128i1_nxv128i8 = zext <vscale x 128 x i1> undef to <vscale x 128 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %nxv128i1_nxv128i16 = zext <vscale x 128 x i1> undef to <vscale x 128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %nxv128i1_nxv128i32 = zext <vscale x 128 x i1> undef to <vscale x 128 x i32>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i1_nxv128i128 = zext <vscale x 128 x i1> undef to <vscale x 128 x i128>
; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
@@ -1035,17 +1035,17 @@ define void @trunc() {
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_v4i1 = trunc <4 x i8> undef to <4 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i16_v4i1 = trunc <4 x i16> undef to <4 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i32_v4i1 = trunc <4 x i32> undef to <4 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i64_v4i1 = trunc <4 x i64> undef to <4 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i64_v4i1 = trunc <4 x i64> undef to <4 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i8 = trunc <8 x i16> undef to <8 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i32_v8i8 = trunc <8 x i32> undef to <8 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i64_v8i8 = trunc <8 x i64> undef to <8 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i64_v8i8 = trunc <8 x i64> undef to <8 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i32_v8i16 = trunc <8 x i32> undef to <8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i64_v8i16 = trunc <8 x i64> undef to <8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i64_v8i32 = trunc <8 x i64> undef to <8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i64_v8i16 = trunc <8 x i64> undef to <8 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i64_v8i32 = trunc <8 x i64> undef to <8 x i32>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_v8i1 = trunc <8 x i8> undef to <8 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i16_v8i1 = trunc <8 x i16> undef to <8 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i32_v8i1 = trunc <8 x i32> undef to <8 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i64_v8i1 = trunc <8 x i64> undef to <8 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i32_v8i1 = trunc <8 x i32> undef to <8 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i64_v8i1 = trunc <8 x i64> undef to <8 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i8 = trunc <2 x i16> undef to <2 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i32_v16i8 = trunc <2 x i32> undef to <2 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v16i64_v16i8 = trunc <2 x i64> undef to <2 x i8>
@@ -1057,44 +1057,44 @@ define void @trunc() {
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i32_v16i1 = trunc <2 x i32> undef to <2 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i64_v16i1 = trunc <2 x i64> undef to <2 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i16_v32i8 = trunc <16 x i16> undef to <16 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i32_v32i8 = trunc <16 x i32> undef to <16 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i64_v32i8 = trunc <16 x i64> undef to <16 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i32_v32i16 = trunc <16 x i32> undef to <16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i64_v32i16 = trunc <16 x i64> undef to <16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i64_v32i32 = trunc <16 x i64> undef to <16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i32_v32i8 = trunc <16 x i32> undef to <16 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v32i64_v32i8 = trunc <16 x i64> undef to <16 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i32_v32i16 = trunc <16 x i32> undef to <16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i64_v32i16 = trunc <16 x i64> undef to <16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i64_v32i32 = trunc <16 x i64> undef to <16 x i32>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i8_v32i1 = trunc <16 x i8> undef to <16 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i16_v32i1 = trunc <16 x i16> undef to <16 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i32_v32i1 = trunc <16 x i32> undef to <16 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i64_v32i1 = trunc <16 x i64> undef to <16 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v64i16_v64i8 = trunc <64 x i16> undef to <64 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v64i32_v64i8 = trunc <64 x i32> undef to <64 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v64i64_v64i8 = trunc <64 x i64> undef to <64 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i32_v64i16 = trunc <64 x i32> undef to <64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v64i64_v64i16 = trunc <64 x i64> undef to <64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v64i64_v64i32 = trunc <64 x i64> undef to <64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i8_v64i1 = trunc <64 x i8> undef to <64 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i16_v64i1 = trunc <64 x i16> undef to <64 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v64i32_v64i1 = trunc <64 x i32> undef to <64 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i16_v32i1 = trunc <16 x i16> undef to <16 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i32_v32i1 = trunc <16 x i32> undef to <16 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v32i64_v32i1 = trunc <16 x i64> undef to <16 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v64i16_v64i8 = trunc <64 x i16> undef to <64 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %v64i32_v64i8 = trunc <64 x i32> undef to <64 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %v64i64_v64i8 = trunc <64 x i64> undef to <64 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v64i32_v64i16 = trunc <64 x i32> undef to <64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v64i64_v64i16 = trunc <64 x i64> undef to <64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %v64i64_v64i32 = trunc <64 x i64> undef to <64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i8_v64i1 = trunc <64 x i8> undef to <64 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v64i16_v64i1 = trunc <64 x i16> undef to <64 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v64i32_v64i1 = trunc <64 x i32> undef to <64 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v64i64_v64i1 = trunc <64 x i64> undef to <64 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v128i16_v128i8 = trunc <128 x i16> undef to <128 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v128i32_v128i8 = trunc <128 x i32> undef to <128 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %v128i64_v128i8 = trunc <128 x i64> undef to <128 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v128i32_v128i16 = trunc <128 x i32> undef to <128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %v128i64_v128i16 = trunc <128 x i64> undef to <128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v128i64_v128i32 = trunc <128 x i64> undef to <128 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v128i8_v128i1 = trunc <128 x i8> undef to <128 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v128i16_v128i1 = trunc <128 x i16> undef to <128 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v128i32_v128i1 = trunc <128 x i32> undef to <128 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v128i16_v128i8 = trunc <128 x i16> undef to <128 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v128i32_v128i8 = trunc <128 x i32> undef to <128 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 63 for instruction: %v128i64_v128i8 = trunc <128 x i64> undef to <128 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %v128i32_v128i16 = trunc <128 x i32> undef to <128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %v128i64_v128i16 = trunc <128 x i64> undef to <128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %v128i64_v128i32 = trunc <128 x i64> undef to <128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v128i8_v128i1 = trunc <128 x i8> undef to <128 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v128i16_v128i1 = trunc <128 x i16> undef to <128 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v128i32_v128i1 = trunc <128 x i32> undef to <128 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v128i64_v128i1 = trunc <128 x i64> undef to <128 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v256i16_v256i8 = trunc <256 x i16> undef to <256 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %v256i32_v256i8 = trunc <256 x i32> undef to <256 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 62 for instruction: %v256i64_v256i8 = trunc <256 x i64> undef to <256 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v256i32_v256i16 = trunc <256 x i32> undef to <256 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %v256i64_v256i16 = trunc <256 x i64> undef to <256 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v256i64_v256i32 = trunc <256 x i64> undef to <256 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v256i8_v256i1 = trunc <256 x i8> undef to <256 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v256i16_v256i1 = trunc <256 x i16> undef to <256 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %v256i32_v256i1 = trunc <256 x i32> undef to <256 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %v256i16_v256i8 = trunc <256 x i16> undef to <256 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %v256i32_v256i8 = trunc <256 x i32> undef to <256 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 126 for instruction: %v256i64_v256i8 = trunc <256 x i64> undef to <256 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %v256i32_v256i16 = trunc <256 x i32> undef to <256 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 108 for instruction: %v256i64_v256i16 = trunc <256 x i64> undef to <256 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %v256i64_v256i32 = trunc <256 x i64> undef to <256 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v256i8_v256i1 = trunc <256 x i8> undef to <256 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %v256i16_v256i1 = trunc <256 x i16> undef to <256 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %v256i32_v256i1 = trunc <256 x i32> undef to <256 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v256i64_v256i1 = trunc <256 x i64> undef to <256 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i16_nxv1i8 = trunc <vscale x 1 x i16> undef to <vscale x 1 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv1i32_nxv1i8 = trunc <vscale x 1 x i32> undef to <vscale x 1 x i8>
@@ -1115,56 +1115,56 @@ define void @trunc() {
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i8_nxv2i1 = trunc <vscale x 2 x i8> undef to <vscale x 2 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i16_nxv2i1 = trunc <vscale x 2 x i16> undef to <vscale x 2 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i32_nxv2i1 = trunc <vscale x 2 x i32> undef to <vscale x 2 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i64_nxv2i1 = trunc <vscale x 2 x i64> undef to <vscale x 2 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i64_nxv2i1 = trunc <vscale x 2 x i64> undef to <vscale x 2 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i8 = trunc <vscale x 4 x i16> undef to <vscale x 4 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i32_nxv4i8 = trunc <vscale x 4 x i32> undef to <vscale x 4 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv4i64_nxv4i8 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i64_nxv4i8 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i32_nxv4i16 = trunc <vscale x 4 x i32> undef to <vscale x 4 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i64_nxv4i16 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i64_nxv4i32 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv4i64_nxv4i16 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i64_nxv4i32 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i32>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i8_nxv4i1 = trunc <vscale x 4 x i8> undef to <vscale x 4 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i16_nxv4i1 = trunc <vscale x 4 x i16> undef to <vscale x 4 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i32_nxv4i1 = trunc <vscale x 4 x i32> undef to <vscale x 4 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i64_nxv4i1 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i32_nxv4i1 = trunc <vscale x 4 x i32> undef to <vscale x 4 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv4i64_nxv4i1 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i8 = trunc <vscale x 8 x i16> undef to <vscale x 8 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i32_nxv8i8 = trunc <vscale x 8 x i32> undef to <vscale x 8 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv8i64_nxv8i8 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i32_nxv8i16 = trunc <vscale x 8 x i32> undef to <vscale x 8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i64_nxv8i16 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i64_nxv8i32 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv8i32_nxv8i8 = trunc <vscale x 8 x i32> undef to <vscale x 8 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv8i64_nxv8i8 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i32_nxv8i16 = trunc <vscale x 8 x i32> undef to <vscale x 8 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv8i64_nxv8i16 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i64_nxv8i32 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i32>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i8_nxv8i1 = trunc <vscale x 8 x i8> undef to <vscale x 8 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i16_nxv8i1 = trunc <vscale x 8 x i16> undef to <vscale x 8 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i32_nxv8i1 = trunc <vscale x 8 x i32> undef to <vscale x 8 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i64_nxv8i1 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i16_nxv16i8 = trunc <vscale x 16 x i16> undef to <vscale x 16 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i32_nxv16i8 = trunc <vscale x 16 x i32> undef to <vscale x 16 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv16i64_nxv16i8 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i32_nxv16i16 = trunc <vscale x 16 x i32> undef to <vscale x 16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i64_nxv16i16 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i64_nxv16i32 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i8_nxv16i1 = trunc <vscale x 16 x i8> undef to <vscale x 16 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i16_nxv16i1 = trunc <vscale x 16 x i16> undef to <vscale x 16 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i32_nxv16i1 = trunc <vscale x 16 x i32> undef to <vscale x 16 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i64_nxv16i1 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv32i16_nxv32i8 = trunc <vscale x 32 x i16> undef to <vscale x 32 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i32_nxv32i8 = trunc <vscale x 32 x i32> undef to <vscale x 32 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %nxv32i64_nxv32i8 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i32_nxv32i16 = trunc <vscale x 32 x i32> undef to <vscale x 32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i64_nxv32i16 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i64_nxv32i32 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i8_nxv32i1 = trunc <vscale x 32 x i8> undef to <vscale x 32 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i16_nxv32i1 = trunc <vscale x 32 x i16> undef to <vscale x 32 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i32_nxv32i1 = trunc <vscale x 32 x i32> undef to <vscale x 32 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i64_nxv32i1 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv64i16_nxv64i8 = trunc <vscale x 64 x i16> undef to <vscale x 64 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv64i32_nxv64i8 = trunc <vscale x 64 x i32> undef to <vscale x 64 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i16_nxv8i1 = trunc <vscale x 8 x i16> undef to <vscale x 8 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i32_nxv8i1 = trunc <vscale x 8 x i32> undef to <vscale x 8 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv8i64_nxv8i1 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i16_nxv16i8 = trunc <vscale x 16 x i16> undef to <vscale x 16 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i32_nxv16i8 = trunc <vscale x 16 x i32> undef to <vscale x 16 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %nxv16i64_nxv16i8 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i32_nxv16i16 = trunc <vscale x 16 x i32> undef to <vscale x 16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %nxv16i64_nxv16i16 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv16i64_nxv16i32 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i8_nxv16i1 = trunc <vscale x 16 x i8> undef to <vscale x 16 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i16_nxv16i1 = trunc <vscale x 16 x i16> undef to <vscale x 16 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv16i32_nxv16i1 = trunc <vscale x 16 x i32> undef to <vscale x 16 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv16i64_nxv16i1 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i16_nxv32i8 = trunc <vscale x 32 x i16> undef to <vscale x 32 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %nxv32i32_nxv32i8 = trunc <vscale x 32 x i32> undef to <vscale x 32 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %nxv32i64_nxv32i8 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv32i32_nxv32i16 = trunc <vscale x 32 x i32> undef to <vscale x 32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %nxv32i64_nxv32i16 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %nxv32i64_nxv32i32 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i8_nxv32i1 = trunc <vscale x 32 x i8> undef to <vscale x 32 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv32i16_nxv32i1 = trunc <vscale x 32 x i16> undef to <vscale x 32 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv32i32_nxv32i1 = trunc <vscale x 32 x i32> undef to <vscale x 32 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv32i64_nxv32i1 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv64i16_nxv64i8 = trunc <vscale x 64 x i16> undef to <vscale x 64 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %nxv64i32_nxv64i8 = trunc <vscale x 64 x i32> undef to <vscale x 64 x i8>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv64i64_nxv64i8 = trunc <vscale x 64 x i64> undef to <vscale x 64 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv64i32_nxv64i16 = trunc <vscale x 64 x i32> undef to <vscale x 64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %nxv64i64_nxv64i16 = trunc <vscale x 64 x i64> undef to <vscale x 64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %nxv64i64_nxv64i32 = trunc <vscale x 64 x i64> undef to <vscale x 64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv64i8_nxv64i1 = trunc <vscale x 64 x i8> undef to <vscale x 64 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv64i16_nxv64i1 = trunc <vscale x 64 x i16> undef to <vscale x 64 x i1>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv64i32_nxv64i1 = trunc <vscale x 64 x i32> undef to <vscale x 64 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %nxv64i32_nxv64i16 = trunc <vscale x 64 x i32> undef to <vscale x 64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 55 for instruction: %nxv64i64_nxv64i16 = trunc <vscale x 64 x i64> undef to <vscale x 64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 37 for instruction: %nxv64i64_nxv64i32 = trunc <vscale x 64 x i64> undef to <vscale x 64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv64i8_nxv64i1 = trunc <vscale x 64 x i8> undef to <vscale x 64 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv64i16_nxv64i1 = trunc <vscale x 64 x i16> undef to <vscale x 64 x i1>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv64i32_nxv64i1 = trunc <vscale x 64 x i32> undef to <vscale x 64 x i1>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv64i64_nxv64i1 = trunc <vscale x 64 x i64> undef to <vscale x 64 x i1>
; RV32-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
@@ -1188,17 +1188,17 @@ define void @trunc() {
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_v4i1 = trunc <4 x i8> undef to <4 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i16_v4i1 = trunc <4 x i16> undef to <4 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i32_v4i1 = trunc <4 x i32> undef to <4 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i64_v4i1 = trunc <4 x i64> undef to <4 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i64_v4i1 = trunc <4 x i64> undef to <4 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i8 = trunc <8 x i16> undef to <8 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i32_v8i8 = trunc <8 x i32> undef to <8 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i64_v8i8 = trunc <8 x i64> undef to <8 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i64_v8i8 = trunc <8 x i64> undef to <8 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i32_v8i16 = trunc <8 x i32> undef to <8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i64_v8i16 = trunc <8 x i64> undef to <8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i64_v8i32 = trunc <8 x i64> undef to <8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i64_v8i16 = trunc <8 x i64> undef to <8 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i64_v8i32 = trunc <8 x i64> undef to <8 x i32>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_v8i1 = trunc <8 x i8> undef to <8 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i16_v8i1 = trunc <8 x i16> undef to <8 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i32_v8i1 = trunc <8 x i32> undef to <8 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i64_v8i1 = trunc <8 x i64> undef to <8 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i32_v8i1 = trunc <8 x i32> undef to <8 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i64_v8i1 = trunc <8 x i64> undef to <8 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i8 = trunc <2 x i16> undef to <2 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i32_v16i8 = trunc <2 x i32> undef to <2 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v16i64_v16i8 = trunc <2 x i64> undef to <2 x i8>
@@ -1210,43 +1210,43 @@ define void @trunc() {
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i32_v16i1 = trunc <2 x i32> undef to <2 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i64_v16i1 = trunc <2 x i64> undef to <2 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i16_v32i8 = trunc <16 x i16> undef to <16 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i32_v32i8 = trunc <16 x i32> undef to <16 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i64_v32i8 = trunc <16 x i64> undef to <16 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i32_v32i16 = trunc <16 x i32> undef to <16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i64_v32i16 = trunc <16 x i64> undef to <16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i64_v32i32 = trunc <16 x i64> undef to <16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i32_v32i8 = trunc <16 x i32> undef to <16 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v32i64_v32i8 = trunc <16 x i64> undef to <16 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i32_v32i16 = trunc <16 x i32> undef to <16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i64_v32i16 = trunc <16 x i64> undef to <16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i64_v32i32 = trunc <16 x i64> undef to <16 x i32>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i8_v32i1 = trunc <16 x i8> undef to <16 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i16_v32i1 = trunc <16 x i16> undef to <16 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i32_v32i1 = trunc <16 x i32> undef to <16 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i64_v32i1 = trunc <16 x i64> undef to <16 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v64i16_v64i8 = trunc <64 x i16> undef to <64 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v64i32_v64i8 = trunc <64 x i32> undef to <64 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v64i64_v64i8 = trunc <64 x i64> undef to <64 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i32_v64i16 = trunc <64 x i32> undef to <64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v64i64_v64i16 = trunc <64 x i64> undef to <64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v64i64_v64i32 = trunc <64 x i64> undef to <64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i8_v64i1 = trunc <64 x i8> undef to <64 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i16_v64i1 = trunc <64 x i16> undef to <64 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i16_v32i1 = trunc <16 x i16> undef to <16 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i32_v32i1 = trunc <16 x i32> undef to <16 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v32i64_v32i1 = trunc <16 x i64> undef to <16 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v64i16_v64i8 = trunc <64 x i16> undef to <64 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %v64i32_v64i8 = trunc <64 x i32> undef to <64 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %v64i64_v64i8 = trunc <64 x i64> undef to <64 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v64i32_v64i16 = trunc <64 x i32> undef to <64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v64i64_v64i16 = trunc <64 x i64> undef to <64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %v64i64_v64i32 = trunc <64 x i64> undef to <64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i8_v64i1 = trunc <64 x i8> undef to <64 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v64i16_v64i1 = trunc <64 x i16> undef to <64 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v64i32_v64i1 = trunc <64 x i32> undef to <64 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v64i64_v64i1 = trunc <64 x i64> undef to <64 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v128i16_v128i8 = trunc <128 x i16> undef to <128 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v128i32_v128i8 = trunc <128 x i32> undef to <128 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %v128i64_v128i8 = trunc <128 x i64> undef to <128 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v128i32_v128i16 = trunc <128 x i32> undef to <128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %v128i64_v128i16 = trunc <128 x i64> undef to <128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v128i64_v128i32 = trunc <128 x i64> undef to <128 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v128i8_v128i1 = trunc <128 x i8> undef to <128 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v128i16_v128i1 = trunc <128 x i16> undef to <128 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v128i16_v128i8 = trunc <128 x i16> undef to <128 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v128i32_v128i8 = trunc <128 x i32> undef to <128 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 63 for instruction: %v128i64_v128i8 = trunc <128 x i64> undef to <128 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %v128i32_v128i16 = trunc <128 x i32> undef to <128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %v128i64_v128i16 = trunc <128 x i64> undef to <128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %v128i64_v128i32 = trunc <128 x i64> undef to <128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v128i8_v128i1 = trunc <128 x i8> undef to <128 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v128i16_v128i1 = trunc <128 x i16> undef to <128 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v128i32_v128i1 = trunc <128 x i32> undef to <128 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v128i64_v128i1 = trunc <128 x i64> undef to <128 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v256i16_v256i8 = trunc <256 x i16> undef to <256 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %v256i32_v256i8 = trunc <256 x i32> undef to <256 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 62 for instruction: %v256i64_v256i8 = trunc <256 x i64> undef to <256 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v256i32_v256i16 = trunc <256 x i32> undef to <256 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %v256i64_v256i16 = trunc <256 x i64> undef to <256 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v256i64_v256i32 = trunc <256 x i64> undef to <256 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v256i8_v256i1 = trunc <256 x i8> undef to <256 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v256i16_v256i1 = trunc <256 x i16> undef to <256 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %v256i16_v256i8 = trunc <256 x i16> undef to <256 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %v256i32_v256i8 = trunc <256 x i32> undef to <256 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 126 for instruction: %v256i64_v256i8 = trunc <256 x i64> undef to <256 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %v256i32_v256i16 = trunc <256 x i32> undef to <256 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 108 for instruction: %v256i64_v256i16 = trunc <256 x i64> undef to <256 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %v256i64_v256i32 = trunc <256 x i64> undef to <256 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v256i8_v256i1 = trunc <256 x i8> undef to <256 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %v256i16_v256i1 = trunc <256 x i16> undef to <256 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v256i32_v256i1 = trunc <256 x i32> undef to <256 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v256i64_v256i1 = trunc <256 x i64> undef to <256 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i16_nxv1i8 = trunc <vscale x 1 x i16> undef to <vscale x 1 x i8>
@@ -1268,57 +1268,57 @@ define void @trunc() {
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i8_nxv2i1 = trunc <vscale x 2 x i8> undef to <vscale x 2 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i16_nxv2i1 = trunc <vscale x 2 x i16> undef to <vscale x 2 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i32_nxv2i1 = trunc <vscale x 2 x i32> undef to <vscale x 2 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i64_nxv2i1 = trunc <vscale x 2 x i64> undef to <vscale x 2 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i64_nxv2i1 = trunc <vscale x 2 x i64> undef to <vscale x 2 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i8 = trunc <vscale x 4 x i16> undef to <vscale x 4 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i32_nxv4i8 = trunc <vscale x 4 x i32> undef to <vscale x 4 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv4i64_nxv4i8 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i64_nxv4i8 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i32_nxv4i16 = trunc <vscale x 4 x i32> undef to <vscale x 4 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i64_nxv4i16 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i64_nxv4i32 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv4i64_nxv4i16 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i64_nxv4i32 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i32>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i8_nxv4i1 = trunc <vscale x 4 x i8> undef to <vscale x 4 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i16_nxv4i1 = trunc <vscale x 4 x i16> undef to <vscale x 4 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i32_nxv4i1 = trunc <vscale x 4 x i32> undef to <vscale x 4 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i64_nxv4i1 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i32_nxv4i1 = trunc <vscale x 4 x i32> undef to <vscale x 4 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv4i64_nxv4i1 = trunc <vscale x 4 x i64> undef to <vscale x 4 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i8 = trunc <vscale x 8 x i16> undef to <vscale x 8 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i32_nxv8i8 = trunc <vscale x 8 x i32> undef to <vscale x 8 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv8i64_nxv8i8 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i32_nxv8i16 = trunc <vscale x 8 x i32> undef to <vscale x 8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i64_nxv8i16 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i64_nxv8i32 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv8i32_nxv8i8 = trunc <vscale x 8 x i32> undef to <vscale x 8 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv8i64_nxv8i8 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i32_nxv8i16 = trunc <vscale x 8 x i32> undef to <vscale x 8 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv8i64_nxv8i16 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i64_nxv8i32 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i32>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i8_nxv8i1 = trunc <vscale x 8 x i8> undef to <vscale x 8 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i16_nxv8i1 = trunc <vscale x 8 x i16> undef to <vscale x 8 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i32_nxv8i1 = trunc <vscale x 8 x i32> undef to <vscale x 8 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i64_nxv8i1 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i16_nxv16i8 = trunc <vscale x 16 x i16> undef to <vscale x 16 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i32_nxv16i8 = trunc <vscale x 16 x i32> undef to <vscale x 16 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv16i64_nxv16i8 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i32_nxv16i16 = trunc <vscale x 16 x i32> undef to <vscale x 16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i64_nxv16i16 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i64_nxv16i32 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i8_nxv16i1 = trunc <vscale x 16 x i8> undef to <vscale x 16 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i16_nxv16i1 = trunc <vscale x 16 x i16> undef to <vscale x 16 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i32_nxv16i1 = trunc <vscale x 16 x i32> undef to <vscale x 16 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i64_nxv16i1 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv32i16_nxv32i8 = trunc <vscale x 32 x i16> undef to <vscale x 32 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i32_nxv32i8 = trunc <vscale x 32 x i32> undef to <vscale x 32 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %nxv32i64_nxv32i8 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i32_nxv32i16 = trunc <vscale x 32 x i32> undef to <vscale x 32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i64_nxv32i16 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i64_nxv32i32 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i8_nxv32i1 = trunc <vscale x 32 x i8> undef to <vscale x 32 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i16_nxv32i1 = trunc <vscale x 32 x i16> undef to <vscale x 32 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i32_nxv32i1 = trunc <vscale x 32 x i32> undef to <vscale x 32 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i64_nxv32i1 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv64i16_nxv64i8 = trunc <vscale x 64 x i16> undef to <vscale x 64 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv64i32_nxv64i8 = trunc <vscale x 64 x i32> undef to <vscale x 64 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %nxv64i64_nxv64i8 = trunc <vscale x 64 x i64> undef to <vscale x 64 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv64i32_nxv64i16 = trunc <vscale x 64 x i32> undef to <vscale x 64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %nxv64i64_nxv64i16 = trunc <vscale x 64 x i64> undef to <vscale x 64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv64i64_nxv64i32 = trunc <vscale x 64 x i64> undef to <vscale x 64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv64i8_nxv64i1 = trunc <vscale x 64 x i8> undef to <vscale x 64 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv64i16_nxv64i1 = trunc <vscale x 64 x i16> undef to <vscale x 64 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv64i32_nxv64i1 = trunc <vscale x 64 x i32> undef to <vscale x 64 x i1>
-; RV64-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %nxv64i64_nxv64i1 = trunc <vscale x 64 x i64> undef to <vscale x 64 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i16_nxv8i1 = trunc <vscale x 8 x i16> undef to <vscale x 8 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i32_nxv8i1 = trunc <vscale x 8 x i32> undef to <vscale x 8 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv8i64_nxv8i1 = trunc <vscale x 8 x i64> undef to <vscale x 8 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i16_nxv16i8 = trunc <vscale x 16 x i16> undef to <vscale x 16 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i32_nxv16i8 = trunc <vscale x 16 x i32> undef to <vscale x 16 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %nxv16i64_nxv16i8 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i32_nxv16i16 = trunc <vscale x 16 x i32> undef to <vscale x 16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %nxv16i64_nxv16i16 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv16i64_nxv16i32 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i8_nxv16i1 = trunc <vscale x 16 x i8> undef to <vscale x 16 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i16_nxv16i1 = trunc <vscale x 16 x i16> undef to <vscale x 16 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv16i32_nxv16i1 = trunc <vscale x 16 x i32> undef to <vscale x 16 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv16i64_nxv16i1 = trunc <vscale x 16 x i64> undef to <vscale x 16 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i16_nxv32i8 = trunc <vscale x 32 x i16> undef to <vscale x 32 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %nxv32i32_nxv32i8 = trunc <vscale x 32 x i32> undef to <vscale x 32 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %nxv32i64_nxv32i8 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv32i32_nxv32i16 = trunc <vscale x 32 x i32> undef to <vscale x 32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %nxv32i64_nxv32i16 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %nxv32i64_nxv32i32 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i8_nxv32i1 = trunc <vscale x 32 x i8> undef to <vscale x 32 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv32i16_nxv32i1 = trunc <vscale x 32 x i16> undef to <vscale x 32 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv32i32_nxv32i1 = trunc <vscale x 32 x i32> undef to <vscale x 32 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv32i64_nxv32i1 = trunc <vscale x 32 x i64> undef to <vscale x 32 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv64i16_nxv64i8 = trunc <vscale x 64 x i16> undef to <vscale x 64 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %nxv64i32_nxv64i8 = trunc <vscale x 64 x i32> undef to <vscale x 64 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 63 for instruction: %nxv64i64_nxv64i8 = trunc <vscale x 64 x i64> undef to <vscale x 64 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %nxv64i32_nxv64i16 = trunc <vscale x 64 x i32> undef to <vscale x 64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %nxv64i64_nxv64i16 = trunc <vscale x 64 x i64> undef to <vscale x 64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %nxv64i64_nxv64i32 = trunc <vscale x 64 x i64> undef to <vscale x 64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv64i8_nxv64i1 = trunc <vscale x 64 x i8> undef to <vscale x 64 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv64i16_nxv64i1 = trunc <vscale x 64 x i16> undef to <vscale x 64 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv64i32_nxv64i1 = trunc <vscale x 64 x i32> undef to <vscale x 64 x i1>
+; RV64-NEXT: Cost Model: Found an estimated cost of 135 for instruction: %nxv64i64_nxv64i1 = trunc <vscale x 64 x i64> undef to <vscale x 64 x i1>
; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
%v2i16_v2i8 = trunc <2 x i16> undef to <2 x i8>
@@ -1495,44 +1495,44 @@ define void @fpext() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16_v2f64 = fpext <2 x half> undef to <2 x double>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32_v2f64 = fpext <2 x float> undef to <2 x double>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4f16_v4f32 = fpext <4 x half> undef to <4 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16_v4f64 = fpext <4 x half> undef to <4 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4f32_v4f64 = fpext <4 x float> undef to <4 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8f16_v8f32 = fpext <8 x half> undef to <8 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f16_v8f64 = fpext <8 x half> undef to <8 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8f32_v8f64 = fpext <8 x float> undef to <8 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16f16_v16f32 = fpext <16 x half> undef to <16 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16f16_v16f64 = fpext <16 x half> undef to <16 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16f32_v16f64 = fpext <16 x float> undef to <16 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32f16_v32f32 = fpext <32 x half> undef to <32 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32f16_v32f64 = fpext <32 x half> undef to <32 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32f32_v32f64 = fpext <32 x float> undef to <32 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64f16_v64f32 = fpext <64 x half> undef to <64 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v64f16_v64f64 = fpext <64 x half> undef to <64 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v64f32_v64f64 = fpext <64 x float> undef to <64 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v128f16_v128f32 = fpext <128 x half> undef to <128 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %v128f16_v128f64 = fpext <128 x half> undef to <128 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v128f32_v128f64 = fpext <128 x float> undef to <128 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4f16_v4f64 = fpext <4 x half> undef to <4 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32_v4f64 = fpext <4 x float> undef to <4 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f16_v8f32 = fpext <8 x half> undef to <8 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8f16_v8f64 = fpext <8 x half> undef to <8 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32_v8f64 = fpext <8 x float> undef to <8 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16f16_v16f32 = fpext <16 x half> undef to <16 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v16f16_v16f64 = fpext <16 x half> undef to <16 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f32_v16f64 = fpext <16 x float> undef to <16 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32f16_v32f32 = fpext <32 x half> undef to <32 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %v32f16_v32f64 = fpext <32 x half> undef to <32 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32f32_v32f64 = fpext <32 x float> undef to <32 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64f16_v64f32 = fpext <64 x half> undef to <64 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 51 for instruction: %v64f16_v64f64 = fpext <64 x half> undef to <64 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v64f32_v64f64 = fpext <64 x float> undef to <64 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v128f16_v128f32 = fpext <128 x half> undef to <128 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v128f16_v128f64 = fpext <128 x half> undef to <128 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v128f32_v128f64 = fpext <128 x float> undef to <128 x double>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1f16_nxv1f32 = fpext <vscale x 1 x half> undef to <vscale x 1 x float>
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv1f16_nxv1f64 = fpext <vscale x 1 x half> undef to <vscale x 1 x double>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1f32_nxv1f64 = fpext <vscale x 1 x float> undef to <vscale x 1 x double>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2f16_nxv2f32 = fpext <vscale x 2 x half> undef to <vscale x 2 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2f16_nxv2f64 = fpext <vscale x 2 x half> undef to <vscale x 2 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2f32_nxv2f64 = fpext <vscale x 2 x float> undef to <vscale x 2 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4f16_nxv4f32 = fpext <vscale x 4 x half> undef to <vscale x 4 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4f16_nxv4f64 = fpext <vscale x 4 x half> undef to <vscale x 4 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4f32_nxv4f64 = fpext <vscale x 4 x float> undef to <vscale x 4 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8f16_nxv8f32 = fpext <vscale x 8 x half> undef to <vscale x 8 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8f16_nxv8f64 = fpext <vscale x 8 x half> undef to <vscale x 8 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8f32_nxv8f64 = fpext <vscale x 8 x float> undef to <vscale x 8 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16f16_nxv16f32 = fpext <vscale x 16 x half> undef to <vscale x 16 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16f16_nxv16f64 = fpext <vscale x 16 x half> undef to <vscale x 16 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16f32_nxv16f64 = fpext <vscale x 16 x float> undef to <vscale x 16 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32f16_nxv32f32 = fpext <vscale x 32 x half> undef to <vscale x 32 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32f16_nxv32f64 = fpext <vscale x 32 x half> undef to <vscale x 32 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32f32_nxv32f64 = fpext <vscale x 32 x float> undef to <vscale x 32 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv64f16_nxv64f32 = fpext <vscale x 64 x half> undef to <vscale x 64 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %nxv64f16_nxv64f64 = fpext <vscale x 64 x half> undef to <vscale x 64 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv64f32_nxv64f64 = fpext <vscale x 64 x float> undef to <vscale x 64 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv2f16_nxv2f64 = fpext <vscale x 2 x half> undef to <vscale x 2 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2f32_nxv2f64 = fpext <vscale x 2 x float> undef to <vscale x 2 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4f16_nxv4f32 = fpext <vscale x 4 x half> undef to <vscale x 4 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv4f16_nxv4f64 = fpext <vscale x 4 x half> undef to <vscale x 4 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4f32_nxv4f64 = fpext <vscale x 4 x float> undef to <vscale x 4 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8f16_nxv8f32 = fpext <vscale x 8 x half> undef to <vscale x 8 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv8f16_nxv8f64 = fpext <vscale x 8 x half> undef to <vscale x 8 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8f32_nxv8f64 = fpext <vscale x 8 x float> undef to <vscale x 8 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16f16_nxv16f32 = fpext <vscale x 16 x half> undef to <vscale x 16 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %nxv16f16_nxv16f64 = fpext <vscale x 16 x half> undef to <vscale x 16 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16f32_nxv16f64 = fpext <vscale x 16 x float> undef to <vscale x 16 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32f16_nxv32f32 = fpext <vscale x 32 x half> undef to <vscale x 32 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 51 for instruction: %nxv32f16_nxv32f64 = fpext <vscale x 32 x half> undef to <vscale x 32 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv32f32_nxv32f64 = fpext <vscale x 32 x float> undef to <vscale x 32 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv64f16_nxv64f32 = fpext <vscale x 64 x half> undef to <vscale x 64 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %nxv64f16_nxv64f64 = fpext <vscale x 64 x half> undef to <vscale x 64 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %nxv64f32_nxv64f64 = fpext <vscale x 64 x float> undef to <vscale x 64 x double>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
%v2f16_v2f32 = fpext <2 x half> undef to <2 x float>
@@ -1603,20 +1603,20 @@ define void @fptrunc() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f64_v4f16 = fptrunc <4 x double> undef to <4 x half>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4f64_v4f32 = fptrunc <4 x double> undef to <4 x float>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8f32_v8f16 = fptrunc <8 x float> undef to <8 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f64_v8f16 = fptrunc <8 x double> undef to <8 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8f64_v8f32 = fptrunc <8 x double> undef to <8 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16f32_v16f16 = fptrunc <16 x float> undef to <16 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16f64_v16f16 = fptrunc <16 x double> undef to <16 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16f64_v16f32 = fptrunc <16 x double> undef to <16 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32f32_v32f16 = fptrunc <32 x float> undef to <32 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32f64_v32f16 = fptrunc <32 x double> undef to <32 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32f64_v32f32 = fptrunc <32 x double> undef to <32 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64f32_v64f16 = fptrunc <64 x float> undef to <64 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v64f64_v64f16 = fptrunc <64 x double> undef to <64 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v64f64_v64f32 = fptrunc <64 x double> undef to <64 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v128f32_v128f16 = fptrunc <128 x float> undef to <128 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %v128f64_v128f16 = fptrunc <128 x double> undef to <128 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v128f64_v128f32 = fptrunc <128 x double> undef to <128 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8f64_v8f16 = fptrunc <8 x double> undef to <8 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f64_v8f32 = fptrunc <8 x double> undef to <8 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16f32_v16f16 = fptrunc <16 x float> undef to <16 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16f64_v16f16 = fptrunc <16 x double> undef to <16 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16f64_v16f32 = fptrunc <16 x double> undef to <16 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32f32_v32f16 = fptrunc <32 x float> undef to <32 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %v32f64_v32f16 = fptrunc <32 x double> undef to <32 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v32f64_v32f32 = fptrunc <32 x double> undef to <32 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v64f32_v64f16 = fptrunc <64 x float> undef to <64 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v64f64_v64f16 = fptrunc <64 x double> undef to <64 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %v64f64_v64f32 = fptrunc <64 x double> undef to <64 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %v128f32_v128f16 = fptrunc <128 x float> undef to <128 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %v128f64_v128f16 = fptrunc <128 x double> undef to <128 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %v128f64_v128f32 = fptrunc <128 x double> undef to <128 x float>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1f32_nxv1f16 = fptrunc <vscale x 1 x float> undef to <vscale x 1 x half>
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv1f64_nxv1f16 = fptrunc <vscale x 1 x double> undef to <vscale x 1 x half>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1f64_nxv1f32 = fptrunc <vscale x 1 x double> undef to <vscale x 1 x float>
@@ -1624,20 +1624,20 @@ define void @fptrunc() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2f64_nxv1f16 = fptrunc <vscale x 2 x double> undef to <vscale x 2 x half>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2f64_nxv1f32 = fptrunc <vscale x 2 x double> undef to <vscale x 2 x float>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4f32_nxv4f16 = fptrunc <vscale x 4 x float> undef to <vscale x 4 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4f64_nxv4f16 = fptrunc <vscale x 4 x double> undef to <vscale x 4 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4f64_nxv4f32 = fptrunc <vscale x 4 x double> undef to <vscale x 4 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8f32_nxv8f16 = fptrunc <vscale x 8 x float> undef to <vscale x 8 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8f64_nxv8f16 = fptrunc <vscale x 8 x double> undef to <vscale x 8 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8f64_nxv8f32 = fptrunc <vscale x 8 x double> undef to <vscale x 8 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16f32_nxv16f16 = fptrunc <vscale x 16 x float> undef to <vscale x 16 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16f64_nxv16f16 = fptrunc <vscale x 16 x double> undef to <vscale x 16 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16f64_nxv16f32 = fptrunc <vscale x 16 x double> undef to <vscale x 16 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32f32_nxv32f16 = fptrunc <vscale x 32 x float> undef to <vscale x 32 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32f64_nxv32f16 = fptrunc <vscale x 32 x double> undef to <vscale x 32 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32f64_nxv32f32 = fptrunc <vscale x 32 x double> undef to <vscale x 32 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv64f32_nxv64f16 = fptrunc <vscale x 64 x float> undef to <vscale x 64 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %nxv64f64_nxv64f16 = fptrunc <vscale x 64 x double> undef to <vscale x 64 x half>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv64f64_nxv64f32 = fptrunc <vscale x 64 x double> undef to <vscale x 64 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv4f64_nxv4f16 = fptrunc <vscale x 4 x double> undef to <vscale x 4 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4f64_nxv4f32 = fptrunc <vscale x 4 x double> undef to <vscale x 4 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8f32_nxv8f16 = fptrunc <vscale x 8 x float> undef to <vscale x 8 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv8f64_nxv8f16 = fptrunc <vscale x 8 x double> undef to <vscale x 8 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8f64_nxv8f32 = fptrunc <vscale x 8 x double> undef to <vscale x 8 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16f32_nxv16f16 = fptrunc <vscale x 16 x float> undef to <vscale x 16 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %nxv16f64_nxv16f16 = fptrunc <vscale x 16 x double> undef to <vscale x 16 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv16f64_nxv16f32 = fptrunc <vscale x 16 x double> undef to <vscale x 16 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv32f32_nxv32f16 = fptrunc <vscale x 32 x float> undef to <vscale x 32 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %nxv32f64_nxv32f16 = fptrunc <vscale x 32 x double> undef to <vscale x 32 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %nxv32f64_nxv32f32 = fptrunc <vscale x 32 x double> undef to <vscale x 32 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %nxv64f32_nxv64f16 = fptrunc <vscale x 64 x float> undef to <vscale x 64 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %nxv64f64_nxv64f16 = fptrunc <vscale x 64 x double> undef to <vscale x 64 x half>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %nxv64f64_nxv64f32 = fptrunc <vscale x 64 x double> undef to <vscale x 64 x float>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
%v2f32_v2f16 = fptrunc <2 x float> undef to <2 x half>
diff --git a/llvm/test/Analysis/CostModel/RISCV/int-min-max.ll b/llvm/test/Analysis/CostModel/RISCV/int-min-max.ll
index ec669c9..79cf1c8 100644
--- a/llvm/test/Analysis/CostModel/RISCV/int-min-max.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/int-min-max.ll
@@ -12,36 +12,36 @@ define void @smax() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i8> @llvm.smax.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i8> @llvm.smax.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i8> @llvm.smax.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %11 = call i16 @llvm.smax.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i16> @llvm.smax.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i16> @llvm.smax.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i16> @llvm.smax.v8i16(<8 x i16> undef, <8 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <16 x i16> @llvm.smax.v16i16(<16 x i16> undef, <16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %15 = call <16 x i16> @llvm.smax.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i16> @llvm.smax.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i16> @llvm.smax.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i16> @llvm.smax.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %20 = call <vscale x 16 x i16> @llvm.smax.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %19 = call <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <vscale x 16 x i16> @llvm.smax.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %21 = call i32 @llvm.smax.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.smax.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> undef, <4 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <8 x i32> @llvm.smax.v8i32(<8 x i32> undef, <8 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <16 x i32> @llvm.smax.v16i32(<16 x i32> undef, <16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.smax.v8i32(<8 x i32> undef, <8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.smax.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.smax.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.smax.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %29 = call <vscale x 8 x i32> @llvm.smax.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %30 = call <vscale x 16 x i32> @llvm.smax.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.smax.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %30 = call <vscale x 16 x i32> @llvm.smax.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %31 = call i64 @llvm.smax.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.smax.v2i64(<2 x i64> undef, <2 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %33 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> undef, <4 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %34 = call <8 x i64> @llvm.smax.v8i64(<8 x i64> undef, <8 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %35 = call <16 x i64> @llvm.smax.v16i64(<16 x i64> undef, <16 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %33 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> undef, <4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %34 = call <8 x i64> @llvm.smax.v8i64(<8 x i64> undef, <8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %35 = call <16 x i64> @llvm.smax.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %36 = call <vscale x 1 x i64> @llvm.smax.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %37 = call <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %38 = call <vscale x 4 x i64> @llvm.smax.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %39 = call <vscale x 8 x i64> @llvm.smax.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %37 = call <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %38 = call <vscale x 4 x i64> @llvm.smax.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %39 = call <vscale x 8 x i64> @llvm.smax.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
call i8 @llvm.smax.i8(i8 undef, i8 undef)
@@ -97,36 +97,36 @@ define void @smin() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i8> @llvm.smin.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i8> @llvm.smin.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i8> @llvm.smin.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %11 = call i16 @llvm.smin.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i16> @llvm.smin.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i16> @llvm.smin.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i16> @llvm.smin.v8i16(<8 x i16> undef, <8 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <16 x i16> @llvm.smin.v16i16(<16 x i16> undef, <16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %15 = call <16 x i16> @llvm.smin.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i16> @llvm.smin.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i16> @llvm.smin.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i16> @llvm.smin.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %20 = call <vscale x 16 x i16> @llvm.smin.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %19 = call <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <vscale x 16 x i16> @llvm.smin.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %21 = call i32 @llvm.smin.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.smin.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> undef, <4 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> undef, <8 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <16 x i32> @llvm.smin.v16i32(<16 x i32> undef, <16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> undef, <8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.smin.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.smin.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.smin.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %29 = call <vscale x 8 x i32> @llvm.smin.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %30 = call <vscale x 16 x i32> @llvm.smin.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.smin.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %30 = call <vscale x 16 x i32> @llvm.smin.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %31 = call i64 @llvm.smin.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.smin.v2i64(<2 x i64> undef, <2 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %33 = call <4 x i64> @llvm.smin.v4i64(<4 x i64> undef, <4 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %34 = call <8 x i64> @llvm.smin.v8i64(<8 x i64> undef, <8 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %35 = call <16 x i64> @llvm.smin.v16i64(<16 x i64> undef, <16 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %33 = call <4 x i64> @llvm.smin.v4i64(<4 x i64> undef, <4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %34 = call <8 x i64> @llvm.smin.v8i64(<8 x i64> undef, <8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %35 = call <16 x i64> @llvm.smin.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %36 = call <vscale x 1 x i64> @llvm.smin.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %37 = call <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %38 = call <vscale x 4 x i64> @llvm.smin.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %39 = call <vscale x 8 x i64> @llvm.smin.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %37 = call <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %38 = call <vscale x 4 x i64> @llvm.smin.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %39 = call <vscale x 8 x i64> @llvm.smin.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
call i8 @llvm.smin.i8(i8 undef, i8 undef)
@@ -182,36 +182,36 @@ define void @umax() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i8> @llvm.umax.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i8> @llvm.umax.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i8> @llvm.umax.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %11 = call i16 @llvm.umax.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i16> @llvm.umax.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i16> @llvm.umax.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i16> @llvm.umax.v8i16(<8 x i16> undef, <8 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <16 x i16> @llvm.umax.v16i16(<16 x i16> undef, <16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %15 = call <16 x i16> @llvm.umax.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i16> @llvm.umax.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i16> @llvm.umax.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i16> @llvm.umax.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %20 = call <vscale x 16 x i16> @llvm.umax.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %19 = call <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <vscale x 16 x i16> @llvm.umax.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %21 = call i32 @llvm.umax.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.umax.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.umax.v4i32(<4 x i32> undef, <4 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <8 x i32> @llvm.umax.v8i32(<8 x i32> undef, <8 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <16 x i32> @llvm.umax.v16i32(<16 x i32> undef, <16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.umax.v8i32(<8 x i32> undef, <8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.umax.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.umax.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.umax.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %29 = call <vscale x 8 x i32> @llvm.umax.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %30 = call <vscale x 16 x i32> @llvm.umax.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.umax.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %30 = call <vscale x 16 x i32> @llvm.umax.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %31 = call i64 @llvm.umax.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.umax.v2i64(<2 x i64> undef, <2 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %33 = call <4 x i64> @llvm.umax.v4i64(<4 x i64> undef, <4 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %34 = call <8 x i64> @llvm.umax.v8i64(<8 x i64> undef, <8 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %35 = call <16 x i64> @llvm.umax.v16i64(<16 x i64> undef, <16 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %33 = call <4 x i64> @llvm.umax.v4i64(<4 x i64> undef, <4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %34 = call <8 x i64> @llvm.umax.v8i64(<8 x i64> undef, <8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %35 = call <16 x i64> @llvm.umax.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %36 = call <vscale x 1 x i64> @llvm.umax.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %37 = call <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %38 = call <vscale x 4 x i64> @llvm.umax.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %39 = call <vscale x 8 x i64> @llvm.umax.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %37 = call <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %38 = call <vscale x 4 x i64> @llvm.umax.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %39 = call <vscale x 8 x i64> @llvm.umax.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
call i8 @llvm.umax.i8(i8 undef, i8 undef)
@@ -267,36 +267,36 @@ define void @umin() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i8> @llvm.umin.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i8> @llvm.umin.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i8> @llvm.umin.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %11 = call i16 @llvm.umin.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i16> @llvm.umin.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i16> @llvm.umin.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i16> @llvm.umin.v8i16(<8 x i16> undef, <8 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <16 x i16> @llvm.umin.v16i16(<16 x i16> undef, <16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %15 = call <16 x i16> @llvm.umin.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i16> @llvm.umin.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i16> @llvm.umin.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i16> @llvm.umin.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %20 = call <vscale x 16 x i16> @llvm.umin.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %19 = call <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <vscale x 16 x i16> @llvm.umin.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %21 = call i32 @llvm.umin.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.umin.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.umin.v4i32(<4 x i32> undef, <4 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <8 x i32> @llvm.umin.v8i32(<8 x i32> undef, <8 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <16 x i32> @llvm.umin.v16i32(<16 x i32> undef, <16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.umin.v8i32(<8 x i32> undef, <8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.umin.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.umin.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.umin.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %29 = call <vscale x 8 x i32> @llvm.umin.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %30 = call <vscale x 16 x i32> @llvm.umin.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.umin.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %30 = call <vscale x 16 x i32> @llvm.umin.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %31 = call i64 @llvm.umin.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.umin.v2i64(<2 x i64> undef, <2 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %33 = call <4 x i64> @llvm.umin.v4i64(<4 x i64> undef, <4 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %34 = call <8 x i64> @llvm.umin.v8i64(<8 x i64> undef, <8 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %35 = call <16 x i64> @llvm.umin.v16i64(<16 x i64> undef, <16 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %33 = call <4 x i64> @llvm.umin.v4i64(<4 x i64> undef, <4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %34 = call <8 x i64> @llvm.umin.v8i64(<8 x i64> undef, <8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %35 = call <16 x i64> @llvm.umin.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %36 = call <vscale x 1 x i64> @llvm.umin.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %37 = call <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %38 = call <vscale x 4 x i64> @llvm.umin.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %39 = call <vscale x 8 x i64> @llvm.umin.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %37 = call <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %38 = call <vscale x 4 x i64> @llvm.umin.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %39 = call <vscale x 8 x i64> @llvm.umin.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
call i8 @llvm.umin.i8(i8 undef, i8 undef)
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-fmaximum.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-fmaximum.ll
index 1618c38..f91f13b 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-fmaximum.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-fmaximum.ll
@@ -6,23 +6,37 @@
define float @reduce_fmaximum_f32(float %arg) {
; CHECK-LABEL: 'reduce_fmaximum_f32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4 = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8 = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call float @llvm.vector.reduce.fmaximum.v16f32(<16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64 = call float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V128 = call float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2 = call float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8 = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16 = call float @llvm.vector.reduce.fmaximum.v16f32(<16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V32 = call float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V64 = call float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V128 = call float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call fast float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %2 = call fast float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %3 = call fast float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %4 = call fast float @llvm.vector.reduce.fmaximum.v16f32(<16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %5 = call fast float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %6 = call fast float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %7 = call fast float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float undef
;
; SIZE-LABEL: 'reduce_fmaximum_f32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call float @llvm.vector.reduce.fmaximum.v16f32(<16 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2 = call float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8 = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16 = call float @llvm.vector.reduce.fmaximum.v16f32(<16 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V64 = call float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V128 = call float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call fast float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call fast float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call fast float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call fast float @llvm.vector.reduce.fmaximum.v16f32(<16 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call fast float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call fast float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call fast float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float undef
;
%V2 = call float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
@@ -32,6 +46,13 @@ define float @reduce_fmaximum_f32(float %arg) {
%V32 = call float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
%V64 = call float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
%V128 = call float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
+call fast float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
+call fast float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> undef)
+call fast float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> undef)
+call fast float @llvm.vector.reduce.fmaximum.v16f32(<16 x float> undef)
+call fast float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
+call fast float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
+call fast float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
ret float undef
}
declare float @llvm.vector.reduce.fmaximum.v2f32(<2 x float>)
@@ -44,21 +65,33 @@ declare float @llvm.vector.reduce.fmaximum.v128f32(<128 x float>)
define double @reduce_fmaximum_f64(double %arg) {
; CHECK-LABEL: 'reduce_fmaximum_f64'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4 = call double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8 = call double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V64 = call double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2 = call double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8 = call double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V16 = call double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %V32 = call double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %V64 = call double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call fast double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %2 = call fast double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %3 = call fast double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %4 = call fast double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %5 = call fast double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %6 = call fast double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double undef
;
; SIZE-LABEL: 'reduce_fmaximum_f64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2 = call double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8 = call double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16 = call double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V64 = call double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call fast double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call fast double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call fast double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call fast double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call fast double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call fast double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double undef
;
%V2 = call double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
@@ -67,6 +100,12 @@ define double @reduce_fmaximum_f64(double %arg) {
%V16 = call double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
%V32 = call double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
%V64 = call double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
+call fast double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
+call fast double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> undef)
+call fast double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> undef)
+call fast double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
+call fast double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
+call fast double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
ret double undef
}
declare double @llvm.vector.reduce.fmaximum.v2f64(<2 x double>)
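; Illustrative sketch, not part of the diff above: the updated reduce-fmaximum.ll
; expectations raise the cost of the plain @llvm.vector.reduce.fmaximum calls and add
; `fast` variants that are costed much lower, presumably because relaxed FP math lets
; the NaN-propagating semantics be dropped. A minimal standalone query in the same
; style could look like this; the RUN line and -mattr string are assumptions about the
; usual RISC-V cost-model setup, not taken from this commit.
; RUN: opt < %s -mtriple=riscv64 -mattr=+v,+f,+d -passes="print<cost-model>" -disable-output 2>&1 | FileCheck %s
define float @fmaximum_cost_sketch(<8 x float> %v) {
  %strict = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> %v)
  %relaxed = call fast float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> %v)
  ret float %relaxed
}
declare float @llvm.vector.reduce.fmaximum.v8f32(<8 x float>)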
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-fminimum.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-fminimum.ll
index 35b1864..86b8402 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-fminimum.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-fminimum.ll
@@ -6,23 +6,23 @@
define float @reduce_fmaximum_f32(float %arg) {
; CHECK-LABEL: 'reduce_fmaximum_f32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call float @llvm.vector.reduce.fminimum.v2f32(<2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4 = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8 = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call float @llvm.vector.reduce.fminimum.v16f32(<16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call float @llvm.vector.reduce.fminimum.v32f32(<32 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64 = call float @llvm.vector.reduce.fminimum.v64f32(<64 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V128 = call float @llvm.vector.reduce.fminimum.v128f32(<128 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2 = call float @llvm.vector.reduce.fminimum.v2f32(<2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8 = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16 = call float @llvm.vector.reduce.fminimum.v16f32(<16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V32 = call float @llvm.vector.reduce.fminimum.v32f32(<32 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V64 = call float @llvm.vector.reduce.fminimum.v64f32(<64 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V128 = call float @llvm.vector.reduce.fminimum.v128f32(<128 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float undef
;
; SIZE-LABEL: 'reduce_fmaximum_f32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call float @llvm.vector.reduce.fminimum.v2f32(<2 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call float @llvm.vector.reduce.fminimum.v16f32(<16 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call float @llvm.vector.reduce.fminimum.v32f32(<32 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call float @llvm.vector.reduce.fminimum.v64f32(<64 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call float @llvm.vector.reduce.fminimum.v128f32(<128 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2 = call float @llvm.vector.reduce.fminimum.v2f32(<2 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8 = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16 = call float @llvm.vector.reduce.fminimum.v16f32(<16 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call float @llvm.vector.reduce.fminimum.v32f32(<32 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V64 = call float @llvm.vector.reduce.fminimum.v64f32(<64 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V128 = call float @llvm.vector.reduce.fminimum.v128f32(<128 x float> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float undef
;
%V2 = call float @llvm.vector.reduce.fminimum.v2f32(<2 x float> undef)
@@ -44,21 +44,21 @@ declare float @llvm.vector.reduce.fminimum.v128f32(<128 x float>)
define double @reduce_fmaximum_f64(double %arg) {
; CHECK-LABEL: 'reduce_fmaximum_f64'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call double @llvm.vector.reduce.fminimum.v2f64(<2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4 = call double @llvm.vector.reduce.fminimum.v4f64(<4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8 = call double @llvm.vector.reduce.fminimum.v8f64(<8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call double @llvm.vector.reduce.fminimum.v16f64(<16 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call double @llvm.vector.reduce.fminimum.v32f64(<32 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V64 = call double @llvm.vector.reduce.fminimum.v64f64(<64 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2 = call double @llvm.vector.reduce.fminimum.v2f64(<2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call double @llvm.vector.reduce.fminimum.v4f64(<4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8 = call double @llvm.vector.reduce.fminimum.v8f64(<8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V16 = call double @llvm.vector.reduce.fminimum.v16f64(<16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %V32 = call double @llvm.vector.reduce.fminimum.v32f64(<32 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %V64 = call double @llvm.vector.reduce.fminimum.v64f64(<64 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double undef
;
; SIZE-LABEL: 'reduce_fmaximum_f64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call double @llvm.vector.reduce.fminimum.v2f64(<2 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call double @llvm.vector.reduce.fminimum.v4f64(<4 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call double @llvm.vector.reduce.fminimum.v8f64(<8 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call double @llvm.vector.reduce.fminimum.v16f64(<16 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call double @llvm.vector.reduce.fminimum.v32f64(<32 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call double @llvm.vector.reduce.fminimum.v64f64(<64 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2 = call double @llvm.vector.reduce.fminimum.v2f64(<2 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call double @llvm.vector.reduce.fminimum.v4f64(<4 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8 = call double @llvm.vector.reduce.fminimum.v8f64(<8 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16 = call double @llvm.vector.reduce.fminimum.v16f64(<16 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call double @llvm.vector.reduce.fminimum.v32f64(<32 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V64 = call double @llvm.vector.reduce.fminimum.v64f64(<64 x double> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double undef
;
%V2 = call double @llvm.vector.reduce.fminimum.v2f64(<2 x double> undef)
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-max.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-max.ll
index f3436b5..533a77d 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-max.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-max.ll
@@ -51,14 +51,14 @@ define i32 @reduce_umax_i8(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_umax_i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i8 @llvm.vector.reduce.umax.v1i8(<1 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i8 @llvm.vector.reduce.umax.v2i8(<2 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i8 @llvm.vector.reduce.umax.v4i8(<4 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call i8 @llvm.vector.reduce.umax.v64i8(<64 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V128 = call i8 @llvm.vector.reduce.umax.v128i8(<128 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i8 @llvm.vector.reduce.umax.v1i8(<1 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i8 @llvm.vector.reduce.umax.v2i8(<2 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i8 @llvm.vector.reduce.umax.v4i8(<4 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i8 @llvm.vector.reduce.umax.v64i8(<64 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call i8 @llvm.vector.reduce.umax.v128i8(<128 x i8> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i8 @llvm.vector.reduce.umax.v1i8(<1 x i8> undef)
@@ -85,14 +85,14 @@ define i32 @reduce_umax_i16(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_umax_i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i16 @llvm.vector.reduce.umax.v1i16(<1 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i16 @llvm.vector.reduce.umax.v2i16(<2 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i16 @llvm.vector.reduce.umax.v32i16(<32 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call i16 @llvm.vector.reduce.umax.v64i16(<64 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V128 = call i16 @llvm.vector.reduce.umax.v128i16(<128 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i16 @llvm.vector.reduce.umax.v1i16(<1 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i16 @llvm.vector.reduce.umax.v2i16(<2 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i16 @llvm.vector.reduce.umax.v32i16(<32 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i16 @llvm.vector.reduce.umax.v64i16(<64 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call i16 @llvm.vector.reduce.umax.v128i16(<128 x i16> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i16 @llvm.vector.reduce.umax.v1i16(<1 x i16> undef)
@@ -115,18 +115,18 @@ define i32 @reduce_umax_i32(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call i32 @llvm.vector.reduce.umax.v32i32(<32 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64 = call i32 @llvm.vector.reduce.umax.v64i32(<64 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V128 = call i32 @llvm.vector.reduce.umax.v128i32(<128 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V128 = call i32 @llvm.vector.reduce.umax.v128i32(<128 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_umax_i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i32 @llvm.vector.reduce.umax.v1i32(<1 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i32 @llvm.vector.reduce.umax.v32i32(<32 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call i32 @llvm.vector.reduce.umax.v64i32(<64 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call i32 @llvm.vector.reduce.umax.v128i32(<128 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i32 @llvm.vector.reduce.umax.v1i32(<1 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i32 @llvm.vector.reduce.umax.v32i32(<32 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i32 @llvm.vector.reduce.umax.v64i32(<64 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V128 = call i32 @llvm.vector.reduce.umax.v128i32(<128 x i32> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i32 @llvm.vector.reduce.umax.v1i32(<1 x i32> undef)
@@ -148,19 +148,19 @@ define i32 @reduce_umax_i64(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8 = call i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call i64 @llvm.vector.reduce.umax.v32i64(<32 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V64 = call i64 @llvm.vector.reduce.umax.v64i64(<64 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V128 = call i64 @llvm.vector.reduce.umax.v128i64(<128 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %V64 = call i64 @llvm.vector.reduce.umax.v64i64(<64 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %V128 = call i64 @llvm.vector.reduce.umax.v128i64(<128 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_umax_i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i64 @llvm.vector.reduce.umax.v1i64(<1 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i64 @llvm.vector.reduce.umax.v32i64(<32 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i64 @llvm.vector.reduce.umax.v64i64(<64 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V128 = call i64 @llvm.vector.reduce.umax.v128i64(<128 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i64 @llvm.vector.reduce.umax.v1i64(<1 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i64 @llvm.vector.reduce.umax.v32i64(<32 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V64 = call i64 @llvm.vector.reduce.umax.v64i64(<64 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V128 = call i64 @llvm.vector.reduce.umax.v128i64(<128 x i64> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i64 @llvm.vector.reduce.umax.v1i64(<1 x i64> undef)
@@ -221,14 +221,14 @@ define i32 @reduce_smax_i8(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_smax_i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i8 @llvm.vector.reduce.smax.v1i8(<1 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i8 @llvm.vector.reduce.smax.v2i8(<2 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i8 @llvm.vector.reduce.smax.v4i8(<4 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call i8 @llvm.vector.reduce.smax.v64i8(<64 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V128 = call i8 @llvm.vector.reduce.smax.v128i8(<128 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i8 @llvm.vector.reduce.smax.v1i8(<1 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i8 @llvm.vector.reduce.smax.v2i8(<2 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i8 @llvm.vector.reduce.smax.v4i8(<4 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i8 @llvm.vector.reduce.smax.v64i8(<64 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call i8 @llvm.vector.reduce.smax.v128i8(<128 x i8> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i8 @llvm.vector.reduce.smax.v1i8(<1 x i8> undef)
@@ -255,14 +255,14 @@ define i32 @reduce_smax_i16(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_smax_i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i16 @llvm.vector.reduce.smax.v1i16(<1 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i16 @llvm.vector.reduce.smax.v2i16(<2 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i16 @llvm.vector.reduce.smax.v32i16(<32 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call i16 @llvm.vector.reduce.smax.v64i16(<64 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V128 = call i16 @llvm.vector.reduce.smax.v128i16(<128 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i16 @llvm.vector.reduce.smax.v1i16(<1 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i16 @llvm.vector.reduce.smax.v2i16(<2 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i16 @llvm.vector.reduce.smax.v32i16(<32 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i16 @llvm.vector.reduce.smax.v64i16(<64 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call i16 @llvm.vector.reduce.smax.v128i16(<128 x i16> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i16 @llvm.vector.reduce.smax.v1i16(<1 x i16> undef)
@@ -285,18 +285,18 @@ define i32 @reduce_smax_i32(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call i32 @llvm.vector.reduce.smax.v32i32(<32 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64 = call i32 @llvm.vector.reduce.smax.v64i32(<64 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V128 = call i32 @llvm.vector.reduce.smax.v128i32(<128 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V128 = call i32 @llvm.vector.reduce.smax.v128i32(<128 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_smax_i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i32 @llvm.vector.reduce.smax.v1i32(<1 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i32 @llvm.vector.reduce.smax.v32i32(<32 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call i32 @llvm.vector.reduce.smax.v64i32(<64 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call i32 @llvm.vector.reduce.smax.v128i32(<128 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i32 @llvm.vector.reduce.smax.v1i32(<1 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i32 @llvm.vector.reduce.smax.v32i32(<32 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i32 @llvm.vector.reduce.smax.v64i32(<64 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V128 = call i32 @llvm.vector.reduce.smax.v128i32(<128 x i32> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i32 @llvm.vector.reduce.smax.v1i32(<1 x i32> undef)
@@ -318,19 +318,19 @@ define i32 @reduce_smax_i64(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8 = call i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call i64 @llvm.vector.reduce.smax.v32i64(<32 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V64 = call i64 @llvm.vector.reduce.smax.v64i64(<64 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V128 = call i64 @llvm.vector.reduce.smax.v128i64(<128 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %V64 = call i64 @llvm.vector.reduce.smax.v64i64(<64 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %V128 = call i64 @llvm.vector.reduce.smax.v128i64(<128 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_smax_i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i64 @llvm.vector.reduce.smax.v1i64(<1 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i64 @llvm.vector.reduce.smax.v32i64(<32 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i64 @llvm.vector.reduce.smax.v64i64(<64 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V128 = call i64 @llvm.vector.reduce.smax.v128i64(<128 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i64 @llvm.vector.reduce.smax.v1i64(<1 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i64 @llvm.vector.reduce.smax.v32i64(<32 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V64 = call i64 @llvm.vector.reduce.smax.v64i64(<64 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V128 = call i64 @llvm.vector.reduce.smax.v128i64(<128 x i64> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i64 @llvm.vector.reduce.smax.v1i64(<1 x i64> undef)
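; Illustrative sketch, not part of the diff above: the reduce-max.ll expectations raise
; each SIZE cost for the umax/smax reductions by one and grow the throughput costs for
; the widest fixed vectors (for example v128i32 and v128i64). A minimal standalone
; query in the same style could look like this; the RUN line and -mattr string are
; assumptions about the usual RISC-V cost-model setup, not taken from this commit.
; RUN: opt < %s -mtriple=riscv64 -mattr=+v -passes="print<cost-model>" -disable-output 2>&1 | FileCheck %s
define i32 @umax_cost_sketch(<16 x i32> %v) {
  %r = call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %v)
  ret i32 %r
}
declare i32 @llvm.vector.reduce.umax.v16i32(<16 x i32>)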
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-min.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-min.ll
index 1964e35..ae99118 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-min.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-min.ll
@@ -51,14 +51,14 @@ define i32 @reduce_umin_i8(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_umin_i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i8 @llvm.vector.reduce.umin.v1i8(<1 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i8 @llvm.vector.reduce.umin.v2i8(<2 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i8 @llvm.vector.reduce.umin.v4i8(<4 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call i8 @llvm.vector.reduce.umin.v64i8(<64 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V128 = call i8 @llvm.vector.reduce.umin.v128i8(<128 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i8 @llvm.vector.reduce.umin.v1i8(<1 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i8 @llvm.vector.reduce.umin.v2i8(<2 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i8 @llvm.vector.reduce.umin.v4i8(<4 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i8 @llvm.vector.reduce.umin.v64i8(<64 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call i8 @llvm.vector.reduce.umin.v128i8(<128 x i8> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i8 @llvm.vector.reduce.umin.v1i8(<1 x i8> undef)
@@ -85,14 +85,14 @@ define i32 @reduce_umin_i16(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_umin_i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i16 @llvm.vector.reduce.umin.v1i16(<1 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i16 @llvm.vector.reduce.umin.v2i16(<2 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i16 @llvm.vector.reduce.umin.v32i16(<32 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call i16 @llvm.vector.reduce.umin.v64i16(<64 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V128 = call i16 @llvm.vector.reduce.umin.v128i16(<128 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i16 @llvm.vector.reduce.umin.v1i16(<1 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i16 @llvm.vector.reduce.umin.v2i16(<2 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i16 @llvm.vector.reduce.umin.v32i16(<32 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i16 @llvm.vector.reduce.umin.v64i16(<64 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call i16 @llvm.vector.reduce.umin.v128i16(<128 x i16> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i16 @llvm.vector.reduce.umin.v1i16(<1 x i16> undef)
@@ -115,18 +115,18 @@ define i32 @reduce_umin_i32(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call i32 @llvm.vector.reduce.umin.v32i32(<32 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64 = call i32 @llvm.vector.reduce.umin.v64i32(<64 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V128 = call i32 @llvm.vector.reduce.umin.v128i32(<128 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V128 = call i32 @llvm.vector.reduce.umin.v128i32(<128 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_umin_i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i32 @llvm.vector.reduce.umin.v1i32(<1 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i32 @llvm.vector.reduce.umin.v32i32(<32 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call i32 @llvm.vector.reduce.umin.v64i32(<64 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call i32 @llvm.vector.reduce.umin.v128i32(<128 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i32 @llvm.vector.reduce.umin.v1i32(<1 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i32 @llvm.vector.reduce.umin.v32i32(<32 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i32 @llvm.vector.reduce.umin.v64i32(<64 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V128 = call i32 @llvm.vector.reduce.umin.v128i32(<128 x i32> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i32 @llvm.vector.reduce.umin.v1i32(<1 x i32> undef)
@@ -148,19 +148,19 @@ define i32 @reduce_umin_i64(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8 = call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call i64 @llvm.vector.reduce.umin.v32i64(<32 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V64 = call i64 @llvm.vector.reduce.umin.v64i64(<64 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V128 = call i64 @llvm.vector.reduce.umin.v128i64(<128 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %V64 = call i64 @llvm.vector.reduce.umin.v64i64(<64 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %V128 = call i64 @llvm.vector.reduce.umin.v128i64(<128 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_umin_i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i64 @llvm.vector.reduce.umin.v1i64(<1 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i64 @llvm.vector.reduce.umin.v32i64(<32 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i64 @llvm.vector.reduce.umin.v64i64(<64 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V128 = call i64 @llvm.vector.reduce.umin.v128i64(<128 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i64 @llvm.vector.reduce.umin.v1i64(<1 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i64 @llvm.vector.reduce.umin.v32i64(<32 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V64 = call i64 @llvm.vector.reduce.umin.v64i64(<64 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V128 = call i64 @llvm.vector.reduce.umin.v128i64(<128 x i64> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i64 @llvm.vector.reduce.umin.v1i64(<1 x i64> undef)
@@ -221,14 +221,14 @@ define i32 @reduce_smin_i8(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_smin_i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i8 @llvm.vector.reduce.smin.v1i8(<1 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i8 @llvm.vector.reduce.smin.v2i8(<2 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i8 @llvm.vector.reduce.smin.v4i8(<4 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call i8 @llvm.vector.reduce.smin.v64i8(<64 x i8> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V128 = call i8 @llvm.vector.reduce.smin.v128i8(<128 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i8 @llvm.vector.reduce.smin.v1i8(<1 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i8 @llvm.vector.reduce.smin.v2i8(<2 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i8 @llvm.vector.reduce.smin.v4i8(<4 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i8 @llvm.vector.reduce.smin.v64i8(<64 x i8> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call i8 @llvm.vector.reduce.smin.v128i8(<128 x i8> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i8 @llvm.vector.reduce.smin.v1i8(<1 x i8> undef)
@@ -255,14 +255,14 @@ define i32 @reduce_smin_i16(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_smin_i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i16 @llvm.vector.reduce.smin.v1i16(<1 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i16 @llvm.vector.reduce.smin.v2i16(<2 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i16 @llvm.vector.reduce.smin.v32i16(<32 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call i16 @llvm.vector.reduce.smin.v64i16(<64 x i16> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V128 = call i16 @llvm.vector.reduce.smin.v128i16(<128 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i16 @llvm.vector.reduce.smin.v1i16(<1 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i16 @llvm.vector.reduce.smin.v2i16(<2 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i16 @llvm.vector.reduce.smin.v32i16(<32 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i16 @llvm.vector.reduce.smin.v64i16(<64 x i16> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call i16 @llvm.vector.reduce.smin.v128i16(<128 x i16> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i16 @llvm.vector.reduce.smin.v1i16(<1 x i16> undef)
@@ -285,18 +285,18 @@ define i32 @reduce_smin_i32(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call i32 @llvm.vector.reduce.smin.v32i32(<32 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64 = call i32 @llvm.vector.reduce.smin.v64i32(<64 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V128 = call i32 @llvm.vector.reduce.smin.v128i32(<128 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V128 = call i32 @llvm.vector.reduce.smin.v128i32(<128 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_smin_i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i32 @llvm.vector.reduce.smin.v1i32(<1 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i32 @llvm.vector.reduce.smin.v32i32(<32 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call i32 @llvm.vector.reduce.smin.v64i32(<64 x i32> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call i32 @llvm.vector.reduce.smin.v128i32(<128 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i32 @llvm.vector.reduce.smin.v1i32(<1 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i32 @llvm.vector.reduce.smin.v32i32(<32 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i32 @llvm.vector.reduce.smin.v64i32(<64 x i32> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V128 = call i32 @llvm.vector.reduce.smin.v128i32(<128 x i32> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i32 @llvm.vector.reduce.smin.v1i32(<1 x i32> undef)
@@ -318,19 +318,19 @@ define i32 @reduce_smin_i64(i32 %arg) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8 = call i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call i64 @llvm.vector.reduce.smin.v32i64(<32 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V64 = call i64 @llvm.vector.reduce.smin.v64i64(<64 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V128 = call i64 @llvm.vector.reduce.smin.v128i64(<128 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %V64 = call i64 @llvm.vector.reduce.smin.v64i64(<64 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %V128 = call i64 @llvm.vector.reduce.smin.v128i64(<128 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SIZE-LABEL: 'reduce_smin_i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1 = call i64 @llvm.vector.reduce.smin.v1i64(<1 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call i64 @llvm.vector.reduce.smin.v32i64(<32 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call i64 @llvm.vector.reduce.smin.v64i64(<64 x i64> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V128 = call i64 @llvm.vector.reduce.smin.v128i64(<128 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1 = call i64 @llvm.vector.reduce.smin.v1i64(<1 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4 = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8 = call i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32 = call i64 @llvm.vector.reduce.smin.v32i64(<32 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V64 = call i64 @llvm.vector.reduce.smin.v64i64(<64 x i64> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V128 = call i64 @llvm.vector.reduce.smin.v128i64(<128 x i64> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%V1 = call i64 @llvm.vector.reduce.smin.v1i64(<1 x i64> undef)
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-fp.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-fp.ll
index 0dd3e3c..a9a5f4d2 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-fp.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-fp.ll
@@ -238,7 +238,7 @@ define float @vreduce_ord_fadd_nxv4f32(<vscale x 4 x float> %v, float %s) {
define float @vreduce_fwadd_nxv4f32(<vscale x 4 x half> %v, float %s) {
; CHECK-LABEL: 'vreduce_fwadd_nxv4f32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fpext <vscale x 4 x half> %v to <vscale x 4 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e = fpext <vscale x 4 x half> %v to <vscale x 4 x float>
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float %s, <vscale x 4 x float> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %red
;
@@ -254,7 +254,7 @@ define float @vreduce_fwadd_nxv4f32(<vscale x 4 x half> %v, float %s) {
define float @vreduce_ord_fwadd_nxv4f32(<vscale x 4 x half> %v, float %s) {
; CHECK-LABEL: 'vreduce_ord_fwadd_nxv4f32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fpext <vscale x 4 x half> %v to <vscale x 4 x float>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e = fpext <vscale x 4 x half> %v to <vscale x 4 x float>
; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %red = call float @llvm.vector.reduce.fadd.nxv4f32(float %s, <vscale x 4 x float> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %red
;
@@ -358,7 +358,7 @@ define double @vreduce_ord_fadd_nxv2f64(<vscale x 2 x double> %v, double %s) {
define double @vreduce_fwadd_nxv2f64(<vscale x 2 x float> %v, double %s) {
; CHECK-LABEL: 'vreduce_fwadd_nxv2f64'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fpext <vscale x 2 x float> %v to <vscale x 2 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e = fpext <vscale x 2 x float> %v to <vscale x 2 x double>
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call reassoc double @llvm.vector.reduce.fadd.nxv2f64(double %s, <vscale x 2 x double> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
@@ -374,7 +374,7 @@ define double @vreduce_fwadd_nxv2f64(<vscale x 2 x float> %v, double %s) {
define double @vreduce_ord_fwadd_nxv2f64(<vscale x 2 x float> %v, double %s) {
; CHECK-LABEL: 'vreduce_ord_fwadd_nxv2f64'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fpext <vscale x 2 x float> %v to <vscale x 2 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e = fpext <vscale x 2 x float> %v to <vscale x 2 x double>
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %red = call double @llvm.vector.reduce.fadd.nxv2f64(double %s, <vscale x 2 x double> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
@@ -418,7 +418,7 @@ define double @vreduce_ord_fadd_nxv4f64(<vscale x 4 x double> %v, double %s) {
define double @vreduce_fwadd_nxv4f64(<vscale x 4 x float> %v, double %s) {
; CHECK-LABEL: 'vreduce_fwadd_nxv4f64'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fpext <vscale x 4 x float> %v to <vscale x 4 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e = fpext <vscale x 4 x float> %v to <vscale x 4 x double>
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call reassoc double @llvm.vector.reduce.fadd.nxv4f64(double %s, <vscale x 4 x double> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
@@ -434,7 +434,7 @@ define double @vreduce_fwadd_nxv4f64(<vscale x 4 x float> %v, double %s) {
define double @vreduce_ord_fwadd_nxv4f64(<vscale x 4 x float> %v, double %s) {
; CHECK-LABEL: 'vreduce_ord_fwadd_nxv4f64'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fpext <vscale x 4 x float> %v to <vscale x 4 x double>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e = fpext <vscale x 4 x float> %v to <vscale x 4 x double>
; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %red = call double @llvm.vector.reduce.fadd.nxv4f64(double %s, <vscale x 4 x double> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
@@ -456,7 +456,7 @@ define half @vreduce_fmin_nxv1f16(<vscale x 1 x half> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv1f16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call half @llvm.vector.reduce.fmin.nxv1f16(<vscale x 1 x half> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call half @llvm.vector.reduce.fmin.nxv1f16(<vscale x 1 x half> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret half %red
;
%red = call half @llvm.vector.reduce.fmin.nxv1f16(<vscale x 1 x half> %v)
@@ -469,7 +469,7 @@ define half @vreduce_fmin_nxv1f16_nonans(<vscale x 1 x half> %v) #0 {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv1f16_nonans'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call nnan half @llvm.vector.reduce.fmin.nxv1f16(<vscale x 1 x half> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call nnan half @llvm.vector.reduce.fmin.nxv1f16(<vscale x 1 x half> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret half %red
;
%red = call nnan half @llvm.vector.reduce.fmin.nxv1f16(<vscale x 1 x half> %v)
@@ -482,7 +482,7 @@ define half @vreduce_fmin_nxv1f16_nonans_noinfs(<vscale x 1 x half> %v) #1 {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv1f16_nonans_noinfs'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call nnan ninf half @llvm.vector.reduce.fmin.nxv1f16(<vscale x 1 x half> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call nnan ninf half @llvm.vector.reduce.fmin.nxv1f16(<vscale x 1 x half> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret half %red
;
%red = call nnan ninf half @llvm.vector.reduce.fmin.nxv1f16(<vscale x 1 x half> %v)
@@ -497,7 +497,7 @@ define half @vreduce_fmin_nxv2f16(<vscale x 2 x half> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv2f16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call half @llvm.vector.reduce.fmin.nxv2f16(<vscale x 2 x half> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call half @llvm.vector.reduce.fmin.nxv2f16(<vscale x 2 x half> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret half %red
;
%red = call half @llvm.vector.reduce.fmin.nxv2f16(<vscale x 2 x half> %v)
@@ -512,7 +512,7 @@ define half @vreduce_fmin_nxv4f16(<vscale x 4 x half> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv4f16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call half @llvm.vector.reduce.fmin.nxv4f16(<vscale x 4 x half> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call half @llvm.vector.reduce.fmin.nxv4f16(<vscale x 4 x half> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret half %red
;
%red = call half @llvm.vector.reduce.fmin.nxv4f16(<vscale x 4 x half> %v)
@@ -523,11 +523,11 @@ declare half @llvm.vector.reduce.fmin.nxv64f16(<vscale x 64 x half>)
define half @vreduce_fmin_nxv64f16(<vscale x 64 x half> %v) {
; CHECK-LABEL: 'vreduce_fmin_nxv64f16'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %red = call half @llvm.vector.reduce.fmin.nxv64f16(<vscale x 64 x half> %v)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %red = call half @llvm.vector.reduce.fmin.nxv64f16(<vscale x 64 x half> %v)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv64f16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call half @llvm.vector.reduce.fmin.nxv64f16(<vscale x 64 x half> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call half @llvm.vector.reduce.fmin.nxv64f16(<vscale x 64 x half> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret half %red
;
%red = call half @llvm.vector.reduce.fmin.nxv64f16(<vscale x 64 x half> %v)
@@ -542,7 +542,7 @@ define float @vreduce_fmin_nxv1f32(<vscale x 1 x float> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv1f32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call float @llvm.vector.reduce.fmin.nxv1f32(<vscale x 1 x float> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call float @llvm.vector.reduce.fmin.nxv1f32(<vscale x 1 x float> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float %red
;
%red = call float @llvm.vector.reduce.fmin.nxv1f32(<vscale x 1 x float> %v)
@@ -555,7 +555,7 @@ define float @vreduce_fmin_nxv1f32_nonans(<vscale x 1 x float> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv1f32_nonans'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call nnan float @llvm.vector.reduce.fmin.nxv1f32(<vscale x 1 x float> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call nnan float @llvm.vector.reduce.fmin.nxv1f32(<vscale x 1 x float> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float %red
;
%red = call nnan float @llvm.vector.reduce.fmin.nxv1f32(<vscale x 1 x float> %v)
@@ -568,7 +568,7 @@ define float @vreduce_fmin_nxv1f32_nonans_noinfs(<vscale x 1 x float> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv1f32_nonans_noinfs'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call nnan ninf float @llvm.vector.reduce.fmin.nxv1f32(<vscale x 1 x float> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call nnan ninf float @llvm.vector.reduce.fmin.nxv1f32(<vscale x 1 x float> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float %red
;
%red = call nnan ninf float @llvm.vector.reduce.fmin.nxv1f32(<vscale x 1 x float> %v)
@@ -583,7 +583,7 @@ define float @vreduce_fmin_nxv2f32(<vscale x 2 x float> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv2f32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call float @llvm.vector.reduce.fmin.nxv2f32(<vscale x 2 x float> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call float @llvm.vector.reduce.fmin.nxv2f32(<vscale x 2 x float> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float %red
;
%red = call float @llvm.vector.reduce.fmin.nxv2f32(<vscale x 2 x float> %v)
@@ -598,7 +598,7 @@ define float @vreduce_fmin_nxv4f32(<vscale x 4 x float> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv4f32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float %red
;
%red = call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> %v)
@@ -609,11 +609,11 @@ declare float @llvm.vector.reduce.fmin.nxv32f32(<vscale x 32 x float>)
define float @vreduce_fmin_nxv32f32(<vscale x 32 x float> %v) {
; CHECK-LABEL: 'vreduce_fmin_nxv32f32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %red = call float @llvm.vector.reduce.fmin.nxv32f32(<vscale x 32 x float> %v)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %red = call float @llvm.vector.reduce.fmin.nxv32f32(<vscale x 32 x float> %v)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv32f32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call float @llvm.vector.reduce.fmin.nxv32f32(<vscale x 32 x float> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call float @llvm.vector.reduce.fmin.nxv32f32(<vscale x 32 x float> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float %red
;
%red = call float @llvm.vector.reduce.fmin.nxv32f32(<vscale x 32 x float> %v)
@@ -628,7 +628,7 @@ define double @vreduce_fmin_nxv1f64(<vscale x 1 x double> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv1f64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call double @llvm.vector.reduce.fmin.nxv1f64(<vscale x 1 x double> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call double @llvm.vector.reduce.fmin.nxv1f64(<vscale x 1 x double> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double %red
;
%red = call double @llvm.vector.reduce.fmin.nxv1f64(<vscale x 1 x double> %v)
@@ -641,7 +641,7 @@ define double @vreduce_fmin_nxv1f64_nonans(<vscale x 1 x double> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv1f64_nonans'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call nnan double @llvm.vector.reduce.fmin.nxv1f64(<vscale x 1 x double> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call nnan double @llvm.vector.reduce.fmin.nxv1f64(<vscale x 1 x double> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double %red
;
%red = call nnan double @llvm.vector.reduce.fmin.nxv1f64(<vscale x 1 x double> %v)
@@ -654,7 +654,7 @@ define double @vreduce_fmin_nxv1f64_nonans_noinfs(<vscale x 1 x double> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv1f64_nonans_noinfs'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call nnan ninf double @llvm.vector.reduce.fmin.nxv1f64(<vscale x 1 x double> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call nnan ninf double @llvm.vector.reduce.fmin.nxv1f64(<vscale x 1 x double> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double %red
;
%red = call nnan ninf double @llvm.vector.reduce.fmin.nxv1f64(<vscale x 1 x double> %v)
@@ -669,7 +669,7 @@ define double @vreduce_fmin_nxv2f64(<vscale x 2 x double> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv2f64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call double @llvm.vector.reduce.fmin.nxv2f64(<vscale x 2 x double> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call double @llvm.vector.reduce.fmin.nxv2f64(<vscale x 2 x double> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double %red
;
%red = call double @llvm.vector.reduce.fmin.nxv2f64(<vscale x 2 x double> %v)
@@ -684,7 +684,7 @@ define double @vreduce_fmin_nxv4f64(<vscale x 4 x double> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv4f64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call double @llvm.vector.reduce.fmin.nxv4f64(<vscale x 4 x double> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call double @llvm.vector.reduce.fmin.nxv4f64(<vscale x 4 x double> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double %red
;
%red = call double @llvm.vector.reduce.fmin.nxv4f64(<vscale x 4 x double> %v)
@@ -695,11 +695,11 @@ declare double @llvm.vector.reduce.fmin.nxv16f64(<vscale x 16 x double>)
define double @vreduce_fmin_nxv16f64(<vscale x 16 x double> %v) {
; CHECK-LABEL: 'vreduce_fmin_nxv16f64'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %red = call double @llvm.vector.reduce.fmin.nxv16f64(<vscale x 16 x double> %v)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %red = call double @llvm.vector.reduce.fmin.nxv16f64(<vscale x 16 x double> %v)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
; SIZE-LABEL: 'vreduce_fmin_nxv16f64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call double @llvm.vector.reduce.fmin.nxv16f64(<vscale x 16 x double> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call double @llvm.vector.reduce.fmin.nxv16f64(<vscale x 16 x double> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double %red
;
%red = call double @llvm.vector.reduce.fmin.nxv16f64(<vscale x 16 x double> %v)
@@ -714,7 +714,7 @@ define half @vreduce_fmax_nxv1f16(<vscale x 1 x half> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv1f16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call half @llvm.vector.reduce.fmax.nxv1f16(<vscale x 1 x half> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call half @llvm.vector.reduce.fmax.nxv1f16(<vscale x 1 x half> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret half %red
;
%red = call half @llvm.vector.reduce.fmax.nxv1f16(<vscale x 1 x half> %v)
@@ -727,7 +727,7 @@ define half @vreduce_fmax_nxv1f16_nonans(<vscale x 1 x half> %v) #0 {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv1f16_nonans'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call nnan half @llvm.vector.reduce.fmax.nxv1f16(<vscale x 1 x half> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call nnan half @llvm.vector.reduce.fmax.nxv1f16(<vscale x 1 x half> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret half %red
;
%red = call nnan half @llvm.vector.reduce.fmax.nxv1f16(<vscale x 1 x half> %v)
@@ -740,7 +740,7 @@ define half @vreduce_fmax_nxv1f16_nonans_noinfs(<vscale x 1 x half> %v) #1 {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv1f16_nonans_noinfs'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call nnan ninf half @llvm.vector.reduce.fmax.nxv1f16(<vscale x 1 x half> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call nnan ninf half @llvm.vector.reduce.fmax.nxv1f16(<vscale x 1 x half> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret half %red
;
%red = call nnan ninf half @llvm.vector.reduce.fmax.nxv1f16(<vscale x 1 x half> %v)
@@ -755,7 +755,7 @@ define half @vreduce_fmax_nxv2f16(<vscale x 2 x half> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv2f16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call half @llvm.vector.reduce.fmax.nxv2f16(<vscale x 2 x half> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call half @llvm.vector.reduce.fmax.nxv2f16(<vscale x 2 x half> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret half %red
;
%red = call half @llvm.vector.reduce.fmax.nxv2f16(<vscale x 2 x half> %v)
@@ -770,7 +770,7 @@ define half @vreduce_fmax_nxv4f16(<vscale x 4 x half> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv4f16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call half @llvm.vector.reduce.fmax.nxv4f16(<vscale x 4 x half> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call half @llvm.vector.reduce.fmax.nxv4f16(<vscale x 4 x half> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret half %red
;
%red = call half @llvm.vector.reduce.fmax.nxv4f16(<vscale x 4 x half> %v)
@@ -781,11 +781,11 @@ declare half @llvm.vector.reduce.fmax.nxv64f16(<vscale x 64 x half>)
define half @vreduce_fmax_nxv64f16(<vscale x 64 x half> %v) {
; CHECK-LABEL: 'vreduce_fmax_nxv64f16'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %red = call half @llvm.vector.reduce.fmax.nxv64f16(<vscale x 64 x half> %v)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %red = call half @llvm.vector.reduce.fmax.nxv64f16(<vscale x 64 x half> %v)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv64f16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call half @llvm.vector.reduce.fmax.nxv64f16(<vscale x 64 x half> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call half @llvm.vector.reduce.fmax.nxv64f16(<vscale x 64 x half> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret half %red
;
%red = call half @llvm.vector.reduce.fmax.nxv64f16(<vscale x 64 x half> %v)
@@ -800,7 +800,7 @@ define float @vreduce_fmax_nxv1f32(<vscale x 1 x float> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv1f32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call float @llvm.vector.reduce.fmax.nxv1f32(<vscale x 1 x float> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call float @llvm.vector.reduce.fmax.nxv1f32(<vscale x 1 x float> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float %red
;
%red = call float @llvm.vector.reduce.fmax.nxv1f32(<vscale x 1 x float> %v)
@@ -813,7 +813,7 @@ define float @vreduce_fmax_nxv1f32_nonans(<vscale x 1 x float> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv1f32_nonans'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call nnan float @llvm.vector.reduce.fmax.nxv1f32(<vscale x 1 x float> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call nnan float @llvm.vector.reduce.fmax.nxv1f32(<vscale x 1 x float> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float %red
;
%red = call nnan float @llvm.vector.reduce.fmax.nxv1f32(<vscale x 1 x float> %v)
@@ -826,7 +826,7 @@ define float @vreduce_fmax_nxv1f32_nonans_noinfs(<vscale x 1 x float> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv1f32_nonans_noinfs'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call nnan ninf float @llvm.vector.reduce.fmax.nxv1f32(<vscale x 1 x float> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call nnan ninf float @llvm.vector.reduce.fmax.nxv1f32(<vscale x 1 x float> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float %red
;
%red = call nnan ninf float @llvm.vector.reduce.fmax.nxv1f32(<vscale x 1 x float> %v)
@@ -841,7 +841,7 @@ define float @vreduce_fmax_nxv2f32(<vscale x 2 x float> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv2f32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call float @llvm.vector.reduce.fmax.nxv2f32(<vscale x 2 x float> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call float @llvm.vector.reduce.fmax.nxv2f32(<vscale x 2 x float> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float %red
;
%red = call float @llvm.vector.reduce.fmax.nxv2f32(<vscale x 2 x float> %v)
@@ -856,7 +856,7 @@ define float @vreduce_fmax_nxv4f32(<vscale x 4 x float> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv4f32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float %red
;
%red = call float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> %v)
@@ -867,11 +867,11 @@ declare float @llvm.vector.reduce.fmax.nxv32f32(<vscale x 32 x float>)
define float @vreduce_fmax_nxv32f32(<vscale x 32 x float> %v) {
; CHECK-LABEL: 'vreduce_fmax_nxv32f32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %red = call float @llvm.vector.reduce.fmax.nxv32f32(<vscale x 32 x float> %v)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %red = call float @llvm.vector.reduce.fmax.nxv32f32(<vscale x 32 x float> %v)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv32f32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call float @llvm.vector.reduce.fmax.nxv32f32(<vscale x 32 x float> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call float @llvm.vector.reduce.fmax.nxv32f32(<vscale x 32 x float> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float %red
;
%red = call float @llvm.vector.reduce.fmax.nxv32f32(<vscale x 32 x float> %v)
@@ -886,7 +886,7 @@ define double @vreduce_fmax_nxv1f64(<vscale x 1 x double> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv1f64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call double @llvm.vector.reduce.fmax.nxv1f64(<vscale x 1 x double> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call double @llvm.vector.reduce.fmax.nxv1f64(<vscale x 1 x double> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double %red
;
%red = call double @llvm.vector.reduce.fmax.nxv1f64(<vscale x 1 x double> %v)
@@ -899,7 +899,7 @@ define double @vreduce_fmax_nxv1f64_nonans(<vscale x 1 x double> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv1f64_nonans'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call nnan double @llvm.vector.reduce.fmax.nxv1f64(<vscale x 1 x double> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call nnan double @llvm.vector.reduce.fmax.nxv1f64(<vscale x 1 x double> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double %red
;
%red = call nnan double @llvm.vector.reduce.fmax.nxv1f64(<vscale x 1 x double> %v)
@@ -912,7 +912,7 @@ define double @vreduce_fmax_nxv1f64_nonans_noinfs(<vscale x 1 x double> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv1f64_nonans_noinfs'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call nnan ninf double @llvm.vector.reduce.fmax.nxv1f64(<vscale x 1 x double> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call nnan ninf double @llvm.vector.reduce.fmax.nxv1f64(<vscale x 1 x double> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double %red
;
%red = call nnan ninf double @llvm.vector.reduce.fmax.nxv1f64(<vscale x 1 x double> %v)
@@ -927,7 +927,7 @@ define double @vreduce_fmax_nxv2f64(<vscale x 2 x double> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv2f64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call double @llvm.vector.reduce.fmax.nxv2f64(<vscale x 2 x double> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call double @llvm.vector.reduce.fmax.nxv2f64(<vscale x 2 x double> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double %red
;
%red = call double @llvm.vector.reduce.fmax.nxv2f64(<vscale x 2 x double> %v)
@@ -942,7 +942,7 @@ define double @vreduce_fmax_nxv4f64(<vscale x 4 x double> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv4f64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call double @llvm.vector.reduce.fmax.nxv4f64(<vscale x 4 x double> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call double @llvm.vector.reduce.fmax.nxv4f64(<vscale x 4 x double> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double %red
;
%red = call double @llvm.vector.reduce.fmax.nxv4f64(<vscale x 4 x double> %v)
@@ -953,11 +953,11 @@ declare double @llvm.vector.reduce.fmax.nxv16f64(<vscale x 16 x double>)
define double @vreduce_fmax_nxv16f64(<vscale x 16 x double> %v) {
; CHECK-LABEL: 'vreduce_fmax_nxv16f64'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %red = call double @llvm.vector.reduce.fmax.nxv16f64(<vscale x 16 x double> %v)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %red = call double @llvm.vector.reduce.fmax.nxv16f64(<vscale x 16 x double> %v)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %red
;
; SIZE-LABEL: 'vreduce_fmax_nxv16f64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call double @llvm.vector.reduce.fmax.nxv16f64(<vscale x 16 x double> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call double @llvm.vector.reduce.fmax.nxv16f64(<vscale x 16 x double> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double %red
;
%red = call double @llvm.vector.reduce.fmax.nxv16f64(<vscale x 16 x double> %v)
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-int.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-int.ll
index 80efe912..2807f75 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-int.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-int.ll
@@ -27,7 +27,7 @@ define signext i8 @vreduce_umax_nxv1i8(<vscale x 1 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i8 %red
;
; SIZE-LABEL: 'vreduce_umax_nxv1i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.umax.nxv1i8(<vscale x 1 x i8> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.umax.nxv1i8(<vscale x 1 x i8> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i8 %red
;
%red = call i8 @llvm.vector.reduce.umax.nxv1i8(<vscale x 1 x i8> %v)
@@ -42,7 +42,7 @@ define signext i8 @vreduce_smax_nxv1i8(<vscale x 1 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i8 %red
;
; SIZE-LABEL: 'vreduce_smax_nxv1i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.smax.nxv1i8(<vscale x 1 x i8> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.smax.nxv1i8(<vscale x 1 x i8> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i8 %red
;
%red = call i8 @llvm.vector.reduce.smax.nxv1i8(<vscale x 1 x i8> %v)
@@ -57,7 +57,7 @@ define signext i8 @vreduce_umin_nxv1i8(<vscale x 1 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i8 %red
;
; SIZE-LABEL: 'vreduce_umin_nxv1i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.umin.nxv1i8(<vscale x 1 x i8> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.umin.nxv1i8(<vscale x 1 x i8> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i8 %red
;
%red = call i8 @llvm.vector.reduce.umin.nxv1i8(<vscale x 1 x i8> %v)
@@ -72,7 +72,7 @@ define signext i8 @vreduce_smin_nxv1i8(<vscale x 1 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i8 %red
;
; SIZE-LABEL: 'vreduce_smin_nxv1i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.smin.nxv1i8(<vscale x 1 x i8> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.smin.nxv1i8(<vscale x 1 x i8> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i8 %red
;
%red = call i8 @llvm.vector.reduce.smin.nxv1i8(<vscale x 1 x i8> %v)
@@ -147,7 +147,7 @@ define signext i8 @vreduce_umax_nxv2i8(<vscale x 2 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i8 %red
;
; SIZE-LABEL: 'vreduce_umax_nxv2i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.umax.nxv2i8(<vscale x 2 x i8> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.umax.nxv2i8(<vscale x 2 x i8> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i8 %red
;
%red = call i8 @llvm.vector.reduce.umax.nxv2i8(<vscale x 2 x i8> %v)
@@ -162,7 +162,7 @@ define signext i8 @vreduce_smax_nxv2i8(<vscale x 2 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i8 %red
;
; SIZE-LABEL: 'vreduce_smax_nxv2i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.smax.nxv2i8(<vscale x 2 x i8> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.smax.nxv2i8(<vscale x 2 x i8> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i8 %red
;
%red = call i8 @llvm.vector.reduce.smax.nxv2i8(<vscale x 2 x i8> %v)
@@ -177,7 +177,7 @@ define signext i8 @vreduce_umin_nxv2i8(<vscale x 2 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i8 %red
;
; SIZE-LABEL: 'vreduce_umin_nxv2i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.umin.nxv2i8(<vscale x 2 x i8> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.umin.nxv2i8(<vscale x 2 x i8> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i8 %red
;
%red = call i8 @llvm.vector.reduce.umin.nxv2i8(<vscale x 2 x i8> %v)
@@ -192,7 +192,7 @@ define signext i8 @vreduce_smin_nxv2i8(<vscale x 2 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i8 %red
;
; SIZE-LABEL: 'vreduce_smin_nxv2i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.smin.nxv2i8(<vscale x 2 x i8> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.smin.nxv2i8(<vscale x 2 x i8> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i8 %red
;
%red = call i8 @llvm.vector.reduce.smin.nxv2i8(<vscale x 2 x i8> %v)
@@ -267,7 +267,7 @@ define signext i8 @vreduce_umax_nxv4i8(<vscale x 4 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i8 %red
;
; SIZE-LABEL: 'vreduce_umax_nxv4i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.umax.nxv4i8(<vscale x 4 x i8> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.umax.nxv4i8(<vscale x 4 x i8> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i8 %red
;
%red = call i8 @llvm.vector.reduce.umax.nxv4i8(<vscale x 4 x i8> %v)
@@ -282,7 +282,7 @@ define signext i8 @vreduce_smax_nxv4i8(<vscale x 4 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i8 %red
;
; SIZE-LABEL: 'vreduce_smax_nxv4i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.smax.nxv4i8(<vscale x 4 x i8> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.smax.nxv4i8(<vscale x 4 x i8> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i8 %red
;
%red = call i8 @llvm.vector.reduce.smax.nxv4i8(<vscale x 4 x i8> %v)
@@ -297,7 +297,7 @@ define signext i8 @vreduce_umin_nxv4i8(<vscale x 4 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i8 %red
;
; SIZE-LABEL: 'vreduce_umin_nxv4i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.umin.nxv4i8(<vscale x 4 x i8> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.umin.nxv4i8(<vscale x 4 x i8> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i8 %red
;
%red = call i8 @llvm.vector.reduce.umin.nxv4i8(<vscale x 4 x i8> %v)
@@ -312,7 +312,7 @@ define signext i8 @vreduce_smin_nxv4i8(<vscale x 4 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i8 %red
;
; SIZE-LABEL: 'vreduce_smin_nxv4i8'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.smin.nxv4i8(<vscale x 4 x i8> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.smin.nxv4i8(<vscale x 4 x i8> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i8 %red
;
%red = call i8 @llvm.vector.reduce.smin.nxv4i8(<vscale x 4 x i8> %v)
@@ -419,7 +419,7 @@ define signext i16 @vreduce_umax_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i16 %red
;
; SIZE-LABEL: 'vreduce_umax_nxv1i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.umax.nxv1i16(<vscale x 1 x i16> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.umax.nxv1i16(<vscale x 1 x i16> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i16 %red
;
%red = call i16 @llvm.vector.reduce.umax.nxv1i16(<vscale x 1 x i16> %v)
@@ -434,7 +434,7 @@ define signext i16 @vreduce_smax_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i16 %red
;
; SIZE-LABEL: 'vreduce_smax_nxv1i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.smax.nxv1i16(<vscale x 1 x i16> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.smax.nxv1i16(<vscale x 1 x i16> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i16 %red
;
%red = call i16 @llvm.vector.reduce.smax.nxv1i16(<vscale x 1 x i16> %v)
@@ -449,7 +449,7 @@ define signext i16 @vreduce_umin_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i16 %red
;
; SIZE-LABEL: 'vreduce_umin_nxv1i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.umin.nxv1i16(<vscale x 1 x i16> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.umin.nxv1i16(<vscale x 1 x i16> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i16 %red
;
%red = call i16 @llvm.vector.reduce.umin.nxv1i16(<vscale x 1 x i16> %v)
@@ -464,7 +464,7 @@ define signext i16 @vreduce_smin_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i16 %red
;
; SIZE-LABEL: 'vreduce_smin_nxv1i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.smin.nxv1i16(<vscale x 1 x i16> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.smin.nxv1i16(<vscale x 1 x i16> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i16 %red
;
%red = call i16 @llvm.vector.reduce.smin.nxv1i16(<vscale x 1 x i16> %v)
@@ -571,7 +571,7 @@ define signext i16 @vreduce_umax_nxv2i16(<vscale x 2 x i16> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i16 %red
;
; SIZE-LABEL: 'vreduce_umax_nxv2i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.umax.nxv2i16(<vscale x 2 x i16> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.umax.nxv2i16(<vscale x 2 x i16> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i16 %red
;
%red = call i16 @llvm.vector.reduce.umax.nxv2i16(<vscale x 2 x i16> %v)
@@ -586,7 +586,7 @@ define signext i16 @vreduce_smax_nxv2i16(<vscale x 2 x i16> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i16 %red
;
; SIZE-LABEL: 'vreduce_smax_nxv2i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.smax.nxv2i16(<vscale x 2 x i16> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.smax.nxv2i16(<vscale x 2 x i16> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i16 %red
;
%red = call i16 @llvm.vector.reduce.smax.nxv2i16(<vscale x 2 x i16> %v)
@@ -601,7 +601,7 @@ define signext i16 @vreduce_umin_nxv2i16(<vscale x 2 x i16> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i16 %red
;
; SIZE-LABEL: 'vreduce_umin_nxv2i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.umin.nxv2i16(<vscale x 2 x i16> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.umin.nxv2i16(<vscale x 2 x i16> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i16 %red
;
%red = call i16 @llvm.vector.reduce.umin.nxv2i16(<vscale x 2 x i16> %v)
@@ -616,7 +616,7 @@ define signext i16 @vreduce_smin_nxv2i16(<vscale x 2 x i16> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i16 %red
;
; SIZE-LABEL: 'vreduce_smin_nxv2i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.smin.nxv2i16(<vscale x 2 x i16> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.smin.nxv2i16(<vscale x 2 x i16> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i16 %red
;
%red = call i16 @llvm.vector.reduce.smin.nxv2i16(<vscale x 2 x i16> %v)
@@ -723,7 +723,7 @@ define signext i16 @vreduce_umax_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i16 %red
;
; SIZE-LABEL: 'vreduce_umax_nxv4i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.umax.nxv4i16(<vscale x 4 x i16> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.umax.nxv4i16(<vscale x 4 x i16> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i16 %red
;
%red = call i16 @llvm.vector.reduce.umax.nxv4i16(<vscale x 4 x i16> %v)
@@ -738,7 +738,7 @@ define signext i16 @vreduce_smax_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i16 %red
;
; SIZE-LABEL: 'vreduce_smax_nxv4i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.smax.nxv4i16(<vscale x 4 x i16> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.smax.nxv4i16(<vscale x 4 x i16> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i16 %red
;
%red = call i16 @llvm.vector.reduce.smax.nxv4i16(<vscale x 4 x i16> %v)
@@ -753,7 +753,7 @@ define signext i16 @vreduce_umin_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i16 %red
;
; SIZE-LABEL: 'vreduce_umin_nxv4i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.umin.nxv4i16(<vscale x 4 x i16> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.umin.nxv4i16(<vscale x 4 x i16> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i16 %red
;
%red = call i16 @llvm.vector.reduce.umin.nxv4i16(<vscale x 4 x i16> %v)
@@ -768,7 +768,7 @@ define signext i16 @vreduce_smin_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i16 %red
;
; SIZE-LABEL: 'vreduce_smin_nxv4i16'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.smin.nxv4i16(<vscale x 4 x i16> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.smin.nxv4i16(<vscale x 4 x i16> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i16 %red
;
%red = call i16 @llvm.vector.reduce.smin.nxv4i16(<vscale x 4 x i16> %v)
@@ -875,7 +875,7 @@ define signext i32 @vreduce_umax_nxv1i32(<vscale x 1 x i32> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
; SIZE-LABEL: 'vreduce_umax_nxv1i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.umax.nxv1i32(<vscale x 1 x i32> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.umax.nxv1i32(<vscale x 1 x i32> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 %red
;
%red = call i32 @llvm.vector.reduce.umax.nxv1i32(<vscale x 1 x i32> %v)
@@ -890,7 +890,7 @@ define signext i32 @vreduce_smax_nxv1i32(<vscale x 1 x i32> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
; SIZE-LABEL: 'vreduce_smax_nxv1i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.smax.nxv1i32(<vscale x 1 x i32> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.smax.nxv1i32(<vscale x 1 x i32> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 %red
;
%red = call i32 @llvm.vector.reduce.smax.nxv1i32(<vscale x 1 x i32> %v)
@@ -905,7 +905,7 @@ define signext i32 @vreduce_umin_nxv1i32(<vscale x 1 x i32> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
; SIZE-LABEL: 'vreduce_umin_nxv1i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.umin.nxv1i32(<vscale x 1 x i32> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.umin.nxv1i32(<vscale x 1 x i32> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 %red
;
%red = call i32 @llvm.vector.reduce.umin.nxv1i32(<vscale x 1 x i32> %v)
@@ -920,7 +920,7 @@ define signext i32 @vreduce_smin_nxv1i32(<vscale x 1 x i32> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
; SIZE-LABEL: 'vreduce_smin_nxv1i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.smin.nxv1i32(<vscale x 1 x i32> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.smin.nxv1i32(<vscale x 1 x i32> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 %red
;
%red = call i32 @llvm.vector.reduce.smin.nxv1i32(<vscale x 1 x i32> %v)
@@ -1027,7 +1027,7 @@ define signext i32 @vreduce_umax_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
; SIZE-LABEL: 'vreduce_umax_nxv2i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.umax.nxv2i32(<vscale x 2 x i32> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.umax.nxv2i32(<vscale x 2 x i32> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 %red
;
%red = call i32 @llvm.vector.reduce.umax.nxv2i32(<vscale x 2 x i32> %v)
@@ -1042,7 +1042,7 @@ define signext i32 @vreduce_smax_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
; SIZE-LABEL: 'vreduce_smax_nxv2i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.smax.nxv2i32(<vscale x 2 x i32> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.smax.nxv2i32(<vscale x 2 x i32> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 %red
;
%red = call i32 @llvm.vector.reduce.smax.nxv2i32(<vscale x 2 x i32> %v)
@@ -1057,7 +1057,7 @@ define signext i32 @vreduce_umin_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
; SIZE-LABEL: 'vreduce_umin_nxv2i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.umin.nxv2i32(<vscale x 2 x i32> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.umin.nxv2i32(<vscale x 2 x i32> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 %red
;
%red = call i32 @llvm.vector.reduce.umin.nxv2i32(<vscale x 2 x i32> %v)
@@ -1072,7 +1072,7 @@ define signext i32 @vreduce_smin_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
; SIZE-LABEL: 'vreduce_smin_nxv2i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.smin.nxv2i32(<vscale x 2 x i32> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.smin.nxv2i32(<vscale x 2 x i32> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 %red
;
%red = call i32 @llvm.vector.reduce.smin.nxv2i32(<vscale x 2 x i32> %v)
@@ -1141,7 +1141,7 @@ define signext i32 @vreduce_add_nxv4i32(<vscale x 4 x i32> %v) {
define signext i32 @vwreduce_add_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-LABEL: 'vwreduce_add_nxv4i16'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = sext <vscale x 4 x i16> %v to <vscale x 4 x i32>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e = sext <vscale x 4 x i16> %v to <vscale x 4 x i32>
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
@@ -1157,7 +1157,7 @@ define signext i32 @vwreduce_add_nxv4i16(<vscale x 4 x i16> %v) {
define signext i32 @vwreduce_uadd_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-LABEL: 'vwreduce_uadd_nxv4i16'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = zext <vscale x 4 x i16> %v to <vscale x 4 x i32>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e = zext <vscale x 4 x i16> %v to <vscale x 4 x i32>
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
@@ -1179,7 +1179,7 @@ define signext i32 @vreduce_umax_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
; SIZE-LABEL: 'vreduce_umax_nxv4i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 %red
;
%red = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> %v)
@@ -1194,7 +1194,7 @@ define signext i32 @vreduce_smax_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
; SIZE-LABEL: 'vreduce_smax_nxv4i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 %red
;
%red = call i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32> %v)
@@ -1209,7 +1209,7 @@ define signext i32 @vreduce_umin_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
; SIZE-LABEL: 'vreduce_umin_nxv4i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 %red
;
%red = call i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32> %v)
@@ -1224,7 +1224,7 @@ define signext i32 @vreduce_smin_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
; SIZE-LABEL: 'vreduce_smin_nxv4i32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 %red
;
%red = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> %v)
@@ -1331,7 +1331,7 @@ define i64 @vreduce_umax_nxv1i64(<vscale x 1 x i64> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
; SIZE-LABEL: 'vreduce_umax_nxv1i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.umax.nxv1i64(<vscale x 1 x i64> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.umax.nxv1i64(<vscale x 1 x i64> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i64 %red
;
%red = call i64 @llvm.vector.reduce.umax.nxv1i64(<vscale x 1 x i64> %v)
@@ -1346,7 +1346,7 @@ define i64 @vreduce_smax_nxv1i64(<vscale x 1 x i64> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
; SIZE-LABEL: 'vreduce_smax_nxv1i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.smax.nxv1i64(<vscale x 1 x i64> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.smax.nxv1i64(<vscale x 1 x i64> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i64 %red
;
%red = call i64 @llvm.vector.reduce.smax.nxv1i64(<vscale x 1 x i64> %v)
@@ -1361,7 +1361,7 @@ define i64 @vreduce_umin_nxv1i64(<vscale x 1 x i64> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
; SIZE-LABEL: 'vreduce_umin_nxv1i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.umin.nxv1i64(<vscale x 1 x i64> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.umin.nxv1i64(<vscale x 1 x i64> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i64 %red
;
%red = call i64 @llvm.vector.reduce.umin.nxv1i64(<vscale x 1 x i64> %v)
@@ -1376,7 +1376,7 @@ define i64 @vreduce_smin_nxv1i64(<vscale x 1 x i64> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
; SIZE-LABEL: 'vreduce_smin_nxv1i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.smin.nxv1i64(<vscale x 1 x i64> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.smin.nxv1i64(<vscale x 1 x i64> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i64 %red
;
%red = call i64 @llvm.vector.reduce.smin.nxv1i64(<vscale x 1 x i64> %v)
@@ -1445,7 +1445,7 @@ define i64 @vreduce_add_nxv2i64(<vscale x 2 x i64> %v) {
define i64 @vwreduce_add_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-LABEL: 'vwreduce_add_nxv2i32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = sext <vscale x 2 x i32> %v to <vscale x 2 x i64>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e = sext <vscale x 2 x i32> %v to <vscale x 2 x i64>
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
@@ -1461,7 +1461,7 @@ define i64 @vwreduce_add_nxv2i32(<vscale x 2 x i32> %v) {
define i64 @vwreduce_uadd_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-LABEL: 'vwreduce_uadd_nxv2i32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = zext <vscale x 2 x i32> %v to <vscale x 2 x i64>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e = zext <vscale x 2 x i32> %v to <vscale x 2 x i64>
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
@@ -1483,7 +1483,7 @@ define i64 @vreduce_umax_nxv2i64(<vscale x 2 x i64> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
; SIZE-LABEL: 'vreduce_umax_nxv2i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.umax.nxv2i64(<vscale x 2 x i64> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.umax.nxv2i64(<vscale x 2 x i64> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i64 %red
;
%red = call i64 @llvm.vector.reduce.umax.nxv2i64(<vscale x 2 x i64> %v)
@@ -1498,7 +1498,7 @@ define i64 @vreduce_smax_nxv2i64(<vscale x 2 x i64> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
; SIZE-LABEL: 'vreduce_smax_nxv2i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.smax.nxv2i64(<vscale x 2 x i64> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.smax.nxv2i64(<vscale x 2 x i64> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i64 %red
;
%red = call i64 @llvm.vector.reduce.smax.nxv2i64(<vscale x 2 x i64> %v)
@@ -1513,7 +1513,7 @@ define i64 @vreduce_umin_nxv2i64(<vscale x 2 x i64> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
; SIZE-LABEL: 'vreduce_umin_nxv2i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.umin.nxv2i64(<vscale x 2 x i64> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.umin.nxv2i64(<vscale x 2 x i64> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i64 %red
;
%red = call i64 @llvm.vector.reduce.umin.nxv2i64(<vscale x 2 x i64> %v)
@@ -1528,7 +1528,7 @@ define i64 @vreduce_smin_nxv2i64(<vscale x 2 x i64> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
; SIZE-LABEL: 'vreduce_smin_nxv2i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.smin.nxv2i64(<vscale x 2 x i64> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.smin.nxv2i64(<vscale x 2 x i64> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i64 %red
;
%red = call i64 @llvm.vector.reduce.smin.nxv2i64(<vscale x 2 x i64> %v)
@@ -1597,7 +1597,7 @@ define i64 @vreduce_add_nxv4i64(<vscale x 4 x i64> %v) {
define i64 @vwreduce_add_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-LABEL: 'vwreduce_add_nxv4i32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = sext <vscale x 4 x i32> %v to <vscale x 4 x i64>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e = sext <vscale x 4 x i32> %v to <vscale x 4 x i64>
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
@@ -1613,7 +1613,7 @@ define i64 @vwreduce_add_nxv4i32(<vscale x 4 x i32> %v) {
define i64 @vwreduce_uadd_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-LABEL: 'vwreduce_uadd_nxv4i32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = zext <vscale x 4 x i32> %v to <vscale x 4 x i64>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e = zext <vscale x 4 x i32> %v to <vscale x 4 x i64>
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
@@ -1635,7 +1635,7 @@ define i64 @vreduce_umax_nxv4i64(<vscale x 4 x i64> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
; SIZE-LABEL: 'vreduce_umax_nxv4i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.umax.nxv4i64(<vscale x 4 x i64> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.umax.nxv4i64(<vscale x 4 x i64> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i64 %red
;
%red = call i64 @llvm.vector.reduce.umax.nxv4i64(<vscale x 4 x i64> %v)
@@ -1650,7 +1650,7 @@ define i64 @vreduce_smax_nxv4i64(<vscale x 4 x i64> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
; SIZE-LABEL: 'vreduce_smax_nxv4i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.smax.nxv4i64(<vscale x 4 x i64> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.smax.nxv4i64(<vscale x 4 x i64> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i64 %red
;
%red = call i64 @llvm.vector.reduce.smax.nxv4i64(<vscale x 4 x i64> %v)
@@ -1665,7 +1665,7 @@ define i64 @vreduce_umin_nxv4i64(<vscale x 4 x i64> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
; SIZE-LABEL: 'vreduce_umin_nxv4i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.umin.nxv4i64(<vscale x 4 x i64> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.umin.nxv4i64(<vscale x 4 x i64> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i64 %red
;
%red = call i64 @llvm.vector.reduce.umin.nxv4i64(<vscale x 4 x i64> %v)
@@ -1680,7 +1680,7 @@ define i64 @vreduce_smin_nxv4i64(<vscale x 4 x i64> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
; SIZE-LABEL: 'vreduce_smin_nxv4i64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.smin.nxv4i64(<vscale x 4 x i64> %v)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.smin.nxv4i64(<vscale x 4 x i64> %v)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i64 %red
;
%red = call i64 @llvm.vector.reduce.smin.nxv4i64(<vscale x 4 x i64> %v)
diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-extractelement.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-extractelement.ll
index 225bad6..aa7a90b 100644
--- a/llvm/test/Analysis/CostModel/RISCV/rvv-extractelement.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/rvv-extractelement.ll
@@ -12,12 +12,12 @@ define void @extractelement_int(i32 %x) {
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i1_0 = extractelement <4 x i1> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i1_0 = extractelement <8 x i1> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v16i1_0 = extractelement <16 x i1> undef, i32 0
-; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
+; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv2i1_0 = extractelement <vscale x 2 x i1> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv4i1_0 = extractelement <vscale x 4 x i1> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv8i1_0 = extractelement <vscale x 8 x i1> undef, i32 0
-; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
-; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
+; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
+; RV32V-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = extractelement <2 x i8> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = extractelement <4 x i8> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = extractelement <8 x i8> undef, i32 0
@@ -66,12 +66,12 @@ define void @extractelement_int(i32 %x) {
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_1 = extractelement <4 x i1> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_1 = extractelement <8 x i1> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_1 = extractelement <16 x i1> undef, i32 1
-; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
+; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_1 = extractelement <vscale x 2 x i1> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_1 = extractelement <vscale x 4 x i1> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_1 = extractelement <vscale x 8 x i1> undef, i32 1
-; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
-; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
+; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
+; RV32V-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = extractelement <2 x i8> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = extractelement <4 x i8> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = extractelement <8 x i8> undef, i32 1
@@ -120,12 +120,12 @@ define void @extractelement_int(i32 %x) {
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_x = extractelement <4 x i1> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_x = extractelement <8 x i1> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_x = extractelement <16 x i1> undef, i32 %x
-; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
+; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_x = extractelement <vscale x 2 x i1> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_x = extractelement <vscale x 4 x i1> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_x = extractelement <vscale x 8 x i1> undef, i32 %x
-; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
-; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
+; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
+; RV32V-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_x = extractelement <2 x i8> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_x = extractelement <4 x i8> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_x = extractelement <8 x i8> undef, i32 %x
@@ -177,12 +177,12 @@ define void @extractelement_int(i32 %x) {
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i1_0 = extractelement <4 x i1> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i1_0 = extractelement <8 x i1> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v16i1_0 = extractelement <16 x i1> undef, i32 0
-; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
+; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv2i1_0 = extractelement <vscale x 2 x i1> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv4i1_0 = extractelement <vscale x 4 x i1> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv8i1_0 = extractelement <vscale x 8 x i1> undef, i32 0
-; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
-; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
+; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
+; RV64V-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = extractelement <2 x i8> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = extractelement <4 x i8> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = extractelement <8 x i8> undef, i32 0
@@ -231,12 +231,12 @@ define void @extractelement_int(i32 %x) {
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_1 = extractelement <4 x i1> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_1 = extractelement <8 x i1> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_1 = extractelement <16 x i1> undef, i32 1
-; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
+; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_1 = extractelement <vscale x 2 x i1> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_1 = extractelement <vscale x 4 x i1> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_1 = extractelement <vscale x 8 x i1> undef, i32 1
-; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
-; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
+; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
+; RV64V-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = extractelement <2 x i8> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = extractelement <4 x i8> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = extractelement <8 x i8> undef, i32 1
@@ -285,12 +285,12 @@ define void @extractelement_int(i32 %x) {
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_x = extractelement <4 x i1> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_x = extractelement <8 x i1> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_x = extractelement <16 x i1> undef, i32 %x
-; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
+; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_x = extractelement <vscale x 2 x i1> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_x = extractelement <vscale x 4 x i1> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_x = extractelement <vscale x 8 x i1> undef, i32 %x
-; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
-; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
+; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
+; RV64V-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_x = extractelement <2 x i8> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_x = extractelement <4 x i8> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_x = extractelement <8 x i8> undef, i32 %x
@@ -341,13 +341,13 @@ define void @extractelement_int(i32 %x) {
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i1_0 = extractelement <2 x i1> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i1_0 = extractelement <4 x i1> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i1_0 = extractelement <8 x i1> undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v16i1_0 = extractelement <16 x i1> undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v16i1_0 = extractelement <16 x i1> undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv2i1_0 = extractelement <vscale x 2 x i1> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv4i1_0 = extractelement <vscale x 4 x i1> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv8i1_0 = extractelement <vscale x 8 x i1> undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = extractelement <2 x i8> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = extractelement <4 x i8> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = extractelement <8 x i8> undef, i32 0
@@ -395,13 +395,13 @@ define void @extractelement_int(i32 %x) {
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i1_1 = extractelement <2 x i1> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_1 = extractelement <4 x i1> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_1 = extractelement <8 x i1> undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_1 = extractelement <16 x i1> undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_1 = extractelement <16 x i1> undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_1 = extractelement <vscale x 2 x i1> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_1 = extractelement <vscale x 4 x i1> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_1 = extractelement <vscale x 8 x i1> undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = extractelement <2 x i8> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = extractelement <4 x i8> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = extractelement <8 x i8> undef, i32 1
@@ -449,13 +449,13 @@ define void @extractelement_int(i32 %x) {
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i1_x = extractelement <2 x i1> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_x = extractelement <4 x i1> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_x = extractelement <8 x i1> undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_x = extractelement <16 x i1> undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_x = extractelement <16 x i1> undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_x = extractelement <vscale x 2 x i1> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_x = extractelement <vscale x 4 x i1> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_x = extractelement <vscale x 8 x i1> undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_x = extractelement <2 x i8> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_x = extractelement <4 x i8> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_x = extractelement <8 x i8> undef, i32 %x
@@ -506,13 +506,13 @@ define void @extractelement_int(i32 %x) {
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i1_0 = extractelement <2 x i1> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i1_0 = extractelement <4 x i1> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i1_0 = extractelement <8 x i1> undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v16i1_0 = extractelement <16 x i1> undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v16i1_0 = extractelement <16 x i1> undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv2i1_0 = extractelement <vscale x 2 x i1> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv4i1_0 = extractelement <vscale x 4 x i1> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv8i1_0 = extractelement <vscale x 8 x i1> undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = extractelement <2 x i8> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = extractelement <4 x i8> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = extractelement <8 x i8> undef, i32 0
@@ -560,13 +560,13 @@ define void @extractelement_int(i32 %x) {
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i1_1 = extractelement <2 x i1> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_1 = extractelement <4 x i1> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_1 = extractelement <8 x i1> undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_1 = extractelement <16 x i1> undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_1 = extractelement <16 x i1> undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_1 = extractelement <vscale x 2 x i1> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_1 = extractelement <vscale x 4 x i1> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_1 = extractelement <vscale x 8 x i1> undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = extractelement <2 x i8> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = extractelement <4 x i8> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = extractelement <8 x i8> undef, i32 1
@@ -614,13 +614,13 @@ define void @extractelement_int(i32 %x) {
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i1_x = extractelement <2 x i1> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_x = extractelement <4 x i1> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_x = extractelement <8 x i1> undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_x = extractelement <16 x i1> undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_x = extractelement <16 x i1> undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_x = extractelement <vscale x 2 x i1> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_x = extractelement <vscale x 4 x i1> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_x = extractelement <vscale x 8 x i1> undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_x = extractelement <2 x i8> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_x = extractelement <4 x i8> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_x = extractelement <8 x i8> undef, i32 %x
diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-insertelement.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-insertelement.ll
index 5387c8d..8b68480 100644
--- a/llvm/test/Analysis/CostModel/RISCV/rvv-insertelement.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/rvv-insertelement.ll
@@ -12,12 +12,12 @@ define void @insertelement_int(i32 %x) {
; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v4i1_0 = insertelement <4 x i1> undef, i1 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v8i1_0 = insertelement <8 x i1> undef, i1 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v16i1_0 = insertelement <16 x i1> undef, i1 undef, i32 0
-; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
+; RV32V-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv2i1_0 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv4i1_0 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv8i1_0 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 0
-; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
-; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
+; RV32V-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
+; RV32V-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = insertelement <2 x i8> undef, i8 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = insertelement <4 x i8> undef, i8 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = insertelement <8 x i8> undef, i8 undef, i32 0
@@ -66,12 +66,12 @@ define void @insertelement_int(i32 %x) {
; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i1_1 = insertelement <4 x i1> undef, i1 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8i1_1 = insertelement <8 x i1> undef, i1 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_1 = insertelement <16 x i1> undef, i1 undef, i32 1
-; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
+; RV32V-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv2i1_1 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv4i1_1 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv8i1_1 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 1
-; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
-; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
+; RV32V-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
+; RV32V-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = insertelement <2 x i8> undef, i8 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = insertelement <4 x i8> undef, i8 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = insertelement <8 x i8> undef, i8 undef, i32 1
@@ -120,12 +120,12 @@ define void @insertelement_int(i32 %x) {
; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v4i1_x = insertelement <4 x i1> undef, i1 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8i1_x = insertelement <8 x i1> undef, i1 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v16i1_x = insertelement <16 x i1> undef, i1 undef, i32 %x
-; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
+; RV32V-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv2i1_x = insertelement <vscale x 2 x i1> undef, i1 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv4i1_x = insertelement <vscale x 4 x i1> undef, i1 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv8i1_x = insertelement <vscale x 8 x i1> undef, i1 undef, i32 %x
-; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
-; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
+; RV32V-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
+; RV32V-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i8_x = insertelement <2 x i8> undef, i8 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i8_x = insertelement <4 x i8> undef, i8 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i8_x = insertelement <8 x i8> undef, i8 undef, i32 %x
@@ -177,12 +177,12 @@ define void @insertelement_int(i32 %x) {
; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v4i1_0 = insertelement <4 x i1> undef, i1 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v8i1_0 = insertelement <8 x i1> undef, i1 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v16i1_0 = insertelement <16 x i1> undef, i1 undef, i32 0
-; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
+; RV64V-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv2i1_0 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv4i1_0 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv8i1_0 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 0
-; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
-; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
+; RV64V-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
+; RV64V-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = insertelement <2 x i8> undef, i8 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = insertelement <4 x i8> undef, i8 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = insertelement <8 x i8> undef, i8 undef, i32 0
@@ -231,12 +231,12 @@ define void @insertelement_int(i32 %x) {
; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i1_1 = insertelement <4 x i1> undef, i1 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8i1_1 = insertelement <8 x i1> undef, i1 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_1 = insertelement <16 x i1> undef, i1 undef, i32 1
-; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
+; RV64V-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv2i1_1 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv4i1_1 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv8i1_1 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 1
-; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
-; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
+; RV64V-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
+; RV64V-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = insertelement <2 x i8> undef, i8 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = insertelement <4 x i8> undef, i8 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = insertelement <8 x i8> undef, i8 undef, i32 1
@@ -285,12 +285,12 @@ define void @insertelement_int(i32 %x) {
; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v4i1_x = insertelement <4 x i1> undef, i1 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8i1_x = insertelement <8 x i1> undef, i1 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v16i1_x = insertelement <16 x i1> undef, i1 undef, i32 %x
-; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
+; RV64V-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv2i1_x = insertelement <vscale x 2 x i1> undef, i1 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv4i1_x = insertelement <vscale x 4 x i1> undef, i1 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv8i1_x = insertelement <vscale x 8 x i1> undef, i1 undef, i32 %x
-; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
-; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
+; RV64V-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
+; RV64V-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i8_x = insertelement <2 x i8> undef, i8 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i8_x = insertelement <4 x i8> undef, i8 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i8_x = insertelement <8 x i8> undef, i8 undef, i32 %x
@@ -341,13 +341,13 @@ define void @insertelement_int(i32 %x) {
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v2i1_0 = insertelement <2 x i1> undef, i1 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v4i1_0 = insertelement <4 x i1> undef, i1 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v8i1_0 = insertelement <8 x i1> undef, i1 undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v16i1_0 = insertelement <16 x i1> undef, i1 undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v16i1_0 = insertelement <16 x i1> undef, i1 undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv2i1_0 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv4i1_0 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv8i1_0 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = insertelement <2 x i8> undef, i8 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = insertelement <4 x i8> undef, i8 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = insertelement <8 x i8> undef, i8 undef, i32 0
@@ -395,13 +395,13 @@ define void @insertelement_int(i32 %x) {
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2i1_1 = insertelement <2 x i1> undef, i1 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i1_1 = insertelement <4 x i1> undef, i1 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8i1_1 = insertelement <8 x i1> undef, i1 undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_1 = insertelement <16 x i1> undef, i1 undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v16i1_1 = insertelement <16 x i1> undef, i1 undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv2i1_1 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv4i1_1 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv8i1_1 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = insertelement <2 x i8> undef, i8 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = insertelement <4 x i8> undef, i8 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = insertelement <8 x i8> undef, i8 undef, i32 1
@@ -449,13 +449,13 @@ define void @insertelement_int(i32 %x) {
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v2i1_x = insertelement <2 x i1> undef, i1 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v4i1_x = insertelement <4 x i1> undef, i1 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8i1_x = insertelement <8 x i1> undef, i1 undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v16i1_x = insertelement <16 x i1> undef, i1 undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v16i1_x = insertelement <16 x i1> undef, i1 undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv2i1_x = insertelement <vscale x 2 x i1> undef, i1 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv4i1_x = insertelement <vscale x 4 x i1> undef, i1 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv8i1_x = insertelement <vscale x 8 x i1> undef, i1 undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i8_x = insertelement <2 x i8> undef, i8 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i8_x = insertelement <4 x i8> undef, i8 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i8_x = insertelement <8 x i8> undef, i8 undef, i32 %x
@@ -506,13 +506,13 @@ define void @insertelement_int(i32 %x) {
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v2i1_0 = insertelement <2 x i1> undef, i1 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v4i1_0 = insertelement <4 x i1> undef, i1 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v8i1_0 = insertelement <8 x i1> undef, i1 undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v16i1_0 = insertelement <16 x i1> undef, i1 undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v16i1_0 = insertelement <16 x i1> undef, i1 undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv2i1_0 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv4i1_0 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv8i1_0 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = insertelement <2 x i8> undef, i8 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = insertelement <4 x i8> undef, i8 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = insertelement <8 x i8> undef, i8 undef, i32 0
@@ -560,13 +560,13 @@ define void @insertelement_int(i32 %x) {
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2i1_1 = insertelement <2 x i1> undef, i1 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i1_1 = insertelement <4 x i1> undef, i1 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8i1_1 = insertelement <8 x i1> undef, i1 undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_1 = insertelement <16 x i1> undef, i1 undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v16i1_1 = insertelement <16 x i1> undef, i1 undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv2i1_1 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv4i1_1 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv8i1_1 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = insertelement <2 x i8> undef, i8 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = insertelement <4 x i8> undef, i8 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = insertelement <8 x i8> undef, i8 undef, i32 1
@@ -614,13 +614,13 @@ define void @insertelement_int(i32 %x) {
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v2i1_x = insertelement <2 x i1> undef, i1 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v4i1_x = insertelement <4 x i1> undef, i1 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8i1_x = insertelement <8 x i1> undef, i1 undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v16i1_x = insertelement <16 x i1> undef, i1 undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v16i1_x = insertelement <16 x i1> undef, i1 undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv2i1_x = insertelement <vscale x 2 x i1> undef, i1 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv4i1_x = insertelement <vscale x 4 x i1> undef, i1 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv8i1_x = insertelement <vscale x 8 x i1> undef, i1 undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i8_x = insertelement <2 x i8> undef, i8 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i8_x = insertelement <4 x i8> undef, i8 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i8_x = insertelement <8 x i8> undef, i8 undef, i32 %x
diff --git a/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll b/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll
index 46bf315..79ba156 100644
--- a/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll
@@ -197,7 +197,7 @@ define void @broadcast_fixed() #0{
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %41 = shufflevector <32 x i1> undef, <32 x i1> undef, <32 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %42 = shufflevector <64 x i1> undef, <64 x i1> undef, <64 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %43 = shufflevector <128 x i1> undef, <128 x i1> undef, <128 x i32> zeroinitializer
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %ins1 = insertelement <128 x i1> poison, i1 poison, i32 0
+; CHECK-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %ins1 = insertelement <128 x i1> poison, i1 poison, i32 0
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %44 = shufflevector <128 x i1> %ins1, <128 x i1> poison, <128 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %ins2 = insertelement <2 x i8> poison, i8 3, i32 0
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %45 = shufflevector <2 x i8> %ins2, <2 x i8> undef, <2 x i32> zeroinitializer
diff --git a/llvm/test/Analysis/CostModel/RISCV/shuffle-insert_subvector.ll b/llvm/test/Analysis/CostModel/RISCV/shuffle-insert_subvector.ll
index 9a333dc..a91d562 100644
--- a/llvm/test/Analysis/CostModel/RISCV/shuffle-insert_subvector.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/shuffle-insert_subvector.ll
@@ -520,3 +520,69 @@ define void @test_vXi8(<2 x i8> %src16, <4 x i8> %src32, <8 x i8> %src64, <16x i
ret void
}
+
+define void @fixed_m1_in_m2_notail(<8 x i32> %src, <8 x i32> %passthru) vscale_range(2) {
+; CHECK-LABEL: 'fixed_m1_in_m2_notail'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %1 = shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 0, i32 8, i32 9, i32 10, i32 11, i32 5, i32 6, i32 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 10, i32 11, i32 6, i32 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 0, i32 1, i32 2, i32 8, i32 9, i32 10, i32 11, i32 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; SIZE-LABEL: 'fixed_m1_in_m2_notail'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %1 = shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 0, i32 8, i32 9, i32 10, i32 11, i32 5, i32 6, i32 7>
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 10, i32 11, i32 6, i32 7>
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 0, i32 1, i32 2, i32 8, i32 9, i32 10, i32 11, i32 7>
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
+ shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 0, i32 8, i32 9, i32 10, i32 11, i32 5, i32 6, i32 7>
+ shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 10, i32 11, i32 6, i32 7>
+ shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 0, i32 1, i32 2, i32 8, i32 9, i32 10, i32 11, i32 7>
+ shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+ ret void
+}
+
+define void @fixed_m2_in_m4_notail(<8 x i64> %src, <8 x i64> %passthru) vscale_range(2) {
+; CHECK-LABEL: 'fixed_m2_in_m4_notail'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %1 = shufflevector <8 x i64> %src, <8 x i64> %passthru, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 41 for instruction: %2 = shufflevector <8 x i64> %src, <8 x i64> %passthru, <8 x i32> <i32 0, i32 8, i32 8, i32 10, i32 11, i32 5, i32 6, i32 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 41 for instruction: %3 = shufflevector <8 x i64> %src, <8 x i64> %passthru, <8 x i32> <i32 0, i32 1, i32 8, i32 8, i32 10, i32 11, i32 6, i32 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 41 for instruction: %4 = shufflevector <8 x i64> %src, <8 x i64> %passthru, <8 x i32> <i32 0, i32 1, i32 2, i32 8, i32 8, i32 10, i32 11, i32 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 41 for instruction: %5 = shufflevector <8 x i64> %src, <8 x i64> %passthru, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 8, i32 10, i32 11>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; SIZE-LABEL: 'fixed_m2_in_m4_notail'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %1 = shufflevector <8 x i64> %src, <8 x i64> %passthru, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
+; SIZE-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %2 = shufflevector <8 x i64> %src, <8 x i64> %passthru, <8 x i32> <i32 0, i32 8, i32 8, i32 10, i32 11, i32 5, i32 6, i32 7>
+; SIZE-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %3 = shufflevector <8 x i64> %src, <8 x i64> %passthru, <8 x i32> <i32 0, i32 1, i32 8, i32 8, i32 10, i32 11, i32 6, i32 7>
+; SIZE-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %4 = shufflevector <8 x i64> %src, <8 x i64> %passthru, <8 x i32> <i32 0, i32 1, i32 2, i32 8, i32 8, i32 10, i32 11, i32 7>
+; SIZE-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %5 = shufflevector <8 x i64> %src, <8 x i64> %passthru, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 8, i32 10, i32 11>
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ shufflevector <8 x i64> %src, <8 x i64> %passthru, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
+ shufflevector <8 x i64> %src, <8 x i64> %passthru, <8 x i32> <i32 0, i32 8, i32 8, i32 10, i32 11, i32 5, i32 6, i32 7>
+ shufflevector <8 x i64> %src, <8 x i64> %passthru, <8 x i32> <i32 0, i32 1, i32 8, i32 8, i32 10, i32 11, i32 6, i32 7>
+ shufflevector <8 x i64> %src, <8 x i64> %passthru, <8 x i32> <i32 0, i32 1, i32 2, i32 8, i32 8, i32 10, i32 11, i32 7>
+ shufflevector <8 x i64> %src, <8 x i64> %passthru, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 8, i32 10, i32 11>
+ ret void
+}
+
+define void @fixed_m1_in_m2_tail(<8 x i32> %src, <8 x i32> %passthru) vscale_range(2) {
+; CHECK-LABEL: 'fixed_m1_in_m2_tail'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %1 = shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 8, i32 9, i32 10, i32 4, i32 4, i32 5, i32 6, i32 8>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %2 = shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 8, i32 10, i32 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; SIZE-LABEL: 'fixed_m1_in_m2_tail'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %1 = shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 8, i32 9, i32 10, i32 4, i32 4, i32 5, i32 6, i32 8>
+; SIZE-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %2 = shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 8, i32 10, i32 7>
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 8, i32 9, i32 10, i32 4, i32 4, i32 5, i32 6, i32 8>
+ shufflevector <8 x i32> %src, <8 x i32> %passthru, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 8, i32 10, i32 7>
+ ret void
+}
diff --git a/llvm/test/Analysis/CostModel/X86/intrinsic-cost-kinds.ll b/llvm/test/Analysis/CostModel/X86/intrinsic-cost-kinds.ll
index f5936a7..10786e1 100644
--- a/llvm/test/Analysis/CostModel/X86/intrinsic-cost-kinds.ll
+++ b/llvm/test/Analysis/CostModel/X86/intrinsic-cost-kinds.ll
@@ -178,7 +178,7 @@ define void @log2(float %a, <16 x float> %va) {
ret void
}
-define void @constrained_fadd(float %a, <16 x float> %va) {
+define void @constrained_fadd(float %a, <16 x float> %va) strictfp {
; THRU-LABEL: 'constrained_fadd'
; THRU-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
; THRU-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
diff --git a/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll b/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll
index 4ebb1e3..09fbd68 100644
--- a/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll
+++ b/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll
@@ -25,6 +25,8 @@ define i32 @trivially_free() {
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a7 = call i1 @llvm.allow.ubsan.check(i8 123)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a8 = call i1 @llvm.allow.runtime.check(metadata !"test_check")
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
@@ -45,6 +47,8 @@ define i32 @trivially_free() {
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a7 = call i1 @llvm.allow.ubsan.check(i8 123)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a8 = call i1 @llvm.allow.runtime.check(metadata !"test_check")
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
@@ -65,6 +69,8 @@ define i32 @trivially_free() {
call void @llvm.lifetime.end.p0(i64 1, ptr undef)
%a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 1, i1 1, i1 1)
%a6 = call ptr @llvm.ptr.annotation.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
+ %a7 = call i1 @llvm.allow.ubsan.check(i8 123)
+ %a8 = call i1 @llvm.allow.runtime.check(metadata !"test_check")
call void @llvm.var.annotation(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
ret i32 undef
}
diff --git a/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll b/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll
index 5d47e48..3e78c62 100644
--- a/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll
+++ b/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll
@@ -24,6 +24,8 @@ define i32 @trivially_free() {
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a7 = call i1 @llvm.allow.ubsan.check(i8 123)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a8 = call i1 @llvm.allow.runtime.check(metadata !"test_check")
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
; CHECK-THROUGHPUT-LABEL: 'trivially_free'
@@ -44,6 +46,8 @@ define i32 @trivially_free() {
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a7 = call i1 @llvm.allow.ubsan.check(i8 123)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a8 = call i1 @llvm.allow.runtime.check(metadata !"test_check")
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
%a0 = call i32 @llvm.annotation.i32(i32 undef, ptr undef, ptr undef, i32 undef)
@@ -64,6 +68,8 @@ define i32 @trivially_free() {
%a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 1, i1 1, i1 1)
%a6 = call ptr @llvm.ptr.annotation.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
call void @llvm.var.annotation(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
+ %a7 = call i1 @llvm.allow.ubsan.check(i8 123)
+ %a8 = call i1 @llvm.allow.runtime.check(metadata !"test_check")
ret i32 undef
}
diff --git a/llvm/test/Analysis/Lint/crash_empty_iterator.ll b/llvm/test/Analysis/Lint/crash_empty_iterator.ll
new file mode 100644
index 0000000..2fbecbc
--- /dev/null
+++ b/llvm/test/Analysis/Lint/crash_empty_iterator.ll
@@ -0,0 +1,22 @@
+; RUN: opt -passes="lint" -S < %s | FileCheck %s
+
+; After commit 2fe81edef6f0b
+; ("[NFC][RemoveDIs] Insert instruction using iterators in Transforms/"),
+; this crashed in FindInsertedValue when dereferencing an empty
+; optional iterator.
+; Just check that it no longer crashes.
+
+; CHECK-LABEL: @test1
+
+%struct = type { i32, i32 }
+
+define void @test1() {
+entry:
+ %.fca.1.insert = insertvalue %struct zeroinitializer, i32 0, 1
+ %0 = extractvalue %struct %.fca.1.insert, 0
+ %1 = tail call %struct @foo(i32 %0)
+ ret void
+}
+
+declare %struct @foo(i32)
+
diff --git a/llvm/test/Analysis/MemorySSA/allow-check.ll b/llvm/test/Analysis/MemorySSA/allow-check.ll
new file mode 100644
index 0000000..dcdad00
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/allow-check.ll
@@ -0,0 +1,29 @@
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s --implicit-check-not=MemoryDef
+;
+; Ensure that the llvm.allow.*.check intrinsics are treated as neither reading nor writing memory.
+
+target triple = "aarch64-linux"
+
+define i1 @test_runtime(ptr %a) local_unnamed_addr {
+entry:
+; CHECK: 1 = MemoryDef(liveOnEntry)
+ store i32 4, ptr %a, align 4
+ %allow = call i1 @llvm.allow.runtime.check(metadata !"test_check")
+ %0 = load i32, ptr %a, align 4
+; CHECK: MemoryUse(1)
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.runtime.check(metadata)
+
+define i1 @test_ubsan(ptr %a) local_unnamed_addr {
+entry:
+; CHECK: 1 = MemoryDef(liveOnEntry)
+ store i32 4, ptr %a, align 4
+ %allow = call i1 @llvm.allow.ubsan.check(i8 7)
+ %0 = load i32, ptr %a, align 4
+; CHECK: MemoryUse(1)
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.ubsan.check(i8)
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
index 333ec6d..26c85e8 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
@@ -197,78 +197,38 @@ bb:
ret void
}
-; CHECK: DIVERGENT: %tmp0 = call <2 x i32> @llvm.amdgcn.global.load.tr.v2i32(ptr addrspace(1) %gep)
+; CHECK: DIVERGENT: %tmp0 = call <2 x i32> @llvm.amdgcn.global.load.tr.b64.v2i32(ptr addrspace(1) %addr)
define amdgpu_kernel void @global_load_tr_b64_v2i32(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call <2 x i32> @llvm.amdgcn.global.load.tr.v2i32(ptr addrspace(1) %gep)
+ %tmp0 = call <2 x i32> @llvm.amdgcn.global.load.tr.b64.v2i32(ptr addrspace(1) %addr)
store <2 x i32> %tmp0, ptr addrspace(1) %out, align 8
ret void
}
-; CHECK: DIVERGENT: %tmp0 = call <8 x i16> @llvm.amdgcn.global.load.tr.v8i16(ptr addrspace(1) %gep)
+; CHECK: DIVERGENT: %tmp0 = call <8 x i16> @llvm.amdgcn.global.load.tr.b128.v8i16(ptr addrspace(1) %addr)
define amdgpu_kernel void @global_load_tr_b128_v8i16(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call <8 x i16> @llvm.amdgcn.global.load.tr.v8i16(ptr addrspace(1) %gep)
+ %tmp0 = call <8 x i16> @llvm.amdgcn.global.load.tr.b128.v8i16(ptr addrspace(1) %addr)
store <8 x i16> %tmp0, ptr addrspace(1) %out, align 16
ret void
}
-; CHECK: DIVERGENT: %tmp0 = call <8 x half> @llvm.amdgcn.global.load.tr.v8f16(ptr addrspace(1) %gep)
-define amdgpu_kernel void @global_load_tr_b128_v8f16(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
-bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call <8 x half> @llvm.amdgcn.global.load.tr.v8f16(ptr addrspace(1) %gep)
- store <8 x half> %tmp0, ptr addrspace(1) %out, align 16
- ret void
-}
-
-; CHECK: DIVERGENT: %tmp0 = call <8 x bfloat> @llvm.amdgcn.global.load.tr.v8bf16(ptr addrspace(1) %gep)
-define amdgpu_kernel void @global_load_tr_b128_v8bf16(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
-bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call <8 x bfloat> @llvm.amdgcn.global.load.tr.v8bf16(ptr addrspace(1) %gep)
- store <8 x bfloat> %tmp0, ptr addrspace(1) %out, align 16
- ret void
-}
-
-; CHECK: DIVERGENT: %tmp0 = call i32 @llvm.amdgcn.global.load.tr.i32(ptr addrspace(1) %gep)
+; CHECK: DIVERGENT: %tmp0 = call i32 @llvm.amdgcn.global.load.tr.b64.i32(ptr addrspace(1) %addr)
define amdgpu_kernel void @global_load_tr_b64_i32(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call i32 @llvm.amdgcn.global.load.tr.i32(ptr addrspace(1) %gep)
+ %tmp0 = call i32 @llvm.amdgcn.global.load.tr.b64.i32(ptr addrspace(1) %addr)
store i32 %tmp0, ptr addrspace(1) %out, align 4
ret void
}
-; CHECK: DIVERGENT: %tmp0 = call <4 x i16> @llvm.amdgcn.global.load.tr.v4i16(ptr addrspace(1) %gep)
-define amdgpu_kernel void @global_load_tr_b128_v4i16_(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
+; CHECK: DIVERGENT: %tmp0 = call <4 x i16> @llvm.amdgcn.global.load.tr.b128.v4i16(ptr addrspace(1) %addr)
+define amdgpu_kernel void @global_load_tr_b128_v4i16(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call <4 x i16> @llvm.amdgcn.global.load.tr.v4i16(ptr addrspace(1) %gep)
+ %tmp0 = call <4 x i16> @llvm.amdgcn.global.load.tr.b128.v4i16(ptr addrspace(1) %addr)
store <4 x i16> %tmp0, ptr addrspace(1) %out, align 8
ret void
}
-; CHECK: DIVERGENT: %tmp0 = call <4 x half> @llvm.amdgcn.global.load.tr.v4f16(ptr addrspace(1) %gep)
-define amdgpu_kernel void @global_load_tr_b128_v4f16(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
-bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call <4 x half> @llvm.amdgcn.global.load.tr.v4f16(ptr addrspace(1) %gep)
- store <4 x half> %tmp0, ptr addrspace(1) %out, align 8
- ret void
-}
-
-; CHECK: DIVERGENT: %tmp0 = call <4 x bfloat> @llvm.amdgcn.global.load.tr.v4bf16(ptr addrspace(1) %gep)
-define amdgpu_kernel void @global_load_tr_b128_v4bf16(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
-bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call <4 x bfloat> @llvm.amdgcn.global.load.tr.v4bf16(ptr addrspace(1) %gep)
- store <4 x bfloat> %tmp0, ptr addrspace(1) %out, align 8
- ret void
-}
-
declare i32 @llvm.amdgcn.ds.swizzle(i32, i32) #1
declare i32 @llvm.amdgcn.permlane16(i32, i32, i32, i32, i1, i1) #1
declare i32 @llvm.amdgcn.permlanex16(i32, i32, i32, i32, i1, i1) #1
@@ -296,14 +256,10 @@ declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.fp8.bf8(<2 x i32>, <4 x i32
declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf8.fp8(<2 x i32>, <4 x i32>, <8 x float>, i16)
declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf8.bf8(<2 x i32>, <4 x i32>, <8 x float>, i16)
-declare <2 x i32> @llvm.amdgcn.global.load.tr.v2i32(ptr addrspace(1))
-declare <8 x i16> @llvm.amdgcn.global.load.tr.v8i16(ptr addrspace(1))
-declare <8 x half> @llvm.amdgcn.global.load.tr.v8f16(ptr addrspace(1))
-declare <8 x bfloat> @llvm.amdgcn.global.load.tr.v8bf16(ptr addrspace(1))
-declare i32 @llvm.amdgcn.global.load.tr.i32(ptr addrspace(1))
-declare <4 x i16> @llvm.amdgcn.global.load.tr.v4i16(ptr addrspace(1))
-declare <4 x half> @llvm.amdgcn.global.load.tr.v4f16(ptr addrspace(1))
-declare <4 x bfloat> @llvm.amdgcn.global.load.tr.v4bf16(ptr addrspace(1))
+declare <2 x i32> @llvm.amdgcn.global.load.tr.b64.v2i32(ptr addrspace(1))
+declare <8 x i16> @llvm.amdgcn.global.load.tr.b128.v8i16(ptr addrspace(1))
+declare i32 @llvm.amdgcn.global.load.tr.b64.i32(ptr addrspace(1))
+declare <4 x i16> @llvm.amdgcn.global.load.tr.b128.v4i16(ptr addrspace(1))
attributes #0 = { nounwind convergent }
attributes #1 = { nounwind readnone convergent }
diff --git a/llvm/test/Analysis/ValueTracking/known-non-zero.ll b/llvm/test/Analysis/ValueTracking/known-non-zero.ll
index d804fe9..0159050 100644
--- a/llvm/test/Analysis/ValueTracking/known-non-zero.ll
+++ b/llvm/test/Analysis/ValueTracking/known-non-zero.ll
@@ -1292,4 +1292,162 @@ true:
false:
ret i1 %ne
}
+
+define <2 x i1> @range_metadata_vec(ptr %p, <2 x i32> %x) {
+; CHECK-LABEL: @range_metadata_vec(
+; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true>
+;
+ %v = load <2 x i32>, ptr %p, !range !{i32 1, i32 100}
+ %or = or <2 x i32> %v, %x
+ %cmp = icmp ne <2 x i32> %or, zeroinitializer
+ ret <2 x i1> %cmp
+}
+
+define i1 @range_attr(i8 range(i8 1, 0) %x, i8 %y) {
+; CHECK-LABEL: @range_attr(
+; CHECK-NEXT: ret i1 false
+;
+ %or = or i8 %y, %x
+ %cmp = icmp eq i8 %or, 0
+ ret i1 %cmp
+}
+
+define i1 @neg_range_attr(i8 range(i8 -1, 1) %x, i8 %y) {
+; CHECK-LABEL: @neg_range_attr(
+; CHECK-NEXT: [[I:%.*]] = or i8 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[I]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %or = or i8 %y, %x
+ %cmp = icmp eq i8 %or, 0
+ ret i1 %cmp
+}
+
+declare range(i8 1, 0) i8 @returns_non_zero_range_helper()
+declare range(i8 -1, 1) i8 @returns_contain_zero_range_helper()
+
+define i1 @range_return(i8 %y) {
+; CHECK-LABEL: @range_return(
+; CHECK-NEXT: [[I:%.*]] = call i8 @returns_non_zero_range_helper()
+; CHECK-NEXT: ret i1 false
+;
+ %x = call i8 @returns_non_zero_range_helper()
+ %or = or i8 %y, %x
+ %cmp = icmp eq i8 %or, 0
+ ret i1 %cmp
+}
+
+define i1 @neg_range_return(i8 %y) {
+; CHECK-LABEL: @neg_range_return(
+; CHECK-NEXT: [[I:%.*]] = call i8 @returns_contain_zero_range_helper()
+; CHECK-NEXT: [[OR:%.*]] = or i8 [[Y:%.*]], [[I]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[OR]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %x = call i8 @returns_contain_zero_range_helper()
+ %or = or i8 %y, %x
+ %cmp = icmp eq i8 %or, 0
+ ret i1 %cmp
+}
+
+declare i8 @returns_i8_helper()
+
+define i1 @range_call(i8 %y) {
+; CHECK-LABEL: @range_call(
+; CHECK-NEXT: [[I:%.*]] = call range(i8 1, 0) i8 @returns_i8_helper()
+; CHECK-NEXT: ret i1 false
+;
+ %x = call range(i8 1, 0) i8 @returns_i8_helper()
+ %or = or i8 %y, %x
+ %cmp = icmp eq i8 %or, 0
+ ret i1 %cmp
+}
+
+define i1 @neg_range_call(i8 %y) {
+; CHECK-LABEL: @neg_range_call(
+; CHECK-NEXT: [[I:%.*]] = call range(i8 -1, 1) i8 @returns_i8_helper()
+; CHECK-NEXT: [[OR:%.*]] = or i8 [[Y:%.*]], [[I]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[OR]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %x = call range(i8 -1, 1) i8 @returns_i8_helper()
+ %or = or i8 %y, %x
+ %cmp = icmp eq i8 %or, 0
+ ret i1 %cmp
+}
+
+define <2 x i1> @range_attr_vec(<2 x i8> range(i8 1, 0) %x, <2 x i8> %y) {
+; CHECK-LABEL: @range_attr_vec(
+; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true>
+;
+ %or = or <2 x i8> %y, %x
+ %cmp = icmp ne <2 x i8> %or, zeroinitializer
+ ret <2 x i1> %cmp
+}
+
+define <2 x i1> @neg_range_attr_vec(<2 x i8> range(i8 -1, 1) %x, <2 x i8> %y) {
+; CHECK-LABEL: @neg_range_attr_vec(
+; CHECK-NEXT: [[OR:%.*]] = or <2 x i8> [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i8> [[OR]], zeroinitializer
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %or = or <2 x i8> %y, %x
+ %cmp = icmp ne <2 x i8> %or, zeroinitializer
+ ret <2 x i1> %cmp
+}
+
+declare range(i8 1, 0) <2 x i8> @returns_non_zero_range_helper_vec()
+declare range(i8 -1, 1) <2 x i8> @returns_contain_zero_range_helper_vec()
+
+define <2 x i1> @range_return_vec(<2 x i8> %y) {
+; CHECK-LABEL: @range_return_vec(
+; CHECK-NEXT: [[I:%.*]] = call <2 x i8> @returns_non_zero_range_helper_vec()
+; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true>
+;
+ %x = call <2 x i8> @returns_non_zero_range_helper_vec()
+ %or = or <2 x i8> %y, %x
+ %cmp = icmp ne <2 x i8> %or, zeroinitializer
+ ret <2 x i1> %cmp
+}
+
+define <2 x i1> @neg_range_return_vec(<2 x i8> %y) {
+; CHECK-LABEL: @neg_range_return_vec(
+; CHECK-NEXT: [[I:%.*]] = call <2 x i8> @returns_contain_zero_range_helper_vec()
+; CHECK-NEXT: [[OR:%.*]] = or <2 x i8> [[Y:%.*]], [[I]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i8> [[OR]], zeroinitializer
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %x = call <2 x i8> @returns_contain_zero_range_helper_vec()
+ %or = or <2 x i8> %y, %x
+ %cmp = icmp ne <2 x i8> %or, zeroinitializer
+ ret <2 x i1> %cmp
+}
+
+declare <2 x i8> @returns_i8_helper_vec()
+
+define <2 x i1> @range_call_vec(<2 x i8> %y) {
+; CHECK-LABEL: @range_call_vec(
+; CHECK-NEXT: [[I:%.*]] = call range(i8 1, 0) <2 x i8> @returns_i8_helper_vec()
+; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true>
+;
+ %x = call range(i8 1, 0) <2 x i8> @returns_i8_helper_vec()
+ %or = or <2 x i8> %y, %x
+ %cmp = icmp ne <2 x i8> %or, zeroinitializer
+ ret <2 x i1> %cmp
+}
+
+define <2 x i1> @neg_range_call_vec(<2 x i8> %y) {
+; CHECK-LABEL: @neg_range_call_vec(
+; CHECK-NEXT: [[I:%.*]] = call range(i8 -1, 1) <2 x i8> @returns_i8_helper_vec()
+; CHECK-NEXT: [[OR:%.*]] = or <2 x i8> [[Y:%.*]], [[I]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i8> [[OR]], zeroinitializer
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %x = call range(i8 -1, 1) <2 x i8> @returns_i8_helper_vec()
+ %or = or <2 x i8> %y, %x
+ %cmp = icmp ne <2 x i8> %or, zeroinitializer
+ ret <2 x i1> %cmp
+}
+
+
declare i32 @llvm.experimental.get.vector.length.i32(i32, i32, i1)
diff --git a/llvm/test/Analysis/ValueTracking/knownbits-select-from-cond.ll b/llvm/test/Analysis/ValueTracking/knownbits-select-from-cond.ll
new file mode 100644
index 0000000..c3343edf
--- /dev/null
+++ b/llvm/test/Analysis/ValueTracking/knownbits-select-from-cond.ll
@@ -0,0 +1,81 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=instcombine -S < %s | FileCheck %s
+
+define i8 @select_condition_implies_highbits_op1(i8 %xx, i8 noundef %y) {
+; CHECK-LABEL: @select_condition_implies_highbits_op1(
+; CHECK-NEXT: [[X:%.*]] = and i8 [[XX:%.*]], 15
+; CHECK-NEXT: [[COND:%.*]] = icmp ult i8 [[Y:%.*]], 3
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND]], i8 [[Y]], i8 [[X]]
+; CHECK-NEXT: [[R:%.*]] = or disjoint i8 [[SEL]], 32
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %x = and i8 %xx, 15
+ %cond = icmp ult i8 %y, 3
+ %sel = select i1 %cond, i8 %y, i8 %x
+ %r = add i8 %sel, 32
+ ret i8 %r
+}
+
+define i8 @select_condition_implies_highbits_op1_maybe_undef_fail(i8 %xx, i8 %y) {
+; CHECK-LABEL: @select_condition_implies_highbits_op1_maybe_undef_fail(
+; CHECK-NEXT: [[X:%.*]] = and i8 [[XX:%.*]], 15
+; CHECK-NEXT: [[COND:%.*]] = icmp ult i8 [[Y:%.*]], 3
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND]], i8 [[Y]], i8 [[X]]
+; CHECK-NEXT: [[R:%.*]] = add i8 [[SEL]], 32
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %x = and i8 %xx, 15
+ %cond = icmp ult i8 %y, 3
+ %sel = select i1 %cond, i8 %y, i8 %x
+ %r = add i8 %sel, 32
+ ret i8 %r
+}
+
+define i8 @select_condition_implies_highbits_op2(i8 %xx, i8 noundef %y) {
+; CHECK-LABEL: @select_condition_implies_highbits_op2(
+; CHECK-NEXT: [[X:%.*]] = and i8 [[XX:%.*]], 15
+; CHECK-NEXT: [[COND:%.*]] = icmp ugt i8 [[Y:%.*]], 3
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND]], i8 [[X]], i8 [[Y]]
+; CHECK-NEXT: [[R:%.*]] = or disjoint i8 [[SEL]], 32
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %x = and i8 %xx, 15
+ %cond = icmp ugt i8 %y, 3
+ %sel = select i1 %cond, i8 %x, i8 %y
+ %r = add i8 %sel, 32
+ ret i8 %r
+}
+
+define i8 @select_condition_implies_highbits_op1_and(i8 %xx, i8 noundef %y, i1 %other_cond) {
+; CHECK-LABEL: @select_condition_implies_highbits_op1_and(
+; CHECK-NEXT: [[X:%.*]] = and i8 [[XX:%.*]], 15
+; CHECK-NEXT: [[COND0:%.*]] = icmp ult i8 [[Y:%.*]], 3
+; CHECK-NEXT: [[COND:%.*]] = and i1 [[COND0]], [[OTHER_COND:%.*]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND]], i8 [[Y]], i8 [[X]]
+; CHECK-NEXT: [[R:%.*]] = or disjoint i8 [[SEL]], 32
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %x = and i8 %xx, 15
+ %cond0 = icmp ult i8 %y, 3
+ %cond = and i1 %cond0, %other_cond
+ %sel = select i1 %cond, i8 %y, i8 %x
+ %r = add i8 %sel, 32
+ ret i8 %r
+}
+
+define i8 @select_condition_implies_highbits_op2_or(i8 %xx, i8 noundef %y, i1 %other_cond) {
+; CHECK-LABEL: @select_condition_implies_highbits_op2_or(
+; CHECK-NEXT: [[X:%.*]] = and i8 [[XX:%.*]], 15
+; CHECK-NEXT: [[COND0:%.*]] = icmp ugt i8 [[Y:%.*]], 3
+; CHECK-NEXT: [[COND:%.*]] = or i1 [[COND0]], [[OTHER_COND:%.*]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND]], i8 [[X]], i8 [[Y]]
+; CHECK-NEXT: [[R:%.*]] = or disjoint i8 [[SEL]], 32
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %x = and i8 %xx, 15
+ %cond0 = icmp ugt i8 %y, 3
+ %cond = or i1 %cond0, %other_cond
+ %sel = select i1 %cond, i8 %x, i8 %y
+ %r = add i8 %sel, 32
+ ret i8 %r
+}
diff --git a/llvm/test/Assembler/debug-info.ll b/llvm/test/Assembler/debug-info.ll
index 419623a..06144b2 100644
--- a/llvm/test/Assembler/debug-info.ll
+++ b/llvm/test/Assembler/debug-info.ll
@@ -1,8 +1,8 @@
; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s
; RUN: verify-uselistorder %s
-; CHECK: !named = !{!0, !0, !1, !2, !3, !4, !5, !6, !7, !8, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !27, !28, !29, !30, !31, !32, !33, !34, !35, !36, !37, !38, !39}
-!named = !{!0, !1, !2, !3, !4, !5, !6, !7, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !28, !29, !30, !31, !32, !33, !34, !35, !36, !37, !38, !39, !40, !41, !42}
+; CHECK: !named = !{!0, !0, !1, !2, !3, !4, !5, !6, !7, !8, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !27, !28, !29, !30, !31, !32, !33, !34, !35, !36, !37, !38, !39, !40, !41, !42, !43}
+!named = !{!0, !1, !2, !3, !4, !5, !6, !7, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !28, !29, !30, !31, !32, !33, !34, !35, !36, !37, !38, !39, !40, !41, !42, !43, !44, !45, !46}
; CHECK: !0 = !DISubrange(count: 3, lowerBound: 0)
; CHECK-NEXT: !1 = !DISubrange(count: 3, lowerBound: 4)
@@ -99,3 +99,15 @@
; CHECK-NEXT: !39 = !DIBasicType(name: "u64.le", size: 64, align: 1, encoding: DW_ATE_unsigned, flags: DIFlagLittleEndian)
!41 = !DIBasicType(name: "u64.be", size: 64, align: 1, encoding: DW_ATE_unsigned, flags: DIFlagBigEndian)
!42 = !DIBasicType(name: "u64.le", size: 64, align: 1, encoding: DW_ATE_unsigned, flags: DIFlagLittleEndian)
+
+; CHECK: !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !13, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234, ptrAuthIsaPointer: false, ptrAuthAuthenticatesNullValues: false)
+!43 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !15, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234)
+
+; CHECK: !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !13, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234, ptrAuthIsaPointer: true, ptrAuthAuthenticatesNullValues: false)
+!44 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !15, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234, ptrAuthIsaPointer: true)
+
+; CHECK: !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !13, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234, ptrAuthIsaPointer: false, ptrAuthAuthenticatesNullValues: true)
+!45 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !15, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234, ptrAuthAuthenticatesNullValues: true)
+
+; CHECK: !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !13, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234, ptrAuthIsaPointer: true, ptrAuthAuthenticatesNullValues: true)
+!46 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !15, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234, ptrAuthIsaPointer: true, ptrAuthAuthenticatesNullValues: true)
diff --git a/llvm/test/Assembler/flags.ll b/llvm/test/Assembler/flags.ll
index 04bddd0..d75b0cb 100644
--- a/llvm/test/Assembler/flags.ll
+++ b/llvm/test/Assembler/flags.ll
@@ -261,3 +261,51 @@ define i64 @test_or(i64 %a, i64 %b) {
%res = or disjoint i64 %a, %b
ret i64 %res
}
+
+define i32 @test_trunc_signed(i64 %a) {
+; CHECK: %res = trunc nsw i64 %a to i32
+ %res = trunc nsw i64 %a to i32
+ ret i32 %res
+}
+
+define i32 @test_trunc_unsigned(i64 %a) {
+; CHECK: %res = trunc nuw i64 %a to i32
+ %res = trunc nuw i64 %a to i32
+ ret i32 %res
+}
+
+define i32 @test_trunc_both(i64 %a) {
+; CHECK: %res = trunc nuw nsw i64 %a to i32
+ %res = trunc nuw nsw i64 %a to i32
+ ret i32 %res
+}
+
+define i32 @test_trunc_both_reversed(i64 %a) {
+; CHECK: %res = trunc nuw nsw i64 %a to i32
+ %res = trunc nsw nuw i64 %a to i32
+ ret i32 %res
+}
+
+define <2 x i32> @test_trunc_signed_vector(<2 x i64> %a) {
+; CHECK: %res = trunc nsw <2 x i64> %a to <2 x i32>
+ %res = trunc nsw <2 x i64> %a to <2 x i32>
+ ret <2 x i32> %res
+}
+
+define <2 x i32> @test_trunc_unsigned_vector(<2 x i64> %a) {
+; CHECK: %res = trunc nuw <2 x i64> %a to <2 x i32>
+ %res = trunc nuw <2 x i64> %a to <2 x i32>
+ ret <2 x i32> %res
+}
+
+define <2 x i32> @test_trunc_both_vector(<2 x i64> %a) {
+; CHECK: %res = trunc nuw nsw <2 x i64> %a to <2 x i32>
+ %res = trunc nuw nsw <2 x i64> %a to <2 x i32>
+ ret <2 x i32> %res
+}
+
+define <2 x i32> @test_trunc_both_reversed_vector(<2 x i64> %a) {
+; CHECK: %res = trunc nuw nsw <2 x i64> %a to <2 x i32>
+ %res = trunc nsw nuw <2 x i64> %a to <2 x i32>
+ ret <2 x i32> %res
+}
diff --git a/llvm/test/Assembler/getelementptr.ll b/llvm/test/Assembler/getelementptr.ll
index 50695a6..45c6a2d 100644
--- a/llvm/test/Assembler/getelementptr.ll
+++ b/llvm/test/Assembler/getelementptr.ll
@@ -23,24 +23,26 @@
@PR23753_b = global ptr getelementptr (i8, ptr @PR23753_a, i64 ptrtoint (ptr @PR23753_a to i64))
; CHECK: @PR23753_b = global ptr getelementptr (i8, ptr @PR23753_a, i64 ptrtoint (ptr @PR23753_a to i64))
-; Verify that inrange on an index inhibits over-indexed getelementptr folding.
+; Verify that inrange doesn't inhibit over-indexed getelementptr folding,
+; but does inhibit combining two GEPs where the inner one has inrange (such
+; GEPs will instead be combined once DataLayout is available).
@nestedarray = global [2 x [4 x ptr]] zeroinitializer
-; CHECK: @nestedarray.1 = alias ptr, getelementptr inbounds ([2 x [4 x ptr]], ptr @nestedarray, inrange i32 0, i64 1, i32 0)
-@nestedarray.1 = alias ptr, getelementptr inbounds ([2 x [4 x ptr]], ptr @nestedarray, inrange i32 0, i32 0, i32 4)
+; CHECK: @nestedarray.1 = alias ptr, getelementptr inbounds inrange(-32, 32) ([2 x [4 x ptr]], ptr @nestedarray, i32 0, i64 1, i32 0)
+@nestedarray.1 = alias ptr, getelementptr inbounds inrange(-32, 32) ([2 x [4 x ptr]], ptr @nestedarray, i32 0, i32 0, i32 4)
-; CHECK: @nestedarray.2 = alias ptr, getelementptr inbounds ([2 x [4 x ptr]], ptr @nestedarray, i32 0, inrange i32 0, i32 4)
-@nestedarray.2 = alias ptr, getelementptr inbounds ([2 x [4 x ptr]], ptr @nestedarray, i32 0, inrange i32 0, i32 4)
+; CHECK: @nestedarray.2 = alias ptr, getelementptr inbounds inrange(0, 1) ([2 x [4 x ptr]], ptr @nestedarray, i32 0, i64 1, i32 0)
+@nestedarray.2 = alias ptr, getelementptr inbounds inrange(0, 1) ([2 x [4 x ptr]], ptr @nestedarray, i32 0, i32 0, i32 4)
-; CHECK: @nestedarray.3 = alias ptr, getelementptr inbounds ([2 x [4 x ptr]], ptr @nestedarray, i32 0, inrange i32 0)
-@nestedarray.3 = alias ptr, getelementptr inbounds ([4 x ptr], ptr getelementptr inbounds ([2 x [4 x ptr]], ptr @nestedarray, i32 0, inrange i32 0), i32 0, i32 0)
+; CHECK: @nestedarray.3 = alias ptr, getelementptr inbounds inrange(0, 4) ([4 x ptr], ptr @nestedarray, i32 0, i32 0)
+@nestedarray.3 = alias ptr, getelementptr inbounds inrange(0, 4) ([4 x ptr], ptr getelementptr inbounds ([2 x [4 x ptr]], ptr @nestedarray, i32 0, i32 0), i32 0, i32 0)
-; CHECK: @nestedarray.4 = alias ptr, getelementptr inbounds ([2 x [4 x ptr]], ptr @nestedarray, i32 0, i32 1, i32 0)
-@nestedarray.4 = alias ptr, getelementptr inbounds ([4 x ptr], ptr getelementptr inbounds ([2 x [4 x ptr]], ptr @nestedarray, i32 0, inrange i32 0), i32 1, i32 0)
+; CHECK: @nestedarray.4 = alias ptr, getelementptr inbounds ([4 x ptr], ptr getelementptr inbounds inrange(0, 4) ([2 x [4 x ptr]], ptr @nestedarray, i32 0, i32 0), i32 1, i32 0)
+@nestedarray.4 = alias ptr, getelementptr inbounds ([4 x ptr], ptr getelementptr inbounds inrange(0, 4) ([2 x [4 x ptr]], ptr @nestedarray, i32 0, i32 0), i32 1, i32 0)
-; CHECK: @nestedarray.5 = alias ptr, getelementptr inbounds ([2 x [4 x ptr]], ptr @nestedarray, inrange i32 0, i32 1, i32 0)
-@nestedarray.5 = alias ptr, getelementptr inbounds ([4 x ptr], ptr getelementptr inbounds ([2 x [4 x ptr]], ptr @nestedarray, inrange i32 0, i32 0), i32 1, i32 0)
+; CHECK: @nestedarray.5 = alias ptr, getelementptr inbounds ([4 x ptr], ptr getelementptr inbounds inrange(0, 32) ([2 x [4 x ptr]], ptr @nestedarray, i32 0, i32 0), i32 1, i32 0)
+@nestedarray.5 = alias ptr, getelementptr inbounds ([4 x ptr], ptr getelementptr inbounds inrange(0, 32) ([2 x [4 x ptr]], ptr @nestedarray, i32 0, i32 0), i32 1, i32 0)
; See if i92 indices work too.
define ptr @test(ptr %t, i92 %n) {
diff --git a/llvm/test/Assembler/inrange-errors.ll b/llvm/test/Assembler/inrange-errors.ll
new file mode 100644
index 0000000..128c219
--- /dev/null
+++ b/llvm/test/Assembler/inrange-errors.ll
@@ -0,0 +1,46 @@
+; RUN: split-file %s %t
+; RUN: not llvm-as < %t/parse-error-1.ll -o /dev/null 2>&1 | FileCheck --check-prefix=PARSE-ERROR-1 %s
+; RUN: not llvm-as < %t/parse-error-2.ll -o /dev/null 2>&1 | FileCheck --check-prefix=PARSE-ERROR-2 %s
+; RUN: not llvm-as < %t/parse-error-3.ll -o /dev/null 2>&1 | FileCheck --check-prefix=PARSE-ERROR-3 %s
+; RUN: not llvm-as < %t/parse-error-4.ll -o /dev/null 2>&1 | FileCheck --check-prefix=PARSE-ERROR-4 %s
+; RUN: not llvm-as < %t/end-not-larger-start.ll -o /dev/null 2>&1 | FileCheck --check-prefix=END-NOT-LARGER-START %s
+
+;--- parse-error-1.ll
+
+; PARSE-ERROR-1: error: expected integer
+@g = external global i8
+define ptr @test() {
+ ret ptr getelementptr inrange (i8, ptr @g, i64 8)
+}
+
+;--- parse-error-2.ll
+
+; PARSE-ERROR-2: error: expected ','
+@g = external global i8
+define ptr @test() {
+ ret ptr getelementptr inrange(42 (i8, ptr @g, i64 8)
+}
+
+;--- parse-error-3.ll
+
+; PARSE-ERROR-3: error: expected integer
+@g = external global i8
+define ptr @test() {
+ ret ptr getelementptr inrange(42, (i8, ptr @g, i64 8)
+}
+
+;--- parse-error-4.ll
+
+; PARSE-ERROR-4: error: expected ')'
+@g = external global i8
+define ptr @test() {
+ ret ptr getelementptr inrange(42, 123 (i8, ptr @g, i64 8)
+}
+
+;--- end-not-larger-start.ll
+
+; END-NOT-LARGER-START: error: expected end to be larger than start
+@g = external global i8
+define ptr @test() {
+ ret ptr getelementptr inrange(42, 42) (i8, ptr @g, i64 8)
+}
diff --git a/llvm/test/Bindings/OCaml/core.ml b/llvm/test/Bindings/OCaml/core.ml
index a9abc9d..64bfa8e 100644
--- a/llvm/test/Bindings/OCaml/core.ml
+++ b/llvm/test/Bindings/OCaml/core.ml
@@ -252,7 +252,7 @@ let test_constants () =
group "constant arithmetic";
(* CHECK: @const_neg = global i64 sub
* CHECK: @const_nsw_neg = global i64 sub nsw
- * CHECK: @const_nuw_neg = global i64 sub nuw
+ * CHECK: @const_nuw_neg = global i64 sub
* CHECK: @const_not = global i64 xor
* CHECK: @const_add = global i64 add
* CHECK: @const_nsw_add = global i64 add nsw
diff --git a/llvm/test/Bindings/OCaml/debuginfo.ml b/llvm/test/Bindings/OCaml/debuginfo.ml
index d469d47..f95800d 100644
--- a/llvm/test/Bindings/OCaml/debuginfo.ml
+++ b/llvm/test/Bindings/OCaml/debuginfo.ml
@@ -39,6 +39,8 @@ let prepare_target llmod =
let new_module () =
let m = Llvm.create_module context module_name in
let () = prepare_target m in
+ let () = Llvm_debuginfo.set_is_new_dbg_info_format m true in
+ insist (Llvm_debuginfo.is_new_dbg_info_format m);
m
let test_get_module () =
@@ -285,8 +287,8 @@ let test_variables f dibuilder file_di fun_di =
~var_info:auto_var ~expr:(Llvm_debuginfo.dibuild_expression dibuilder [||])
~location ~instr:entry_term
in
- let () = Printf.printf "%s\n" (Llvm.string_of_llvalue vdi) in
- (* CHECK: call void @llvm.dbg.declare(metadata ptr %my_alloca, metadata {{![0-9]+}}, metadata !DIExpression()), !dbg {{\![0-9]+}}
+ let () = Printf.printf "%s\n" (Llvm.string_of_lldbgrecord vdi) in
+ (* CHECK: dbg_declare(ptr %my_alloca, ![[#]], !DIExpression(), ![[#]])
*)
let arg0 = (Llvm.params f).(0) in
let arg_var = Llvm_debuginfo.dibuild_create_parameter_variable dibuilder ~scope:fun_di
@@ -297,8 +299,8 @@ let test_variables f dibuilder file_di fun_di =
~var_info:arg_var ~expr:(Llvm_debuginfo.dibuild_expression dibuilder [||])
~location ~instr:entry_term
in
- let () = Printf.printf "%s\n" (Llvm.string_of_llvalue argdi) in
- (* CHECK: call void @llvm.dbg.declare(metadata i32 %0, metadata {{![0-9]+}}, metadata !DIExpression()), !dbg {{\![0-9]+}}
+ let () = Printf.printf "%s\n" (Llvm.string_of_lldbgrecord argdi) in
+ (* CHECK: dbg_declare(i32 %0, ![[#]], !DIExpression(), ![[#]])
*)
()
diff --git a/llvm/test/Bindings/llvm-c/echo.ll b/llvm/test/Bindings/llvm-c/echo.ll
index be02075..953a16b 100644
--- a/llvm/test/Bindings/llvm-c/echo.ll
+++ b/llvm/test/Bindings/llvm-c/echo.ll
@@ -334,6 +334,20 @@ define void @test_fast_math_flags_call_outer(float %a) {
ret void
}
+define void @test_func_prefix_data_01() prefix i32 123 {
+ ret void
+}
+
+define void @test_func_prefix_data_02() prefix i64 2000 {
+ ret void
+}
+
+%func_prolog_struct = type <{ i8, i8, ptr }>
+
+define void @test_func_prologue_data_01() prologue %func_prolog_struct <{ i8 235, i8 8, ptr zeroinitializer}> {
+ ret void
+}
+
!llvm.dbg.cu = !{!0, !2}
!llvm.module.flags = !{!3}
diff --git a/llvm/test/Bitcode/DIExpression-aggresult.ll b/llvm/test/Bitcode/DIExpression-aggresult.ll
index 0b89454..0172182 100644
--- a/llvm/test/Bitcode/DIExpression-aggresult.ll
+++ b/llvm/test/Bitcode/DIExpression-aggresult.ll
@@ -1,4 +1,5 @@
; RUN: llvm-dis -o - %s.bc | FileCheck %s
+; RUN: llvm-dis -o - %s.bc --load-bitcode-into-experimental-debuginfo-iterators=true | FileCheck %s
%class.A = type { i32, i32, i32, i32 }
define void @_Z3fooi(%class.A* sret(%class.A) %agg.result) #0 !dbg !3 {
diff --git a/llvm/test/Bitcode/compatibility-3.6.ll b/llvm/test/Bitcode/compatibility-3.6.ll
index b1f4abf..2190e2f 100644
--- a/llvm/test/Bitcode/compatibility-3.6.ll
+++ b/llvm/test/Bitcode/compatibility-3.6.ll
@@ -1061,16 +1061,16 @@ define void @instructions.va_arg(i8* %v, ...) {
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(ptr %ap2)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap2)
va_arg i8* %ap2, i32
; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(ptr %ap2)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap2)
ret void
}
@@ -1178,11 +1178,11 @@ define void @intrinsics.codegen() {
; CHECK: attributes #27 = { uwtable }
; CHECK: attributes #28 = { "cpu"="cortex-a8" }
; CHECK: attributes #29 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #30 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #31 = { nounwind memory(argmem: read) }
-; CHECK: attributes #32 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #33 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #34 = { nocallback nounwind }
+; CHECK: attributes #30 = { nounwind memory(argmem: read) }
+; CHECK: attributes #31 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #32 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #33 = { nocallback nounwind }
+; CHECK: attributes #34 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #36 = { builtin }
diff --git a/llvm/test/Bitcode/compatibility-3.7.ll b/llvm/test/Bitcode/compatibility-3.7.ll
index 91e55f6..7e59b5c 100644
--- a/llvm/test/Bitcode/compatibility-3.7.ll
+++ b/llvm/test/Bitcode/compatibility-3.7.ll
@@ -1092,16 +1092,16 @@ define void @instructions.va_arg(i8* %v, ...) {
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(ptr %ap2)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap2)
va_arg i8* %ap2, i32
; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(ptr %ap2)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap2)
ret void
}
@@ -1241,11 +1241,11 @@ define void @misc.metadata() {
; CHECK: attributes #30 = { uwtable }
; CHECK: attributes #31 = { "cpu"="cortex-a8" }
; CHECK: attributes #32 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #33 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #34 = { nounwind memory(argmem: read) }
-; CHECK: attributes #35 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #36 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #37 = { nocallback nounwind }
+; CHECK: attributes #33 = { nounwind memory(argmem: read) }
+; CHECK: attributes #34 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #36 = { nocallback nounwind }
+; CHECK: attributes #37 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #38 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #39 = { builtin }
diff --git a/llvm/test/Bitcode/compatibility-3.8.ll b/llvm/test/Bitcode/compatibility-3.8.ll
index aa4d8b1..ebd1f2f 100644
--- a/llvm/test/Bitcode/compatibility-3.8.ll
+++ b/llvm/test/Bitcode/compatibility-3.8.ll
@@ -1247,16 +1247,16 @@ define void @instructions.va_arg(i8* %v, ...) {
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(ptr %ap2)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap2)
va_arg i8* %ap2, i32
; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(ptr %ap2)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap2)
ret void
}
@@ -1551,11 +1551,11 @@ normal:
; CHECK: attributes #33 = { memory(inaccessiblemem: readwrite) }
; CHECK: attributes #34 = { memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #36 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #37 = { nounwind memory(argmem: read) }
-; CHECK: attributes #38 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #39 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #40 = { nocallback nounwind }
+; CHECK: attributes #36 = { nounwind memory(argmem: read) }
+; CHECK: attributes #37 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #38 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #39 = { nocallback nounwind }
+; CHECK: attributes #40 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #41 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #42 = { builtin }
diff --git a/llvm/test/Bitcode/compatibility-3.9.ll b/llvm/test/Bitcode/compatibility-3.9.ll
index e3c84f6..c34f04c 100644
--- a/llvm/test/Bitcode/compatibility-3.9.ll
+++ b/llvm/test/Bitcode/compatibility-3.9.ll
@@ -1318,16 +1318,16 @@ define void @instructions.va_arg(i8* %v, ...) {
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(ptr %ap2)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap2)
va_arg i8* %ap2, i32
; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(ptr %ap2)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap2)
ret void
}
@@ -1624,11 +1624,11 @@ declare void @f.writeonly() writeonly
; CHECK: attributes #33 = { memory(inaccessiblemem: readwrite) }
; CHECK: attributes #34 = { memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #36 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #37 = { nounwind memory(argmem: read) }
-; CHECK: attributes #38 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #39 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #40 = { nocallback nounwind }
+; CHECK: attributes #36 = { nounwind memory(argmem: read) }
+; CHECK: attributes #37 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #38 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #39 = { nocallback nounwind }
+; CHECK: attributes #40 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #41 = { memory(write) }
; CHECK: attributes #42 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #43 = { builtin }
diff --git a/llvm/test/Bitcode/compatibility-4.0.ll b/llvm/test/Bitcode/compatibility-4.0.ll
index da5ea0e..05bffda 100644
--- a/llvm/test/Bitcode/compatibility-4.0.ll
+++ b/llvm/test/Bitcode/compatibility-4.0.ll
@@ -1318,16 +1318,16 @@ define void @instructions.va_arg(i8* %v, ...) {
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(ptr %ap2)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap2)
va_arg i8* %ap2, i32
; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(ptr %ap2)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap2)
ret void
}
@@ -1609,7 +1609,7 @@ declare void @f.writeonly() writeonly
;; Constant Expressions
define i8** @constexpr() {
- ; CHECK: ret ptr getelementptr inbounds ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, inrange i32 1, i32 2)
+ ; CHECK: ret ptr getelementptr inbounds ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, i32 1, i32 2)
ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
}
@@ -1649,11 +1649,11 @@ define i8** @constexpr() {
; CHECK: attributes #33 = { memory(inaccessiblemem: readwrite) }
; CHECK: attributes #34 = { memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #36 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #37 = { nounwind memory(argmem: read) }
-; CHECK: attributes #38 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #39 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #40 = { nocallback nounwind }
+; CHECK: attributes #36 = { nounwind memory(argmem: read) }
+; CHECK: attributes #37 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #38 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #39 = { nocallback nounwind }
+; CHECK: attributes #40 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #41 = { memory(write) }
; CHECK: attributes #42 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #43 = { builtin }
diff --git a/llvm/test/Bitcode/compatibility-5.0.ll b/llvm/test/Bitcode/compatibility-5.0.ll
index 7a39ae6..0c87228 100644
--- a/llvm/test/Bitcode/compatibility-5.0.ll
+++ b/llvm/test/Bitcode/compatibility-5.0.ll
@@ -1330,16 +1330,16 @@ define void @instructions.va_arg(i8* %v, ...) {
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(ptr %ap2)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap2)
va_arg i8* %ap2, i32
; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(ptr %ap2)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap2)
ret void
}
@@ -1624,7 +1624,7 @@ declare void @f.speculatable() speculatable
;; Constant Expressions
define i8** @constexpr() {
- ; CHECK: ret ptr getelementptr inbounds ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, inrange i32 1, i32 2)
+ ; CHECK: ret ptr getelementptr inbounds ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, i32 1, i32 2)
ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
}
@@ -1664,11 +1664,11 @@ define i8** @constexpr() {
; CHECK: attributes #33 = { memory(inaccessiblemem: readwrite) }
; CHECK: attributes #34 = { memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #36 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #37 = { nounwind memory(argmem: read) }
-; CHECK: attributes #38 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #39 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #40 = { nocallback nounwind }
+; CHECK: attributes #36 = { nounwind memory(argmem: read) }
+; CHECK: attributes #37 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #38 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #39 = { nocallback nounwind }
+; CHECK: attributes #40 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #41 = { memory(write) }
; CHECK: attributes #42 = { speculatable }
; CHECK: attributes #43 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
diff --git a/llvm/test/Bitcode/compatibility-6.0.ll b/llvm/test/Bitcode/compatibility-6.0.ll
index 4cb1f3b..44c6808 100644
--- a/llvm/test/Bitcode/compatibility-6.0.ll
+++ b/llvm/test/Bitcode/compatibility-6.0.ll
@@ -1340,16 +1340,16 @@ define void @instructions.va_arg(i8* %v, ...) {
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(ptr %ap2)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap2)
va_arg i8* %ap2, i32
; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(ptr %ap2)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap2)
ret void
}
@@ -1634,7 +1634,7 @@ declare void @f.speculatable() speculatable
;; Constant Expressions
define i8** @constexpr() {
- ; CHECK: ret ptr getelementptr inbounds ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, inrange i32 1, i32 2)
+ ; CHECK: ret ptr getelementptr inbounds ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, i32 1, i32 2)
ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
}
@@ -1674,11 +1674,11 @@ define i8** @constexpr() {
; CHECK: attributes #33 = { memory(inaccessiblemem: readwrite) }
; CHECK: attributes #34 = { memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #36 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #37 = { nounwind memory(argmem: read) }
-; CHECK: attributes #38 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #39 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #40 = { nocallback nounwind }
+; CHECK: attributes #36 = { nounwind memory(argmem: read) }
+; CHECK: attributes #37 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #38 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #39 = { nocallback nounwind }
+; CHECK: attributes #40 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #41 = { memory(write) }
; CHECK: attributes #42 = { speculatable }
; CHECK: attributes #43 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
diff --git a/llvm/test/Bitcode/compatibility.ll b/llvm/test/Bitcode/compatibility.ll
index ce6a657..b374924 100644
--- a/llvm/test/Bitcode/compatibility.ll
+++ b/llvm/test/Bitcode/compatibility.ll
@@ -1648,16 +1648,16 @@ define void @instructions.va_arg(ptr %v, ...) {
%ap = alloca ptr
call void @llvm.va_start(ptr %ap)
- ; CHECK: call void @llvm.va_start(ptr %ap)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap)
va_arg ptr %ap, i32
; CHECK: va_arg ptr %ap, i32
call void @llvm.va_copy(ptr %v, ptr %ap)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap)
call void @llvm.va_end(ptr %ap)
- ; CHECK: call void @llvm.va_end(ptr %ap)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap)
ret void
}
@@ -1941,8 +1941,8 @@ declare void @f.speculatable() speculatable
;; Constant Expressions
define ptr @constexpr() {
- ; CHECK: ret ptr getelementptr inbounds ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, inrange i32 1, i32 2)
- ret ptr getelementptr inbounds ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, inrange i32 1, i32 2)
+ ; CHECK: ret ptr getelementptr inbounds inrange(-16, 16) ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, i32 1, i32 2)
+ ret ptr getelementptr inbounds inrange(-16, 16) ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, i32 1, i32 2)
}
define void @instructions.strictfp() strictfp {
@@ -2091,12 +2091,12 @@ define float @nofpclass_callsites(float %arg) {
; CHECK: attributes #33 = { memory(inaccessiblemem: readwrite) }
; CHECK: attributes #34 = { memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #36 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #37 = { nounwind memory(argmem: read) }
-; CHECK: attributes #38 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #39 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #40 = { nocallback nounwind }
-; CHECK: attributes #41 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
+; CHECK: attributes #36 = { nounwind memory(argmem: read) }
+; CHECK: attributes #37 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #38 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #39 = { nocallback nounwind }
+; CHECK: attributes #40 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
+; CHECK: attributes #41 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #42 = { memory(write) }
; CHECK: attributes #43 = { speculatable }
; CHECK: attributes #44 = { strictfp }
diff --git a/llvm/test/Bitcode/dbg-record-roundtrip.ll b/llvm/test/Bitcode/dbg-record-roundtrip.ll
new file mode 100644
index 0000000..bd347ca
--- /dev/null
+++ b/llvm/test/Bitcode/dbg-record-roundtrip.ll
@@ -0,0 +1,172 @@
+;; Roundtrip tests.
+
+;; Load RemoveDIs mode in llvm-dis but write out debug intrinsics.
+; RUN: llvm-as --write-experimental-debuginfo-iterators-to-bitcode=true %s -o - \
+; RUN: | llvm-dis --load-bitcode-into-experimental-debuginfo-iterators=true --write-experimental-debuginfo=false \
+; RUN: | FileCheck %s
+
+;; Load and write RemoveDIs mode in llvm-dis.
+; RUN: llvm-as --write-experimental-debuginfo-iterators-to-bitcode=true %s -o - \
+; RUN: | llvm-dis --load-bitcode-into-experimental-debuginfo-iterators=true --write-experimental-debuginfo=true \
+; RUN: | FileCheck %s --check-prefixes=RECORDS
+
+;; Load intrinsics directly into the new format (auto-upgrade).
+; RUN: llvm-as --write-experimental-debuginfo-iterators-to-bitcode=false %s -o - \
+; RUN: | llvm-dis --load-bitcode-into-experimental-debuginfo-iterators=true --write-experimental-debuginfo=true \
+; RUN: | FileCheck %s --check-prefixes=RECORDS
+
+;; Check that verify-uselistorder passes regardless of input format.
+; RUN: llvm-as %s --write-experimental-debuginfo-iterators-to-bitcode=true -o - | verify-uselistorder
+; RUN: verify-uselistorder %s
+
+;; Confirm we're producing RemoveDI records from various tools.
+; RUN: opt %s -o - --write-experimental-debuginfo-iterators-to-bitcode=true | llvm-bcanalyzer - | FileCheck %s --check-prefix=BITCODE
+; RUN: llvm-as %s -o - --write-experimental-debuginfo-iterators-to-bitcode=true | llvm-bcanalyzer - | FileCheck %s --check-prefix=BITCODE
+; BITCODE-DAG: DEBUG_RECORD_LABEL
+; BITCODE-DAG: DEBUG_RECORD_VALUE
+; BITCODE-DAG: DEBUG_RECORD_ASSIGN
+; BITCODE-DAG: DEBUG_RECORD_DECLARE
+
+;; Check that llvm-link doesn't explode if we give it different formats to
+;; link.
+;; NOTE: This test fails intermittently on linux if the llvm-as output is piped
+;; into llvm-link in the RUN lines below, unless the verify-uselistorder RUN
+;; lines above are removed. Write to a temporary file to avoid that weirdness.
+;; NOTE2: Unfortunately, the above only stopped it occurring on my machine.
+;; It failed again intermittently here:
+;; https://lab.llvm.org/buildbot/#/builders/245/builds/21930
+;; Allow this test to be retried twice, until this strangeness is understood.
+; ALLOW_RETRIES: 2
+; RUN: llvm-as %s --experimental-debuginfo-iterators=true --write-experimental-debuginfo-iterators-to-bitcode=true -o %t
+; RUN: llvm-link %t %s --experimental-debuginfo-iterators=false -o /dev/null
+; RUN: llvm-as %s --experimental-debuginfo-iterators=false -o %t
+; RUN: llvm-link %t %s --experimental-debuginfo-iterators=true
+
+;; Checks inline.
+
+@g = internal dso_local global i32 0, align 4, !dbg !0
+
+define internal dso_local noundef i32 @_Z3funv(i32 %p, ptr %storage) !dbg !13 {
+entry:
+;; Dbg record at top of block, check dbg.value configurations.
+; CHECK: entry:
+; CHECK-NEXT: dbg.value(metadata i32 %p, metadata ![[e:[0-9]+]], metadata !DIExpression()), !dbg ![[dbg:[0-9]+]]
+; CHECK-NEXT: dbg.value(metadata ![[empty:[0-9]+]], metadata ![[e]], metadata !DIExpression()), !dbg ![[dbg]]
+; CHECK-NEXT: dbg.value(metadata i32 poison, metadata ![[e]], metadata !DIExpression()), !dbg ![[dbg]]
+; CHECK-NEXT: dbg.value(metadata i32 1, metadata ![[f:[0-9]+]], metadata !DIExpression()), !dbg ![[dbg]]
+; RECORDS: entry:
+; RECORDS-NEXT: dbg_value(i32 %p, ![[e:[0-9]+]], !DIExpression(), ![[dbg:[0-9]+]])
+; RECORDS-NEXT: dbg_value(![[empty:[0-9]+]], ![[e]], !DIExpression(), ![[dbg]])
+; RECORDS-NEXT: dbg_value(i32 poison, ![[e]], !DIExpression(), ![[dbg]])
+; RECORDS-NEXT: dbg_value(i32 1, ![[f:[0-9]+]], !DIExpression(), ![[dbg]])
+ tail call void @llvm.dbg.value(metadata i32 %p, metadata !32, metadata !DIExpression()), !dbg !19
+ tail call void @llvm.dbg.value(metadata !29, metadata !32, metadata !DIExpression()), !dbg !19
+ tail call void @llvm.dbg.value(metadata i32 poison, metadata !32, metadata !DIExpression()), !dbg !19
+ tail call void @llvm.dbg.value(metadata i32 1, metadata !33, metadata !DIExpression()), !dbg !19
+;; Arglist with an argument, constant, local use before def, poison.
+; CHECK-NEXT: dbg.value(metadata !DIArgList(i32 %p, i32 0, i32 %0, i32 poison), metadata ![[f]], metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_plus, DW_OP_LLVM_arg, 2, DW_OP_LLVM_arg, 3, DW_OP_plus, DW_OP_minus)), !dbg ![[dbg]]
+; RECORDS-NEXT: dbg_value(!DIArgList(i32 %p, i32 0, i32 %0, i32 poison), ![[f]], !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_plus, DW_OP_LLVM_arg, 2, DW_OP_LLVM_arg, 3, DW_OP_plus, DW_OP_minus), ![[dbg]])
+ tail call void @llvm.dbg.value(metadata !DIArgList(i32 %p, i32 0, i32 %0, i32 poison), metadata !33, metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_plus, DW_OP_LLVM_arg, 2, DW_OP_LLVM_arg, 3, DW_OP_plus, DW_OP_minus)), !dbg !19
+;; Check dbg.assign use before def (value, addr and ID). Check expression order too.
+; CHECK: dbg.assign(metadata i32 %0, metadata ![[i:[0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 0),
+; CHECK-SAME: metadata ![[ID:[0-9]+]], metadata ptr %a, metadata !DIExpression(DW_OP_plus_uconst, 1)), !dbg ![[dbg]]
+; RECORDS: dbg_assign(i32 %0, ![[i:[0-9]+]], !DIExpression(DW_OP_plus_uconst, 0),
+; RECORDS-SAME: ![[ID:[0-9]+]], ptr %a, !DIExpression(DW_OP_plus_uconst, 1), ![[dbg]])
+ tail call void @llvm.dbg.assign(metadata i32 %0, metadata !36, metadata !DIExpression(DW_OP_plus_uconst, 0), metadata !37, metadata ptr %a, metadata !DIExpression(DW_OP_plus_uconst, 1)), !dbg !19
+ %a = alloca i32, align 4, !DIAssignID !37
+; CHECK: %a = alloca i32, align 4, !DIAssignID ![[ID]]
+;; Check dbg.declare configurations.
+; CHECK-NEXT: dbg.declare(metadata ptr %a, metadata ![[a:[0-9]+]], metadata !DIExpression()), !dbg ![[dbg]]
+; CHECK-NEXT: dbg.declare(metadata ![[empty:[0-9]+]], metadata ![[b:[0-9]+]], metadata !DIExpression()), !dbg ![[dbg]]
+; CHECK-NEXT: dbg.declare(metadata ptr poison, metadata ![[c:[0-9]+]], metadata !DIExpression()), !dbg ![[dbg]]
+; CHECK-NEXT: dbg.declare(metadata ptr null, metadata ![[d:[0-9]+]], metadata !DIExpression()), !dbg ![[dbg]]
+; CHECK-NEXT: dbg.declare(metadata ptr @g, metadata ![[h:[0-9]+]], metadata !DIExpression()), !dbg ![[dbg]]
+; RECORDS: %a = alloca i32, align 4, !DIAssignID ![[ID]]
+;; Check dbg.declare configurations.
+; RECORDS-NEXT: dbg_declare(ptr %a, ![[a:[0-9]+]], !DIExpression(), ![[dbg]])
+; RECORDS-NEXT: dbg_declare(![[empty:[0-9]+]], ![[b:[0-9]+]], !DIExpression(), ![[dbg]])
+; RECORDS-NEXT: dbg_declare(ptr poison, ![[c:[0-9]+]], !DIExpression(), ![[dbg]])
+; RECORDS-NEXT: dbg_declare(ptr null, ![[d:[0-9]+]], !DIExpression(), ![[dbg]])
+; RECORDS-NEXT: dbg_declare(ptr @g, ![[h:[0-9]+]], !DIExpression(), ![[dbg]])
+ tail call void @llvm.dbg.declare(metadata ptr %a, metadata !17, metadata !DIExpression()), !dbg !19
+ tail call void @llvm.dbg.declare(metadata !29, metadata !28, metadata !DIExpression()), !dbg !19
+ tail call void @llvm.dbg.declare(metadata ptr poison, metadata !30, metadata !DIExpression()), !dbg !19
+ tail call void @llvm.dbg.declare(metadata ptr null, metadata !31, metadata !DIExpression()), !dbg !19
+ tail call void @llvm.dbg.declare(metadata ptr @g, metadata !35, metadata !DIExpression()), !dbg !19
+;; Argument value dbg.declare.
+; CHECK: dbg.declare(metadata ptr %storage, metadata ![[g:[0-9]+]], metadata !DIExpression()), !dbg ![[dbg]]
+; RECORDS: dbg_declare(ptr %storage, ![[g:[0-9]+]], !DIExpression(), ![[dbg]])
+ tail call void @llvm.dbg.declare(metadata ptr %storage, metadata !34, metadata !DIExpression()), !dbg !19
+;; Use before def dbg.value.
+; CHECK: dbg.value(metadata i32 %0, metadata ![[e]], metadata !DIExpression()), !dbg ![[dbg]]
+; RECORDS: dbg_value(i32 %0, ![[e]], !DIExpression(), ![[dbg]])
+ tail call void @llvm.dbg.value(metadata i32 %0, metadata !32, metadata !DIExpression()), !dbg !19
+ %0 = load i32, ptr @g, align 4, !dbg !20
+;; Non-argument local value dbg.value.
+; CHECK: dbg.value(metadata i32 %0, metadata ![[e]], metadata !DIExpression()), !dbg ![[dbg]]
+; RECORDS: dbg_value(i32 %0, ![[e]], !DIExpression(), ![[dbg]])
+ tail call void @llvm.dbg.value(metadata i32 %0, metadata !32, metadata !DIExpression()), !dbg !19
+ store i32 %0, ptr %a, align 4, !dbg !19
+ %1 = load i32, ptr %a, align 4, !dbg !25
+; CHECK: dbg.label(metadata ![[label:[0-9]+]]), !dbg ![[dbg]]
+; RECORDS: dbg_label(![[label:[0-9]+]], ![[dbg]])
+ tail call void @llvm.dbg.label(metadata !38), !dbg !19
+ ret i32 %1, !dbg !27
+}
+
+; CHECK-DAG: ![[a]] = !DILocalVariable(name: "a",
+; CHECK-DAG: ![[b]] = !DILocalVariable(name: "b",
+; CHECK-DAG: ![[c]] = !DILocalVariable(name: "c",
+; CHECK-DAG: ![[d]] = !DILocalVariable(name: "d",
+; CHECK-DAG: ![[e]] = !DILocalVariable(name: "e",
+; CHECK-DAG: ![[f]] = !DILocalVariable(name: "f",
+; CHECK-DAG: ![[g]] = !DILocalVariable(name: "g",
+; CHECK-DAG: ![[h]] = !DILocalVariable(name: "h",
+; CHECK-DAG: ![[i]] = !DILocalVariable(name: "i",
+; CHECK-DAG: ![[empty]] = !{}
+; CHECK-DAG: ![[label]] = !DILabel
+
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+declare void @llvm.dbg.value(metadata, metadata, metadata)
+declare void @llvm.dbg.assign(metadata, metadata, metadata, metadata, metadata, metadata)
+declare void @llvm.dbg.label(metadata)
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!6, !7, !8, !9, !10, !11}
+!llvm.ident = !{!12}
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "g", scope: !2, file: !3, line: 1, type: !5, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !3, producer: "clang version 19.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, globals: !4, splitDebugInlining: false, nameTableKind: None)
+!3 = !DIFile(filename: "test.cpp", directory: "/")
+!4 = !{!0}
+!5 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!6 = !{i32 7, !"Dwarf Version", i32 5}
+!7 = !{i32 2, !"Debug Info Version", i32 3}
+!8 = !{i32 1, !"wchar_size", i32 4}
+!9 = !{i32 8, !"PIC Level", i32 2}
+!10 = !{i32 7, !"PIE Level", i32 2}
+!11 = !{i32 7, !"uwtable", i32 2}
+!12 = !{!"clang version 19.0.0"}
+!13 = distinct !DISubprogram(name: "fun", linkageName: "_Z3funv", scope: !3, file: !3, line: 2, type: !14, scopeLine: 2, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !2, retainedNodes: !16)
+!14 = !DISubroutineType(types: !15)
+!15 = !{!5}
+!16 = !{!17}
+!17 = !DILocalVariable(name: "a", scope: !13, file: !3, line: 3, type: !5)
+!18 = !DILocation(line: 3, column: 3, scope: !13)
+!19 = !DILocation(line: 3, column: 7, scope: !13)
+!20 = !DILocation(line: 3, column: 11, scope: !13)
+!25 = !DILocation(line: 4, column: 12, scope: !13)
+!26 = !DILocation(line: 5, column: 1, scope: !13)
+!27 = !DILocation(line: 4, column: 5, scope: !13)
+!28 = !DILocalVariable(name: "b", scope: !13, file: !3, line: 3, type: !5)
+!29 = !{}
+!30 = !DILocalVariable(name: "c", scope: !13, file: !3, line: 3, type: !5)
+!31 = !DILocalVariable(name: "d", scope: !13, file: !3, line: 3, type: !5)
+!32 = !DILocalVariable(name: "e", scope: !13, file: !3, line: 3, type: !5)
+!33 = !DILocalVariable(name: "f", scope: !13, file: !3, line: 3, type: !5)
+!34 = !DILocalVariable(name: "g", scope: !13, file: !3, line: 3, type: !5)
+!35 = !DILocalVariable(name: "h", scope: !13, file: !3, line: 3, type: !5)
+!36 = !DILocalVariable(name: "i", scope: !13, file: !3, line: 3, type: !5)
+!37 = distinct !DIAssignID()
+!38 = !DILabel(scope: !13, name: "label", file: !3, line: 1)
diff --git a/llvm/test/Bitcode/flags.ll b/llvm/test/Bitcode/flags.ll
index e3fc827..96995ec 100644
--- a/llvm/test/Bitcode/flags.ll
+++ b/llvm/test/Bitcode/flags.ll
@@ -20,17 +20,34 @@ second: ; preds = %first
%ll = zext i32 %s to i64
%jj = or disjoint i32 %a, 0
%oo = or i32 %a, 0
+ %tu = trunc nuw i32 %a to i16
+ %ts = trunc nsw i32 %a to i16
+ %tus = trunc nuw nsw i32 %a to i16
+ %t = trunc i32 %a to i16
+ %tuv = trunc nuw <2 x i32> %aa to <2 x i16>
+ %tsv = trunc nsw <2 x i32> %aa to <2 x i16>
+ %tusv = trunc nuw nsw <2 x i32> %aa to <2 x i16>
+ %tv = trunc <2 x i32> %aa to <2 x i16>
unreachable
-first: ; preds = %entry
- %a = bitcast i32 0 to i32 ; <i32> [#uses=8]
- %uu = add nuw i32 %a, 0 ; <i32> [#uses=0]
- %ss = add nsw i32 %a, 0 ; <i32> [#uses=0]
- %uuss = add nuw nsw i32 %a, 0 ; <i32> [#uses=0]
- %zz = add i32 %a, 0 ; <i32> [#uses=0]
+first: ; preds = %entry
+ %aa = bitcast <2 x i32> <i32 0, i32 0> to <2 x i32>
+ %a = bitcast i32 0 to i32 ; <i32> [#uses=8]
+ %uu = add nuw i32 %a, 0 ; <i32> [#uses=0]
+ %ss = add nsw i32 %a, 0 ; <i32> [#uses=0]
+ %uuss = add nuw nsw i32 %a, 0 ; <i32> [#uses=0]
+ %zz = add i32 %a, 0 ; <i32> [#uses=0]
%kk = zext nneg i32 %a to i64
%rr = zext i32 %ss to i64
%mm = or disjoint i32 %a, 0
%nn = or i32 %a, 0
+ %tuu = trunc nuw i32 %a to i16
+ %tss = trunc nsw i32 %a to i16
+ %tuss = trunc nuw nsw i32 %a to i16
+ %tt = trunc i32 %a to i16
+ %ttuv = trunc nuw <2 x i32> %aa to <2 x i16>
+ %ttsv = trunc nsw <2 x i32> %aa to <2 x i16>
+ %ttusv = trunc nuw nsw <2 x i32> %aa to <2 x i16>
+ %ttv = trunc <2 x i32> %aa to <2 x i16>
br label %second
}
diff --git a/llvm/test/Bitcode/thinlto-func-summary-vtableref-pgo.ll b/llvm/test/Bitcode/thinlto-func-summary-vtableref-pgo.ll
new file mode 100644
index 0000000..ba3ce9a
--- /dev/null
+++ b/llvm/test/Bitcode/thinlto-func-summary-vtableref-pgo.ll
@@ -0,0 +1,74 @@
+; Promote at most one function and annotate at most one vtable.
+; As a result, only one value (of each relevant kind) shows up in the function
+; summary.
+
+; RUN: opt -module-summary -icp-max-num-vtables=1 -icp-max-prom=1 %s -o %t.o
+
+; RUN: llvm-bcanalyzer -dump %t.o | FileCheck %s
+
+; RUN: llvm-dis -o - %t.o | FileCheck %s --check-prefix=DIS
+; Round trip it through llvm-as
+; RUN: llvm-dis -o - %t.o | llvm-as -o - | llvm-dis -o - | FileCheck %s --check-prefix=DIS
+
+; CHECK: <GLOBALVAL_SUMMARY_BLOCK
+; CHECK-NEXT: <VERSION op0=9/>
+; CHECK-NEXT: <FLAGS op0=0/>
+; The `VALUE_GUID` below represents the "_ZTV4Base" referenced by the instruction
+; that loads vtable pointers.
+; CHECK-NEXT: <VALUE_GUID op0=21 op1=1960855528937986108/>
+; The `VALUE_GUID` below represents the "_ZN4Base4funcEv" referenced by the
+; indirect call instruction.
+; CHECK-NEXT: <VALUE_GUID op0=20 op1=5459407273543877811/>
+; NOTE: vtables and functions from the Derived class are dropped because
+; `-icp-max-num-vtables` and `-icp-max-prom` are both set to one.
+; <PERMODULE_PROFILE> has the format [valueid, flags, instcount, funcflags,
+; numrefs, rorefcnt, worefcnt,
+; m x valueid,
+; n x (valueid, hotness+tailcall)]
+; CHECK-NEXT: <PERMODULE_PROFILE abbrevid=4 op0=0 op1=0 op2=4 op3=256 op4=1 op5=1 op6=0 op7=21 op8=20 op9=3/>
+; CHECK-NEXT: </GLOBALVAL_SUMMARY_BLOCK>
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; The function has one BB and an entry count of 150, so the BB is hot according to
+; ProfileSummary; this is reflected in the bitcode (see the llvm-dis output).
+define i32 @_Z4testP4Base(ptr %0) !prof !15 {
+ %2 = load ptr, ptr %0, !prof !16
+ %3 = load ptr, ptr %2
+ %4 = tail call i32 %3(ptr %0), !prof !17
+ ret i32 %4
+}
+
+!llvm.module.flags = !{!1}
+
+!1 = !{i32 1, !"ProfileSummary", !2}
+!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
+!3 = !{!"ProfileFormat", !"InstrProf"}
+!4 = !{!"TotalCount", i64 10000}
+!5 = !{!"MaxCount", i64 200}
+!6 = !{!"MaxInternalCount", i64 200}
+!7 = !{!"MaxFunctionCount", i64 200}
+!8 = !{!"NumCounts", i64 3}
+!9 = !{!"NumFunctions", i64 3}
+!10 = !{!"DetailedSummary", !11}
+!11 = !{!12, !13, !14}
+!12 = !{i32 10000, i64 100, i32 1}
+!13 = !{i32 990000, i64 100, i32 1}
+!14 = !{i32 999999, i64 1, i32 2}
+
+!15 = !{!"function_entry_count", i32 150}
+; 1960855528937986108 is the MD5 hash of _ZTV4Base, and
+; 13870436605473471591 is the MD5 hash of _ZTV7Derived
+!16 = !{!"VP", i32 2, i64 150, i64 1960855528937986108, i64 100, i64 13870436605473471591, i64 50}
+; 5459407273543877811 is the MD5 hash of _ZN4Base4funcEv, and
+; 6174874150489409711 is the MD5 hash of _ZN7Derived4funcEv
+!17 = !{!"VP", i32 0, i64 150, i64 5459407273543877811, i64 100, i64 6174874150489409711, i64 50}
+
+; ModuleSummaryIndex stores the <guid, global-value summary> map in a std::map, so
+; global value summaries are printed in order of increasing GUID.
+; DIS: ^0 = module: (path: "{{.*}}", hash: (0, 0, 0, 0, 0))
+; DIS: ^1 = gv: (guid: 1960855528937986108)
+; DIS: ^2 = gv: (guid: 5459407273543877811)
+; DIS: ^3 = gv: (name: "_Z4testP4Base", summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0, canAutoHide: 0), insts: 4, funcFlags: (readNone: 0, readOnly: 0, noRecurse: 0, returnDoesNotAlias: 0, noInline: 0, alwaysInline: 0, noUnwind: 0, mayThrow: 0, hasUnknownCall: 1, mustBeUnreachable: 0), calls: ((callee: ^2, hotness: hot)), refs: (readonly ^1)))) ; guid = 15857150948103218965
+; DIS: ^4 = blockcount: 0
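
The GUID values quoted in the comments of the test above are the 64-bit MD5-based GUIDs that ThinLTO summaries use to identify symbols by name. As a minimal illustrative sketch (assuming an LLVM development tree, and assuming llvm::GlobalValue::getGUID still hashes the raw symbol name; the expected numeric values are the ones cited in the test comments and are not re-derived here), the same hashes can be reproduced as follows:

#include "llvm/IR/GlobalValue.h"
#include <cstdio>

// Print the summary GUIDs for the two symbols referenced by the value
// profile metadata in the test above; GlobalValue::getGUID applies the
// same MD5-based hash that the module summary writer uses for names.
int main() {
  std::printf("_ZTV4Base       -> %llu\n",
              (unsigned long long)llvm::GlobalValue::getGUID("_ZTV4Base"));
  std::printf("_ZN4Base4funcEv -> %llu\n",
              (unsigned long long)llvm::GlobalValue::getGUID("_ZN4Base4funcEv"));
  return 0;
}
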
diff --git a/llvm/test/Bitcode/thinlto-function-summary.ll b/llvm/test/Bitcode/thinlto-function-summary.ll
index 799759e..13c6611 100644
--- a/llvm/test/Bitcode/thinlto-function-summary.ll
+++ b/llvm/test/Bitcode/thinlto-function-summary.ll
@@ -13,9 +13,9 @@
; "variadic"
; BC-NEXT: <FUNCTION op0=46 op1=8
; "llvm.va_start"
-; BC-NEXT: <FUNCTION op0=54 op1=13
+; BC-NEXT: <FUNCTION op0=54 op1=16
; "f"
-; BC-NEXT: <ALIAS op0=67 op1=1
+; BC-NEXT: <ALIAS op0=70 op1=1
; BC: <GLOBALVAL_SUMMARY_BLOCK
; BC-NEXT: <VERSION
; BC-NEXT: <FLAGS
@@ -26,7 +26,7 @@
; BC-NEXT: <ALIAS {{.*}} op0=6 op1=0 op2=3
; BC-NEXT: </GLOBALVAL_SUMMARY_BLOCK
; BC: <STRTAB_BLOCK
-; BC-NEXT: blob data = 'hfoobaranon.{{................................}}.0variadicllvm.va_startf{{.*}}'
+; BC-NEXT: blob data = 'hfoobaranon.{{................................}}.0variadicllvm.va_start.p{{[0-9]+}}f{{.*}}'
; RUN: opt -passes=name-anon-globals -module-summary < %s | llvm-dis | FileCheck %s
diff --git a/llvm/test/Bitcode/upgrade-dbg-addr.ll b/llvm/test/Bitcode/upgrade-dbg-addr.ll
index 40fd7db..06a411c 100644
--- a/llvm/test/Bitcode/upgrade-dbg-addr.ll
+++ b/llvm/test/Bitcode/upgrade-dbg-addr.ll
@@ -1,6 +1,7 @@
; Test upgrade of dbg.addr intrinsics into dbg.value with DW_OP_deref appended
;
; RUN: llvm-dis < %s.bc | FileCheck %s
+; RUN: llvm-dis < %s.bc --load-bitcode-into-experimental-debuginfo-iterators --write-experimental-debuginfo=false | FileCheck %s
; RUN: verify-uselistorder < %s.bc
define i32 @example(i32 %num) {
diff --git a/llvm/test/Bitcode/variableArgumentIntrinsic.3.2.ll b/llvm/test/Bitcode/variableArgumentIntrinsic.3.2.ll
index fad7b8e..fd3f500 100644
--- a/llvm/test/Bitcode/variableArgumentIntrinsic.3.2.ll
+++ b/llvm/test/Bitcode/variableArgumentIntrinsic.3.2.ll
@@ -10,7 +10,7 @@ define i32 @varArgIntrinsic(i32 %X, ...) {
%ap = alloca i8*
%ap2 = bitcast i8** %ap to i8*
-; CHECK: call void @llvm.va_start(ptr %ap2)
+; CHECK: call void @llvm.va_start.p0(ptr %ap2)
call void @llvm.va_start(i8* %ap2)
; CHECK-NEXT: %tmp = va_arg ptr %ap, i32
@@ -19,12 +19,12 @@ define i32 @varArgIntrinsic(i32 %X, ...) {
%aq = alloca i8*
%aq2 = bitcast i8** %aq to i8*
-; CHECK: call void @llvm.va_copy(ptr %aq2, ptr %ap2)
+; CHECK: call void @llvm.va_copy.p0(ptr %aq2, ptr %ap2)
call void @llvm.va_copy(i8* %aq2, i8* %ap2)
-; CHECK-NEXT: call void @llvm.va_end(ptr %aq2)
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr %aq2)
call void @llvm.va_end(i8* %aq2)
-; CHECK-NEXT: call void @llvm.va_end(ptr %ap2)
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr %ap2)
call void @llvm.va_end(i8* %ap2)
ret i32 %tmp
}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
index 458c2cb..7163da0 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
@@ -512,9 +512,9 @@ define i32 @fetch_and_nand(ptr %p) #0 {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB6_2
; CHECK-NOLSE-O0-NEXT: LBB6_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB6_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB6_1
; CHECK-NOLSE-O0-NEXT: b LBB6_5
@@ -540,9 +540,9 @@ define i32 @fetch_and_nand(ptr %p) #0 {
; CHECK-OUTLINE-O0-NEXT: mvn w1, w8
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas4_rel
; CHECK-OUTLINE-O0-NEXT: ldr w8, [sp, #8] ; 4-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs w8, w0, w8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #28] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB6_1
; CHECK-OUTLINE-O0-NEXT: b LBB6_2
@@ -582,9 +582,9 @@ define i32 @fetch_and_nand(ptr %p) #0 {
; CHECK-LSE-O0-NEXT: mvn w10, w9
; CHECK-LSE-O0-NEXT: mov x9, x8
; CHECK-LSE-O0-NEXT: casl w9, w10, [x11]
-; CHECK-LSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-LSE-O0-NEXT: subs w8, w9, w8
; CHECK-LSE-O0-NEXT: cset w8, eq
+; CHECK-LSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-LSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-LSE-O0-NEXT: tbz w8, #0, LBB6_1
; CHECK-LSE-O0-NEXT: b LBB6_2
@@ -649,9 +649,9 @@ define i64 @fetch_and_nand_64(ptr %p) #0 {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB7_2
; CHECK-NOLSE-O0-NEXT: LBB7_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB7_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB7_1
; CHECK-NOLSE-O0-NEXT: b LBB7_5
@@ -677,9 +677,9 @@ define i64 @fetch_and_nand_64(ptr %p) #0 {
; CHECK-OUTLINE-O0-NEXT: mvn x1, x8
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas8_acq_rel
; CHECK-OUTLINE-O0-NEXT: ldr x8, [sp] ; 8-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #8] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs x8, x0, x8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #8] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #24] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB7_1
; CHECK-OUTLINE-O0-NEXT: b LBB7_2
@@ -719,9 +719,9 @@ define i64 @fetch_and_nand_64(ptr %p) #0 {
; CHECK-LSE-O0-NEXT: mvn x10, x9
; CHECK-LSE-O0-NEXT: mov x9, x8
; CHECK-LSE-O0-NEXT: casal x9, x10, [x11]
-; CHECK-LSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-LSE-O0-NEXT: subs x8, x9, x8
; CHECK-LSE-O0-NEXT: cset w8, eq
+; CHECK-LSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-LSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-LSE-O0-NEXT: tbz w8, #0, LBB7_1
; CHECK-LSE-O0-NEXT: b LBB7_2
@@ -782,9 +782,9 @@ define i32 @fetch_and_or(ptr %p) #0 {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB8_2
; CHECK-NOLSE-O0-NEXT: LBB8_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB8_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB8_1
; CHECK-NOLSE-O0-NEXT: b LBB8_5
@@ -855,9 +855,9 @@ define i64 @fetch_and_or_64(ptr %p) #0 {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB9_2
; CHECK-NOLSE-O0-NEXT: LBB9_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB9_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB9_1
; CHECK-NOLSE-O0-NEXT: b LBB9_5
@@ -4005,9 +4005,9 @@ define i32 @atomicrmw_add_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB47_2
; CHECK-NOLSE-O0-NEXT: LBB47_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB47_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB47_1
; CHECK-NOLSE-O0-NEXT: b LBB47_5
@@ -4097,9 +4097,9 @@ define i32 @atomicrmw_xchg_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB48_2
; CHECK-NOLSE-O0-NEXT: LBB48_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB48_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB48_1
; CHECK-NOLSE-O0-NEXT: b LBB48_5
@@ -4190,9 +4190,9 @@ define i32 @atomicrmw_sub_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB49_2
; CHECK-NOLSE-O0-NEXT: LBB49_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB49_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB49_1
; CHECK-NOLSE-O0-NEXT: b LBB49_5
@@ -4287,9 +4287,9 @@ define i32 @atomicrmw_and_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB50_2
; CHECK-NOLSE-O0-NEXT: LBB50_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB50_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB50_1
; CHECK-NOLSE-O0-NEXT: b LBB50_5
@@ -4384,9 +4384,9 @@ define i32 @atomicrmw_or_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB51_2
; CHECK-NOLSE-O0-NEXT: LBB51_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB51_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB51_1
; CHECK-NOLSE-O0-NEXT: b LBB51_5
@@ -4477,9 +4477,9 @@ define i32 @atomicrmw_xor_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB52_2
; CHECK-NOLSE-O0-NEXT: LBB52_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB52_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB52_1
; CHECK-NOLSE-O0-NEXT: b LBB52_5
@@ -4572,9 +4572,9 @@ define i32 @atomicrmw_min_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB53_2
; CHECK-NOLSE-O0-NEXT: LBB53_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB53_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB53_1
; CHECK-NOLSE-O0-NEXT: b LBB53_5
@@ -4605,9 +4605,9 @@ define i32 @atomicrmw_min_i32(ptr %ptr, i32 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel w1, w0, w8, le
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas4_acq
; CHECK-OUTLINE-O0-NEXT: ldr w8, [sp, #8] ; 4-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs w8, w0, w8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #28] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB53_1
; CHECK-OUTLINE-O0-NEXT: b LBB53_2
@@ -4686,9 +4686,9 @@ define i32 @atomicrmw_max_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB54_2
; CHECK-NOLSE-O0-NEXT: LBB54_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB54_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB54_1
; CHECK-NOLSE-O0-NEXT: b LBB54_5
@@ -4719,9 +4719,9 @@ define i32 @atomicrmw_max_i32(ptr %ptr, i32 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel w1, w0, w8, gt
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas4_rel
; CHECK-OUTLINE-O0-NEXT: ldr w8, [sp, #8] ; 4-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs w8, w0, w8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #28] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB54_1
; CHECK-OUTLINE-O0-NEXT: b LBB54_2
@@ -4800,9 +4800,9 @@ define i32 @atomicrmw_umin_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB55_2
; CHECK-NOLSE-O0-NEXT: LBB55_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB55_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB55_1
; CHECK-NOLSE-O0-NEXT: b LBB55_5
@@ -4833,9 +4833,9 @@ define i32 @atomicrmw_umin_i32(ptr %ptr, i32 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel w1, w0, w8, ls
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas4_acq_rel
; CHECK-OUTLINE-O0-NEXT: ldr w8, [sp, #8] ; 4-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs w8, w0, w8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #28] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB55_1
; CHECK-OUTLINE-O0-NEXT: b LBB55_2
@@ -4914,9 +4914,9 @@ define i32 @atomicrmw_umax_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB56_2
; CHECK-NOLSE-O0-NEXT: LBB56_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB56_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB56_1
; CHECK-NOLSE-O0-NEXT: b LBB56_5
@@ -4947,9 +4947,9 @@ define i32 @atomicrmw_umax_i32(ptr %ptr, i32 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel w1, w0, w8, hi
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas4_relax
; CHECK-OUTLINE-O0-NEXT: ldr w8, [sp, #8] ; 4-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs w8, w0, w8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #28] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB56_1
; CHECK-OUTLINE-O0-NEXT: b LBB56_2
@@ -5026,9 +5026,9 @@ define i64 @atomicrmw_add_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB57_2
; CHECK-NOLSE-O0-NEXT: LBB57_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB57_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB57_1
; CHECK-NOLSE-O0-NEXT: b LBB57_5
@@ -5117,9 +5117,9 @@ define i64 @atomicrmw_xchg_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB58_2
; CHECK-NOLSE-O0-NEXT: LBB58_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB58_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB58_1
; CHECK-NOLSE-O0-NEXT: b LBB58_5
@@ -5210,9 +5210,9 @@ define i64 @atomicrmw_sub_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB59_2
; CHECK-NOLSE-O0-NEXT: LBB59_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB59_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB59_1
; CHECK-NOLSE-O0-NEXT: b LBB59_5
@@ -5307,9 +5307,9 @@ define i64 @atomicrmw_and_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB60_2
; CHECK-NOLSE-O0-NEXT: LBB60_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB60_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB60_1
; CHECK-NOLSE-O0-NEXT: b LBB60_5
@@ -5404,9 +5404,9 @@ define i64 @atomicrmw_or_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB61_2
; CHECK-NOLSE-O0-NEXT: LBB61_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB61_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB61_1
; CHECK-NOLSE-O0-NEXT: b LBB61_5
@@ -5497,9 +5497,9 @@ define i64 @atomicrmw_xor_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB62_2
; CHECK-NOLSE-O0-NEXT: LBB62_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB62_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB62_1
; CHECK-NOLSE-O0-NEXT: b LBB62_5
@@ -5592,9 +5592,9 @@ define i64 @atomicrmw_min_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB63_2
; CHECK-NOLSE-O0-NEXT: LBB63_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB63_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB63_1
; CHECK-NOLSE-O0-NEXT: b LBB63_5
@@ -5625,9 +5625,9 @@ define i64 @atomicrmw_min_i64(ptr %ptr, i64 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel x1, x0, x8, le
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas8_acq
; CHECK-OUTLINE-O0-NEXT: ldr x8, [sp, #8] ; 8-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs x8, x0, x8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #40] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB63_1
; CHECK-OUTLINE-O0-NEXT: b LBB63_2
@@ -5706,9 +5706,9 @@ define i64 @atomicrmw_max_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB64_2
; CHECK-NOLSE-O0-NEXT: LBB64_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB64_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB64_1
; CHECK-NOLSE-O0-NEXT: b LBB64_5
@@ -5739,9 +5739,9 @@ define i64 @atomicrmw_max_i64(ptr %ptr, i64 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel x1, x0, x8, gt
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas8_rel
; CHECK-OUTLINE-O0-NEXT: ldr x8, [sp, #8] ; 8-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs x8, x0, x8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #40] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB64_1
; CHECK-OUTLINE-O0-NEXT: b LBB64_2
@@ -5820,9 +5820,9 @@ define i64 @atomicrmw_umin_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB65_2
; CHECK-NOLSE-O0-NEXT: LBB65_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB65_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB65_1
; CHECK-NOLSE-O0-NEXT: b LBB65_5
@@ -5853,9 +5853,9 @@ define i64 @atomicrmw_umin_i64(ptr %ptr, i64 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel x1, x0, x8, ls
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas8_acq_rel
; CHECK-OUTLINE-O0-NEXT: ldr x8, [sp, #8] ; 8-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs x8, x0, x8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #40] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB65_1
; CHECK-OUTLINE-O0-NEXT: b LBB65_2
@@ -5934,9 +5934,9 @@ define i64 @atomicrmw_umax_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB66_2
; CHECK-NOLSE-O0-NEXT: LBB66_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB66_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB66_1
; CHECK-NOLSE-O0-NEXT: b LBB66_5
@@ -5967,9 +5967,9 @@ define i64 @atomicrmw_umax_i64(ptr %ptr, i64 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel x1, x0, x8, hi
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas8_relax
; CHECK-OUTLINE-O0-NEXT: ldr x8, [sp, #8] ; 8-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs x8, x0, x8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #40] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB66_1
; CHECK-OUTLINE-O0-NEXT: b LBB66_2
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-extract-vec-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-extract-vec-elt.mir
index a2116cc..c2a38e2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-extract-vec-elt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-extract-vec-elt.mir
@@ -192,8 +192,8 @@ body: |
...
---
+# This test checks that this combine runs after the insertvec->build_vector combine.
name: extract_from_insert
-alignment: 4
tracksRegLiveness: true
liveins:
- { reg: '$x0' }
@@ -203,8 +203,6 @@ frameInfo:
body: |
bb.1:
liveins: $x0, $x1
- ; This test checks that this combine runs after the insertvec->build_vector
- ; combine.
; CHECK-LABEL: name: extract_from_insert
; CHECK: liveins: $x0, $x1
; CHECK-NEXT: {{ $}}
@@ -247,3 +245,298 @@ body: |
RET_ReallyLR implicit $x0
...
+---
+name: extract_from_vector_undef
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_vector_undef
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %extract:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = G_IMPLICIT_DEF
+ %idx:_(s32) = G_CONSTANT i32 -2
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %vec(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_index_undef
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: extract_from_index_undef
+ ; CHECK: %extract:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = G_IMPLICIT_DEF
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %vec(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_index_too_large
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_index_too_large
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %extract:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = G_CONSTANT i32 3000
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %vec(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_with_freeze
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_with_freeze
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %vec:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: %idx:_(s32) = COPY $w1
+ ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT %vec(<2 x s64>), %idx(s32)
+ ; CHECK-NEXT: %extract:_(s64) = G_FREEZE [[EVEC]]
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = COPY $w1
+ %fvec:_(<2 x s64>) = G_FREEZE %vec
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %fvec(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_insert_symmetry
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_insert_symmetry
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %element:_(s64) = COPY $x1
+ ; CHECK-NEXT: $x0 = COPY %element(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = COPY $w1
+ %element:_(s64) = COPY $x1
+ %invec:_(<2 x s64>) = G_INSERT_VECTOR_ELT %vec(<2 x s64>), %element(s64), %idx(s32)
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %invec(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_insert_with_different_consts
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_insert_with_different_consts
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %vec:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: %idx2:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %extract:_(s64) = G_EXTRACT_VECTOR_ELT %vec(<2 x s64>), %idx2(s32)
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = G_CONSTANT i32 0
+ %idx2:_(s32) = G_CONSTANT i32 1
+ %element:_(s64) = COPY $x1
+ %invec:_(<2 x s64>) = G_INSERT_VECTOR_ELT %vec(<2 x s64>), %element(s64), %idx(s32)
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %invec(<2 x s64>), %idx2(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_build_vector_non_const
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_build_vector_non_const
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %idx:_(s32) = COPY $w0
+ ; CHECK-NEXT: %arg1:_(s64) = COPY $x0
+ ; CHECK-NEXT: %arg2:_(s64) = COPY $x1
+ ; CHECK-NEXT: %bv:_(<2 x s64>) = G_BUILD_VECTOR %arg1(s64), %arg2(s64)
+ ; CHECK-NEXT: %extract:_(s64) = G_EXTRACT_VECTOR_ELT %bv(<2 x s64>), %idx(s32)
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = COPY $w0
+ %arg1:_(s64) = COPY $x0
+ %arg2:_(s64) = COPY $x1
+ %bv:_(<2 x s64>) = G_BUILD_VECTOR %arg1(s64), %arg2(s64)
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %bv(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_build_vector_const
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_build_vector_const
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %arg1:_(s64) = COPY $x0
+ ; CHECK-NEXT: $x0 = COPY %arg1(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %idx:_(s32) = G_CONSTANT i32 0
+ %arg1:_(s64) = COPY $x0
+ %arg2:_(s64) = COPY $x1
+ %bv:_(<2 x s64>) = G_BUILD_VECTOR %arg1(s64), %arg2(s64)
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %bv(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: extract_from_build_vector_trunc_const2
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_build_vector_trunc_const2
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %arg1:_(s64) = COPY $x0
+ ; CHECK-NEXT: %extract:_(s32) = G_TRUNC %arg1(s64)
+ ; CHECK-NEXT: $w0 = COPY %extract(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %vec:_(<2 x s64>) = COPY $q0
+ %arg1:_(s64) = COPY $x0
+ %arg2:_(s64) = COPY $x1
+ %arg3:_(s64) = COPY $x0
+ %arg4:_(s64) = COPY $x1
+ %idx:_(s32) = G_CONSTANT i32 0
+ %bv:_(<4 x s32>) = G_BUILD_VECTOR_TRUNC %arg1(s64), %arg2(s64), %arg3(s64), %arg4(s64)
+ %extract:_(s32) = G_EXTRACT_VECTOR_ELT %bv(<4 x s32>), %idx(s32)
+ $w0 = COPY %extract(s32)
+ RET_ReallyLR implicit $x0
+...
+---
+name: extract_from_build_vector_trunc2
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_build_vector_trunc2
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %arg1:_(s64) = COPY $x0
+ ; CHECK-NEXT: %arg2:_(s64) = COPY $x1
+ ; CHECK-NEXT: %idx:_(s32) = COPY $w0
+ ; CHECK-NEXT: %bv:_(<2 x s32>) = G_BUILD_VECTOR_TRUNC %arg1(s64), %arg2(s64)
+ ; CHECK-NEXT: %extract:_(s32) = G_EXTRACT_VECTOR_ELT %bv(<2 x s32>), %idx(s32)
+ ; CHECK-NEXT: $w0 = COPY %extract(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %arg1:_(s64) = COPY $x0
+ %arg2:_(s64) = COPY $x1
+ %idx:_(s32) = COPY $w0
+ %bv:_(<2 x s32>) = G_BUILD_VECTOR_TRUNC %arg1(s64), %arg2(s64)
+ %extract:_(s32) = G_EXTRACT_VECTOR_ELT %bv(<2 x s32>), %idx(s32)
+ $w0 = COPY %extract(s32)
+ RET_ReallyLR implicit $x0
+...
+---
+name: extract_from_build_vector_trunc_const3
+alignment: 4
+liveins:
+ - { reg: '$x0' }
+ - { reg: '$x1' }
+frameInfo:
+ maxAlignment: 1
+body: |
+ bb.1:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: extract_from_build_vector_trunc_const3
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %arg1:_(s128) = COPY $q0
+ ; CHECK-NEXT: %extract:_(s64) = G_TRUNC %arg1(s128)
+ ; CHECK-NEXT: $x0 = COPY %extract(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %arg1:_(s128) = COPY $q0
+ %arg2:_(s128) = COPY $q1
+ %idx:_(s32) = G_CONSTANT i32 0
+ %bv:_(<2 x s64>) = G_BUILD_VECTOR_TRUNC %arg1(s128), %arg2(s128)
+ %extract:_(s64) = G_EXTRACT_VECTOR_ELT %bv(<2 x s64>), %idx(s32)
+ $x0 = COPY %extract(s64)
+ RET_ReallyLR implicit $x0
+...
+---
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-overflow.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-overflow.mir
new file mode 100644
index 0000000..ec66892
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-overflow.mir
@@ -0,0 +1,178 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -run-pass=aarch64-prelegalizer-combiner -mtriple aarch64-unknown-unknown %s -o - | FileCheck %s
+
+---
+name: add_unused
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_unused
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: %add:_(s32) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $w0 = COPY %add(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = COPY $w1
+ %add:_(s32), %o:_(s1) = G_SADDO %0, %1
+ $w0 = COPY %add(s32)
+ RET_ReallyLR implicit $w0
+...
+---
+name: add_canon
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_canon
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: %const:_(s32) = G_CONSTANT i32 10
+ ; CHECK-NEXT: %add:_(s32), %o:_(s1) = G_SADDO [[COPY]], %const
+ ; CHECK-NEXT: %o_wide:_(s32) = G_ZEXT %o(s1)
+ ; CHECK-NEXT: $w0 = COPY %add(s32)
+ ; CHECK-NEXT: $w1 = COPY %o_wide(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = COPY $w1
+ %const:_(s32) = G_CONSTANT i32 10
+ %add:_(s32), %o:_(s1) = G_SADDO %const, %1
+ %o_wide:_(s32) = G_ZEXT %o(s1)
+ $w0 = COPY %add(s32)
+ $w1 = COPY %o_wide
+ RET_ReallyLR implicit $w0
+...
+---
+name: add_const_fold
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_const_fold
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %add:_(s32) = G_CONSTANT i32 21
+ ; CHECK-NEXT: %o_wide:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: $w0 = COPY %add(s32)
+ ; CHECK-NEXT: $w1 = COPY %o_wide(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = COPY $w1
+ %const:_(s32) = G_CONSTANT i32 10
+ %const1:_(s32) = G_CONSTANT i32 11
+ %add:_(s32), %o:_(s1) = G_UADDO %const, %const1
+ %o_wide:_(s32) = G_ZEXT %o(s1)
+ $w0 = COPY %add(s32)
+ $w1 = COPY %o_wide
+ RET_ReallyLR implicit $w0
+...
+---
+name: add_add_zero
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_add_zero
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w2
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $w1 = COPY [[C]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = COPY $w1
+ %2:_(s32) = COPY $w2
+ %const:_(s32) = G_CONSTANT i32 10
+ %addl:_(s32) = nsw G_ADD %2, %const
+ %const1:_(s32) = G_CONSTANT i32 -10
+ %add:_(s32), %o:_(s1) = G_SADDO %addl, %const1
+ %o_wide:_(s32) = G_ZEXT %o(s1)
+ $w0 = COPY %add(s32)
+ $w1 = COPY %o_wide
+ RET_ReallyLR implicit $w0
+...
+---
+name: add_multiuse
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_multiuse
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: %const:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $w1 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $w2 = COPY %const(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %const:_(s32) = G_CONSTANT i32 0
+ %add:_(s32), %o:_(s1) = G_SADDO %0, %const
+ %o_wide:_(s32) = G_ZEXT %o(s1)
+ $w0 = COPY %add(s32)
+ $w1 = COPY %add(s32)
+ $w2 = COPY %o_wide
+ RET_ReallyLR implicit $w0
+...
+---
+name: add_vector
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_vector
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $w3
+ ; CHECK-NEXT: %bv0:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY]](s32), [[COPY1]](s32)
+ ; CHECK-NEXT: %bv1:_(<4 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32), [[COPY2]](s32), [[COPY3]](s32)
+ ; CHECK-NEXT: %add:_(<4 x s32>), %o:_(<4 x s1>) = G_UADDO %bv0, %bv1
+ ; CHECK-NEXT: %o_wide:_(<4 x s32>) = G_ZEXT %o(<4 x s1>)
+ ; CHECK-NEXT: $q0 = COPY %add(<4 x s32>)
+ ; CHECK-NEXT: $q1 = COPY %o_wide(<4 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = COPY $w1
+ %2:_(s32) = COPY $w2
+ %3:_(s32) = COPY $w3
+ %bv0:_(<4 x s32>) = G_BUILD_VECTOR %0:_(s32), %1:_(s32), %0:_(s32), %1:_(s32)
+ %bv1:_(<4 x s32>) = G_BUILD_VECTOR %2:_(s32), %3:_(s32), %2:_(s32), %3:_(s32)
+ %add:_(<4 x s32>), %o:_(<4 x s1>) = G_UADDO %bv0, %bv1
+ %o_wide:_(<4 x s32>) = G_ZEXT %o(<4 x s1>)
+ $q0 = COPY %add(<4 x s32>)
+ $q1 = COPY %o_wide
+ RET_ReallyLR implicit $w0
+...
+---
+name: add_splat_vector
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_splat_vector
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: %bv0:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY]](s32), [[COPY1]](s32)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; CHECK-NEXT: %o:_(<4 x s1>) = G_BUILD_VECTOR [[C]](s1), [[C]](s1), [[C]](s1), [[C]](s1)
+ ; CHECK-NEXT: %o_wide:_(<4 x s32>) = G_ZEXT %o(<4 x s1>)
+ ; CHECK-NEXT: $q0 = COPY %bv0(<4 x s32>)
+ ; CHECK-NEXT: $q1 = COPY %o_wide(<4 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = COPY $w1
+ %2:_(s32) = COPY $w2
+ %3:_(s32) = COPY $w3
+ %const:_(s32) = G_CONSTANT i32 0
+ %bv0:_(<4 x s32>) = G_BUILD_VECTOR %0:_(s32), %1:_(s32), %0:_(s32), %1:_(s32)
+ %bv1:_(<4 x s32>) = G_BUILD_VECTOR %const:_(s32), %const:_(s32), %const:_(s32), %const:_(s32)
+ %add:_(<4 x s32>), %o:_(<4 x s1>) = G_SADDO %bv0, %bv1
+ %o_wide:_(<4 x s32>) = G_ZEXT %o(<4 x s1>)
+ $q0 = COPY %add(<4 x s32>)
+ $q1 = COPY %o_wide
+ RET_ReallyLR implicit $w0
+...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
index 8aea944..ceef0c4 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
@@ -65,22 +65,17 @@ define <8 x i16> @combine_vec_udiv_nonuniform(<8 x i16> %x) {
; GISEL-NEXT: ushl v1.8h, v0.8h, v1.8h
; GISEL-NEXT: umull2 v3.4s, v1.8h, v2.8h
; GISEL-NEXT: umull v1.4s, v1.4h, v2.4h
-; GISEL-NEXT: uzp2 v1.8h, v1.8h, v3.8h
-; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI1_1]
+; GISEL-NEXT: ldr q2, [x8, :lo12:.LCPI1_1]
; GISEL-NEXT: adrp x8, .LCPI1_0
-; GISEL-NEXT: sub v2.8h, v0.8h, v1.8h
-; GISEL-NEXT: umull2 v4.4s, v2.8h, v3.8h
-; GISEL-NEXT: umull v2.4s, v2.4h, v3.4h
-; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI1_0]
-; GISEL-NEXT: adrp x8, .LCPI1_4
-; GISEL-NEXT: uzp2 v2.8h, v2.8h, v4.8h
-; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI1_4]
-; GISEL-NEXT: add v1.8h, v2.8h, v1.8h
-; GISEL-NEXT: neg v2.8h, v3.8h
-; GISEL-NEXT: movi v3.8h, #1
-; GISEL-NEXT: ushl v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: cmeq v2.8h, v4.8h, v3.8h
-; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
+; GISEL-NEXT: uzp2 v1.8h, v1.8h, v3.8h
+; GISEL-NEXT: sub v0.8h, v0.8h, v1.8h
+; GISEL-NEXT: umull2 v3.4s, v0.8h, v2.8h
+; GISEL-NEXT: umull v0.4s, v0.4h, v2.4h
+; GISEL-NEXT: ldr q2, [x8, :lo12:.LCPI1_0]
+; GISEL-NEXT: uzp2 v0.8h, v0.8h, v3.8h
+; GISEL-NEXT: add v0.8h, v0.8h, v1.8h
+; GISEL-NEXT: neg v1.8h, v2.8h
+; GISEL-NEXT: ushl v0.8h, v0.8h, v1.8h
; GISEL-NEXT: ret
%1 = udiv <8 x i16> %x, <i16 23, i16 34, i16 -23, i16 56, i16 128, i16 -1, i16 -256, i16 -32768>
ret <8 x i16> %1
@@ -107,21 +102,16 @@ define <8 x i16> @combine_vec_udiv_nonuniform2(<8 x i16> %x) {
; GISEL-NEXT: adrp x8, .LCPI2_2
; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI2_2]
; GISEL-NEXT: adrp x8, .LCPI2_1
-; GISEL-NEXT: ldr q2, [x8, :lo12:.LCPI2_1]
+; GISEL-NEXT: neg v1.8h, v1.8h
+; GISEL-NEXT: ushl v0.8h, v0.8h, v1.8h
+; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI2_1]
; GISEL-NEXT: adrp x8, .LCPI2_0
+; GISEL-NEXT: umull2 v2.4s, v0.8h, v1.8h
+; GISEL-NEXT: umull v0.4s, v0.4h, v1.4h
+; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI2_0]
; GISEL-NEXT: neg v1.8h, v1.8h
-; GISEL-NEXT: ushl v1.8h, v0.8h, v1.8h
-; GISEL-NEXT: umull2 v3.4s, v1.8h, v2.8h
-; GISEL-NEXT: umull v1.4s, v1.4h, v2.4h
-; GISEL-NEXT: ldr q2, [x8, :lo12:.LCPI2_0]
-; GISEL-NEXT: adrp x8, .LCPI2_3
-; GISEL-NEXT: neg v2.8h, v2.8h
-; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI2_3]
-; GISEL-NEXT: uzp2 v1.8h, v1.8h, v3.8h
-; GISEL-NEXT: movi v3.8h, #1
-; GISEL-NEXT: ushl v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: cmeq v2.8h, v4.8h, v3.8h
-; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
+; GISEL-NEXT: uzp2 v0.8h, v0.8h, v2.8h
+; GISEL-NEXT: ushl v0.8h, v0.8h, v1.8h
; GISEL-NEXT: ret
%1 = udiv <8 x i16> %x, <i16 -34, i16 35, i16 36, i16 -37, i16 38, i16 -39, i16 40, i16 -41>
ret <8 x i16> %1
@@ -145,21 +135,16 @@ define <8 x i16> @combine_vec_udiv_nonuniform3(<8 x i16> %x) {
; GISEL-LABEL: combine_vec_udiv_nonuniform3:
; GISEL: // %bb.0:
; GISEL-NEXT: adrp x8, .LCPI3_1
-; GISEL-NEXT: movi v3.8h, #1
; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI3_1]
; GISEL-NEXT: adrp x8, .LCPI3_0
; GISEL-NEXT: umull2 v2.4s, v0.8h, v1.8h
; GISEL-NEXT: umull v1.4s, v0.4h, v1.4h
; GISEL-NEXT: uzp2 v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: sub v2.8h, v0.8h, v1.8h
-; GISEL-NEXT: usra v1.8h, v2.8h, #1
-; GISEL-NEXT: ldr q2, [x8, :lo12:.LCPI3_0]
-; GISEL-NEXT: adrp x8, .LCPI3_2
-; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI3_2]
-; GISEL-NEXT: neg v2.8h, v2.8h
-; GISEL-NEXT: ushl v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: cmeq v2.8h, v4.8h, v3.8h
-; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
+; GISEL-NEXT: sub v0.8h, v0.8h, v1.8h
+; GISEL-NEXT: usra v1.8h, v0.8h, #1
+; GISEL-NEXT: ldr q0, [x8, :lo12:.LCPI3_0]
+; GISEL-NEXT: neg v0.8h, v0.8h
+; GISEL-NEXT: ushl v0.8h, v1.8h, v0.8h
; GISEL-NEXT: ret
%1 = udiv <8 x i16> %x, <i16 7, i16 23, i16 25, i16 27, i16 31, i16 47, i16 63, i16 127>
ret <8 x i16> %1
@@ -184,19 +169,19 @@ define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) {
;
; GISEL-LABEL: combine_vec_udiv_nonuniform4:
; GISEL: // %bb.0:
+; GISEL-NEXT: adrp x8, .LCPI4_2
+; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI4_2]
; GISEL-NEXT: adrp x8, .LCPI4_1
-; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI4_1]
+; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI4_1]
; GISEL-NEXT: adrp x8, .LCPI4_0
-; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI4_0]
-; GISEL-NEXT: adrp x8, .LCPI4_2
; GISEL-NEXT: umull2 v2.8h, v0.16b, v1.16b
; GISEL-NEXT: umull v1.8h, v0.8b, v1.8b
-; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI4_2]
+; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI4_0]
; GISEL-NEXT: uzp2 v1.16b, v1.16b, v2.16b
; GISEL-NEXT: neg v2.16b, v3.16b
-; GISEL-NEXT: movi v3.16b, #1
+; GISEL-NEXT: shl v3.16b, v4.16b, #7
; GISEL-NEXT: ushl v1.16b, v1.16b, v2.16b
-; GISEL-NEXT: cmeq v2.16b, v4.16b, v3.16b
+; GISEL-NEXT: sshr v2.16b, v3.16b, #7
; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
; GISEL-NEXT: ret
%div = udiv <16 x i8> %x, <i8 -64, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -232,10 +217,10 @@ define <8 x i16> @pr38477(<8 x i16> %a0) {
;
; GISEL-LABEL: pr38477:
; GISEL: // %bb.0:
+; GISEL-NEXT: adrp x8, .LCPI5_3
+; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI5_3]
; GISEL-NEXT: adrp x8, .LCPI5_2
-; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI5_2]
-; GISEL-NEXT: adrp x8, .LCPI5_1
-; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI5_1]
+; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI5_2]
; GISEL-NEXT: adrp x8, .LCPI5_0
; GISEL-NEXT: umull2 v2.4s, v0.8h, v1.8h
; GISEL-NEXT: umull v1.4s, v0.4h, v1.4h
@@ -243,15 +228,16 @@ define <8 x i16> @pr38477(<8 x i16> %a0) {
; GISEL-NEXT: sub v2.8h, v0.8h, v1.8h
; GISEL-NEXT: umull2 v4.4s, v2.8h, v3.8h
; GISEL-NEXT: umull v2.4s, v2.4h, v3.4h
-; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI5_0]
-; GISEL-NEXT: adrp x8, .LCPI5_3
+; GISEL-NEXT: ldr d3, [x8, :lo12:.LCPI5_0]
+; GISEL-NEXT: adrp x8, .LCPI5_1
+; GISEL-NEXT: ushll v3.8h, v3.8b, #0
; GISEL-NEXT: uzp2 v2.8h, v2.8h, v4.8h
-; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI5_3]
+; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI5_1]
+; GISEL-NEXT: shl v3.8h, v3.8h, #15
; GISEL-NEXT: add v1.8h, v2.8h, v1.8h
-; GISEL-NEXT: neg v2.8h, v3.8h
-; GISEL-NEXT: movi v3.8h, #1
+; GISEL-NEXT: neg v2.8h, v4.8h
; GISEL-NEXT: ushl v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: cmeq v2.8h, v4.8h, v3.8h
+; GISEL-NEXT: sshr v2.8h, v3.8h, #15
; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
; GISEL-NEXT: ret
%1 = udiv <8 x i16> %a0, <i16 1, i16 119, i16 73, i16 -111, i16 -3, i16 118, i16 32, i16 31>
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir
index ee33b9c..02233b9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir
@@ -6,7 +6,9 @@ body: |
bb.1:
liveins: $w0
; CHECK-LABEL: name: udiv_by_scalar_const
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 818089009
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
@@ -68,44 +70,32 @@ body: |
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 23
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 34
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -23
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 56
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 128
- ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
- ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 -256
- ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C2]](s16), [[C3]](s16), [[C4]](s16), [[C5]](s16), [[C6]](s16), [[C7]](s16)
- ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
- ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 25645
- ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
- ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 -3855
- ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
- ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s16) = G_CONSTANT i16 8195
- ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s16) = G_CONSTANT i16 13
- ; CHECK-NEXT: [[C15:%[0-9]+]]:_(s16) = G_CONSTANT i16 3
- ; CHECK-NEXT: [[C16:%[0-9]+]]:_(s16) = G_CONSTANT i16 9363
- ; CHECK-NEXT: [[C17:%[0-9]+]]:_(s16) = G_CONSTANT i16 512
- ; CHECK-NEXT: [[C18:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32767
- ; CHECK-NEXT: [[C19:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
- ; CHECK-NEXT: [[C20:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32639
- ; CHECK-NEXT: [[C21:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C15]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C9]](s16), [[C11]](s16), [[C13]](s16), [[C16]](s16), [[C17]](s16), [[C18]](s16), [[C20]](s16), [[C21]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C7]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C10]](s16), [[C12]](s16), [[C14]](s16), [[C8]](s16), [[C8]](s16), [[C19]](s16), [[C19]](s16), [[C8]](s16)
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[COPY]], [[BUILD_VECTOR1]](<8 x s16>)
- ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[LSHR]], [[BUILD_VECTOR2]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 25645
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -3855
+ ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
+ ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 8195
+ ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 13
+ ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 3
+ ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 9363
+ ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 512
+ ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32767
+ ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
+ ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32639
+ ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C8]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C2]](s16), [[C4]](s16), [[C6]](s16), [[C9]](s16), [[C10]](s16), [[C11]](s16), [[C13]](s16), [[C14]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C3]](s16), [[C5]](s16), [[C7]](s16), [[C1]](s16), [[C1]](s16), [[C12]](s16), [[C12]](s16), [[C1]](s16)
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[COPY]], [[BUILD_VECTOR]](<8 x s16>)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[LSHR]], [[BUILD_VECTOR1]]
; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<8 x s16>) = G_SUB [[COPY]], [[UMULH]]
- ; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[SUB]], [[BUILD_VECTOR3]]
+ ; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[SUB]], [[BUILD_VECTOR2]]
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UMULH1]], [[UMULH]]
- ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR4]](<8 x s16>)
- ; CHECK-NEXT: [[C22:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
- ; CHECK-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C22]](s16), [[C22]](s16), [[C22]](s16), [[C22]](s16), [[C22]](s16), [[C22]](s16), [[C22]](s16), [[C22]](s16)
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<8 x s1>) = G_ICMP intpred(eq), [[BUILD_VECTOR]](<8 x s16>), [[BUILD_VECTOR5]]
- ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<8 x s16>) = G_SELECT [[ICMP]](<8 x s1>), [[COPY]], [[LSHR1]]
- ; CHECK-NEXT: $q0 = COPY [[SELECT]](<8 x s16>)
+ ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR3]](<8 x s16>)
+ ; CHECK-NEXT: $q0 = COPY [[LSHR1]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<8 x s16>) = COPY $q0
%2:_(s16) = G_CONSTANT i16 23
@@ -136,38 +126,26 @@ body: |
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -34
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 35
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 36
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -37
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 38
- ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 -39
- ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 40
- ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 -41
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C2]](s16), [[C3]](s16), [[C4]](s16), [[C5]](s16), [[C6]](s16), [[C7]](s16)
- ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
- ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 16393
- ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
- ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 13
- ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 -5617
- ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
- ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s16) = G_CONSTANT i16 -7281
- ; CHECK-NEXT: [[C15:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32749
- ; CHECK-NEXT: [[C16:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
- ; CHECK-NEXT: [[C17:%[0-9]+]]:_(s16) = G_CONSTANT i16 -10347
- ; CHECK-NEXT: [[C18:%[0-9]+]]:_(s16) = G_CONSTANT i16 8197
- ; CHECK-NEXT: [[C19:%[0-9]+]]:_(s16) = G_CONSTANT i16 -13107
- ; CHECK-NEXT: [[C20:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32747
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C10]](s16), [[C10]](s16), [[C10]](s16), [[C10]](s16), [[C10]](s16), [[C10]](s16), [[C10]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C9]](s16), [[C12]](s16), [[C14]](s16), [[C15]](s16), [[C17]](s16), [[C18]](s16), [[C19]](s16), [[C20]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C11]](s16), [[C13]](s16), [[C13]](s16), [[C16]](s16), [[C13]](s16), [[C11]](s16), [[C13]](s16), [[C16]](s16)
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[COPY]], [[BUILD_VECTOR1]](<8 x s16>)
- ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[LSHR]], [[BUILD_VECTOR2]]
- ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[UMULH]], [[BUILD_VECTOR3]](<8 x s16>)
- ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16)
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<8 x s1>) = G_ICMP intpred(eq), [[BUILD_VECTOR]](<8 x s16>), [[BUILD_VECTOR4]]
- ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<8 x s16>) = G_SELECT [[ICMP]](<8 x s1>), [[COPY]], [[LSHR1]]
- ; CHECK-NEXT: $q0 = COPY [[SELECT]](<8 x s16>)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 16393
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 13
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -5617
+ ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
+ ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 -7281
+ ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32749
+ ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
+ ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 -10347
+ ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 8197
+ ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 -13107
+ ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32747
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C4]](s16), [[C6]](s16), [[C7]](s16), [[C9]](s16), [[C10]](s16), [[C11]](s16), [[C12]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C3]](s16), [[C5]](s16), [[C5]](s16), [[C8]](s16), [[C5]](s16), [[C3]](s16), [[C5]](s16), [[C8]](s16)
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[COPY]], [[BUILD_VECTOR]](<8 x s16>)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[LSHR]], [[BUILD_VECTOR1]]
+ ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[UMULH]], [[BUILD_VECTOR2]](<8 x s16>)
+ ; CHECK-NEXT: $q0 = COPY [[LSHR1]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<8 x s16>) = COPY $q0
%2:_(s16) = G_CONSTANT i16 -34
@@ -198,39 +176,28 @@ body: |
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 7
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 23
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 25
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 27
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 31
- ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 47
- ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 63
- ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 127
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C2]](s16), [[C3]](s16), [[C4]](s16), [[C5]](s16), [[C6]](s16), [[C7]](s16)
- ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 9363
- ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
- ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 25645
- ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
- ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 18351
- ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s16) = G_CONSTANT i16 12137
- ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s16) = G_CONSTANT i16 2115
- ; CHECK-NEXT: [[C15:%[0-9]+]]:_(s16) = G_CONSTANT i16 23705
- ; CHECK-NEXT: [[C16:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
- ; CHECK-NEXT: [[C17:%[0-9]+]]:_(s16) = G_CONSTANT i16 1041
- ; CHECK-NEXT: [[C18:%[0-9]+]]:_(s16) = G_CONSTANT i16 517
- ; CHECK-NEXT: [[C19:%[0-9]+]]:_(s16) = G_CONSTANT i16 6
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C10]](s16), [[C12]](s16), [[C13]](s16), [[C14]](s16), [[C15]](s16), [[C17]](s16), [[C18]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C9]](s16), [[C11]](s16), [[C11]](s16), [[C11]](s16), [[C11]](s16), [[C16]](s16), [[C16]](s16), [[C19]](s16)
- ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[COPY]], [[BUILD_VECTOR1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9363
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 25645
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 18351
+ ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 12137
+ ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 2115
+ ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 23705
+ ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
+ ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 1041
+ ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 517
+ ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 6
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C2]](s16), [[C4]](s16), [[C5]](s16), [[C6]](s16), [[C7]](s16), [[C9]](s16), [[C10]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C3]](s16), [[C3]](s16), [[C3]](s16), [[C3]](s16), [[C8]](s16), [[C8]](s16), [[C11]](s16)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[COPY]], [[BUILD_VECTOR]]
; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<8 x s16>) = G_SUB [[COPY]], [[UMULH]]
- ; CHECK-NEXT: [[C20:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C20]](s16), [[C20]](s16), [[C20]](s16), [[C20]](s16), [[C20]](s16), [[C20]](s16), [[C20]](s16), [[C20]](s16)
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[SUB]], [[BUILD_VECTOR3]](<8 x s16>)
+ ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C12]](s16), [[C12]](s16), [[C12]](s16), [[C12]](s16), [[C12]](s16), [[C12]](s16), [[C12]](s16), [[C12]](s16)
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[SUB]], [[BUILD_VECTOR2]](<8 x s16>)
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[LSHR]], [[UMULH]]
- ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR2]](<8 x s16>)
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<8 x s1>) = G_ICMP intpred(eq), [[BUILD_VECTOR]](<8 x s16>), [[BUILD_VECTOR3]]
- ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<8 x s16>) = G_SELECT [[ICMP]](<8 x s1>), [[COPY]], [[LSHR1]]
- ; CHECK-NEXT: $q0 = COPY [[SELECT]](<8 x s16>)
+ ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR1]](<8 x s16>)
+ ; CHECK-NEXT: $q0 = COPY [[LSHR1]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<8 x s16>) = COPY $q0
%2:_(s16) = G_CONSTANT i16 7
@@ -261,19 +228,17 @@ body: |
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -64
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8)
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s8) = G_CONSTANT i8 -85
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s8) = G_CONSTANT i8 7
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C3]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C4]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8)
- ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<16 x s8>) = G_UMULH [[COPY]], [[BUILD_VECTOR1]]
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<16 x s8>) = G_LSHR [[UMULH]], [[BUILD_VECTOR2]](<16 x s8>)
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8)
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<16 x s1>) = G_ICMP intpred(eq), [[BUILD_VECTOR]](<16 x s8>), [[BUILD_VECTOR3]]
- ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<16 x s8>) = G_SELECT [[ICMP]](<16 x s1>), [[COPY]], [[LSHR]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 -85
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s8) = G_CONSTANT i8 7
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C1]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C2]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<16 x s8>) = G_UMULH [[COPY]], [[BUILD_VECTOR]]
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<16 x s8>) = G_LSHR [[UMULH]], [[BUILD_VECTOR1]](<16 x s8>)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s1>) = G_BUILD_VECTOR [[C3]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1)
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<16 x s8>) = G_SELECT [[BUILD_VECTOR2]](<16 x s1>), [[COPY]], [[LSHR]]
; CHECK-NEXT: $q0 = COPY [[SELECT]](<16 x s8>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<16 x s8>) = COPY $q0
@@ -299,39 +264,31 @@ body: |
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 119
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 73
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -111
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -3
- ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 118
- ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 32
- ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 31
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C2]](s16), [[C3]](s16), [[C4]](s16), [[C5]](s16), [[C6]](s16), [[C7]](s16)
- ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
- ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 4957
- ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
- ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 6
- ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 -8079
- ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s16) = G_CONSTANT i16 4103
- ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s16) = G_CONSTANT i16 12
- ; CHECK-NEXT: [[C15:%[0-9]+]]:_(s16) = G_CONSTANT i16 16385
- ; CHECK-NEXT: [[C16:%[0-9]+]]:_(s16) = G_CONSTANT i16 14
- ; CHECK-NEXT: [[C17:%[0-9]+]]:_(s16) = G_CONSTANT i16 -29991
- ; CHECK-NEXT: [[C18:%[0-9]+]]:_(s16) = G_CONSTANT i16 2048
- ; CHECK-NEXT: [[C19:%[0-9]+]]:_(s16) = G_CONSTANT i16 2115
- ; CHECK-NEXT: [[C20:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C9]](s16), [[C12]](s16), [[C13]](s16), [[C15]](s16), [[C17]](s16), [[C18]](s16), [[C19]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C10]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C10]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C11]](s16), [[C11]](s16), [[C14]](s16), [[C16]](s16), [[C11]](s16), [[C8]](s16), [[C20]](s16)
- ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[COPY]], [[BUILD_VECTOR1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 4957
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 6
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -8079
+ ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 4103
+ ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 12
+ ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 16385
+ ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 14
+ ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 -29991
+ ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 2048
+ ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 2115
+ ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C4]](s16), [[C5]](s16), [[C7]](s16), [[C9]](s16), [[C10]](s16), [[C11]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C2]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C2]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C3]](s16), [[C3]](s16), [[C6]](s16), [[C8]](s16), [[C3]](s16), [[C]](s16), [[C12]](s16)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[COPY]], [[BUILD_VECTOR]]
; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<8 x s16>) = G_SUB [[COPY]], [[UMULH]]
- ; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[SUB]], [[BUILD_VECTOR2]]
+ ; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[SUB]], [[BUILD_VECTOR1]]
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UMULH1]], [[UMULH]]
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR3]](<8 x s16>)
- ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16)
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<8 x s1>) = G_ICMP intpred(eq), [[BUILD_VECTOR]](<8 x s16>), [[BUILD_VECTOR4]]
- ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<8 x s16>) = G_SELECT [[ICMP]](<8 x s1>), [[COPY]], [[LSHR]]
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR2]](<8 x s16>)
+ ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s1>) = G_BUILD_VECTOR [[C13]](s1), [[C14]](s1), [[C14]](s1), [[C14]](s1), [[C14]](s1), [[C14]](s1), [[C14]](s1), [[C14]](s1)
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<8 x s16>) = G_SELECT [[BUILD_VECTOR3]](<8 x s1>), [[COPY]], [[LSHR]]
; CHECK-NEXT: $q0 = COPY [[SELECT]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<8 x s16>) = COPY $q0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-nneg-disjoint.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-nneg-disjoint.ll
new file mode 100644
index 0000000..32c7423
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-nneg-disjoint.ll
@@ -0,0 +1,135 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64-linux-gnu -O0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - 2>&1 | FileCheck %s
+
+define i32 @call_nneg(i16 %a) {
+ ; CHECK-LABEL: name: call_nneg
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: %2:_(s32) = nneg G_ZEXT [[TRUNC]](s16)
+ ; CHECK-NEXT: $w0 = COPY %2(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = zext nneg i16 %a to i32
+ ret i32 %result
+}
+
+define i32 @call_not_nneg(i16 %a) {
+ ; CHECK-LABEL: name: call_not_nneg
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s16)
+ ; CHECK-NEXT: $w0 = COPY [[ZEXT]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = zext i16 %a to i32
+ ret i32 %result
+}
+
+define i32 @call_disjoint(i32 %a, i32 %b) {
+ ; CHECK-LABEL: name: call_disjoint
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: %2:_(s32) = disjoint G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $w0 = COPY %2(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = or disjoint i32 %a, %b
+ ret i32 %result
+}
+
+define i32 @call_add(i32 %a, i32 %b) {
+ ; CHECK-LABEL: name: call_add
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = nsw G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = add nsw i32 %a, %b
+ ret i32 %result
+}
+
+define i32 @call_not_disjoint(i32 %a, i32 %b) {
+ ; CHECK-LABEL: name: call_not_disjoint
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = or i32 %a, %b
+ ret i32 %result
+}
+
+define <2 x i64> @call_not_disjoint_vector(<2 x i64> %a, <2 x i64> %b) {
+ ; CHECK-LABEL: name: call_not_disjoint_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $q0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $q0 = COPY [[OR]](<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %result = or <2 x i64> %a, %b
+ ret <2 x i64> %result
+}
+
+define <2 x i64> @call_disjoint_vector(<2 x i64> %a, <2 x i64> %b) {
+ ; CHECK-LABEL: name: call_disjoint_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $q0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+ ; CHECK-NEXT: %2:_(<2 x s64>) = disjoint G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $q0 = COPY %2(<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %result = or disjoint <2 x i64> %a, %b
+ ret <2 x i64> %result
+}
+
+define <2 x i64> @call_nneg_vector(<2 x i32> %a) {
+ ; CHECK-LABEL: name: call_nneg_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+ ; CHECK-NEXT: %1:_(<2 x s64>) = nneg G_ZEXT [[COPY]](<2 x s32>)
+ ; CHECK-NEXT: $q0 = COPY %1(<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %result = zext nneg <2 x i32> %a to <2 x i64>
+ ret <2 x i64> %result
+}
+
+define <2 x i64> @call_not_nneg_vector(<2 x i32> %a) {
+ ; CHECK-LABEL: name: call_not_nneg_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<2 x s64>) = G_ZEXT [[COPY]](<2 x s32>)
+ ; CHECK-NEXT: $q0 = COPY [[ZEXT]](<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %result = zext <2 x i32> %a to <2 x i64>
+ ret <2 x i64> %result
+}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-trunc.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-trunc.ll
new file mode 100644
index 0000000..d87e9c4
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-trunc.ll
@@ -0,0 +1,90 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -O0 -mtriple=aarch64-linux-gnu -global-isel -stop-after=irtranslator %s -o - | FileCheck %s
+
+define i32 @call_trunc_no_flags(i64 %a) {
+ ; CHECK-LABEL: name: call_trunc_no_flags
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = trunc i64 %a to i32
+ ret i32 %result
+}
+
+define i32 @call_trunc_nsw_flags(i64 %a) {
+ ; CHECK-LABEL: name: call_trunc_nsw_flags
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = nsw G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = trunc nsw i64 %a to i32
+ ret i32 %result
+}
+
+define i32 @call_trunc_nuw_flags(i64 %a) {
+ ; CHECK-LABEL: name: call_trunc_nuw_flags
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = nuw G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = trunc nuw i64 %a to i32
+ ret i32 %result
+}
+
+define i32 @call_trunc_all_flags(i64 %a) {
+ ; CHECK-LABEL: name: call_trunc_all_flags
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = nuw nsw G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = trunc nsw nuw i64 %a to i32
+ ret i32 %result
+}
+
+define <2 x i64> @call_trunc_noop_signed_vector(<2 x i64> %a) {
+ ; CHECK-LABEL: name: call_trunc_noop_signed_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<2 x s32>) = nsw G_TRUNC [[COPY]](<2 x s64>)
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<2 x s64>) = G_SEXT [[TRUNC]](<2 x s32>)
+ ; CHECK-NEXT: $q0 = COPY [[SEXT]](<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %truncate = trunc nsw <2 x i64> %a to <2 x i32>
+ %result = sext <2 x i32> %truncate to <2 x i64>
+ ret <2 x i64> %result
+}
+
+define <2 x i64> @call_trunc_noop_unsigned_vector(<2 x i64> %a) {
+ ; CHECK-LABEL: name: call_trunc_noop_unsigned_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<2 x s32>) = nuw G_TRUNC [[COPY]](<2 x s64>)
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<2 x s64>) = G_ZEXT [[TRUNC]](<2 x s32>)
+ ; CHECK-NEXT: $q0 = COPY [[ZEXT]](<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %truncate = trunc nuw <2 x i64> %a to <2 x i32>
+ %result = zext <2 x i32> %truncate to <2 x i64>
+ ret <2 x i64> %result
+}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unreachable.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unreachable.ll
index fe9427d..edae903 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unreachable.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unreachable.ll
@@ -6,7 +6,7 @@ declare void @llvm.trap()
define void @unreachable() {
; CHECK-LABEL: name: unreachable
; CHECK: bb.1 (%ir-block.0):
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
unreachable
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-deinterleave2.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-deinterleave2.ll
new file mode 100644
index 0000000..10882a0
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-deinterleave2.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -O0 -mtriple=aarch64-- --global-isel --global-isel-abort=2 --verify-machineinstrs --stop-after=irtranslator %s -o - | FileCheck %s
+
+define void @vector_deinterleave2_v4i32(<4 x i32> %a) {
+ ; CHECK-LABEL: name: vector_deinterleave2_v4i32
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<4 x s32>), [[DEF]], shufflemask(0, 2)
+ ; CHECK-NEXT: [[SHUF1:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<4 x s32>), [[DEF]], shufflemask(1, 3)
+ ; CHECK-NEXT: RET_ReallyLR
+ %res = call {<2 x i32>, <2 x i32>} @llvm.experimental.vector.deinterleave2.v4i32(<4 x i32> %a)
+ ret void
+}
+
+define void @vector_deinterleave2_v8f32(<8 x float> %a) {
+ ; CHECK-LABEL: name: vector_deinterleave2_v8f32
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $q0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY]](<2 x s64>)
+ ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY1]](<2 x s64>)
+ ; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[BITCAST]](<4 x s32>), [[BITCAST1]](<4 x s32>)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[CONCAT_VECTORS]](<8 x s32>), [[DEF]], shufflemask(0, 2, 4, 6)
+ ; CHECK-NEXT: [[SHUF1:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[CONCAT_VECTORS]](<8 x s32>), [[DEF]], shufflemask(1, 3, 5, 7)
+ ; CHECK-NEXT: RET_ReallyLR
+ %res = call {<4 x float>, <4 x float>} @llvm.experimental.vector.deinterleave2.v8f32(<8 x float> %a)
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-interleave2.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-interleave2.ll
new file mode 100644
index 0000000..f51e47a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-vector-interleave2.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -O0 -mtriple=aarch64-- --global-isel --global-isel-abort=2 --verify-machineinstrs --stop-after=irtranslator %s -o - | FileCheck %s
+
+define void @vector_interleave2_v4i32(<2 x i32> %a, <2 x i32> %b) {
+ ; CHECK-LABEL: name: vector_interleave2_v4i32
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $d0, $d1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+ ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<2 x s32>), [[COPY1]], shufflemask(0, 2, 1, 3)
+ ; CHECK-NEXT: RET_ReallyLR
+ %res = call <4 x i32> @llvm.experimental.vector.interleave2.v4i32(<2 x i32> %a, <2 x i32> %b)
+ ret void
+}
+
+define void @vector_interleave2_v8f32(<4 x float> %a, <4 x float> %b) {
+ ; CHECK-LABEL: name: vector_interleave2_v8f32
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $q0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY]](<2 x s64>)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+ ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY1]](<2 x s64>)
+ ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<8 x s32>) = G_SHUFFLE_VECTOR [[BITCAST]](<4 x s32>), [[BITCAST1]], shufflemask(0, 4, 1, 5, 2, 6, 3, 7)
+ ; CHECK-NEXT: RET_ReallyLR
+ %res = call <8 x float> @llvm.experimental.vector.interleave2.v8f32(<4 x float> %a, <4 x float> %b)
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-abs.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-abs.mir
index 3123e30..0d429ae 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-abs.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-abs.mir
@@ -8,11 +8,12 @@ body: |
bb.0:
; CHECK-LABEL: name: abs_s32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
- ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[C]](s64)
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[ASHR]]
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ADD]], [[ASHR]]
- ; CHECK-NEXT: $w0 = COPY [[XOR]](s32)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY]]
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[COPY]], [[SUB]]
+ ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
+ ;
; CHECK-CSSC-LABEL: name: abs_s32
; CHECK-CSSC: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-CSSC-NEXT: [[ABS:%[0-9]+]]:_(s32) = G_ABS [[COPY]]
@@ -28,11 +29,12 @@ body: |
bb.0:
; CHECK-LABEL: name: abs_s64
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
- ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s64)
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[ASHR]]
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[ADD]], [[ASHR]]
- ; CHECK-NEXT: $x0 = COPY [[XOR]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY]]
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), [[COPY]](s64), [[C]]
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s32), [[COPY]], [[SUB]]
+ ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
+ ;
; CHECK-CSSC-LABEL: name: abs_s64
; CHECK-CSSC: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK-CSSC-NEXT: [[ABS:%[0-9]+]]:_(s64) = G_ABS [[COPY]]
@@ -55,6 +57,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<4 x s16>) = G_ABS [[COPY]]
; CHECK-NEXT: $d0 = COPY [[ABS]](<4 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
+ ;
; CHECK-CSSC-LABEL: name: abs_v4s16
; CHECK-CSSC: liveins: $d0
; CHECK-CSSC-NEXT: {{ $}}
@@ -82,6 +85,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<8 x s16>) = G_ABS [[COPY]]
; CHECK-NEXT: $q0 = COPY [[ABS]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
+ ;
; CHECK-CSSC-LABEL: name: abs_v8s16
; CHECK-CSSC: liveins: $q0
; CHECK-CSSC-NEXT: {{ $}}
@@ -109,6 +113,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<2 x s32>) = G_ABS [[COPY]]
; CHECK-NEXT: $d0 = COPY [[ABS]](<2 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
+ ;
; CHECK-CSSC-LABEL: name: abs_v2s32
; CHECK-CSSC: liveins: $d0
; CHECK-CSSC-NEXT: {{ $}}
@@ -136,6 +141,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<4 x s32>) = G_ABS [[COPY]]
; CHECK-NEXT: $q0 = COPY [[ABS]](<4 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
+ ;
; CHECK-CSSC-LABEL: name: abs_v4s32
; CHECK-CSSC: liveins: $q0
; CHECK-CSSC-NEXT: {{ $}}
@@ -163,6 +169,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<8 x s8>) = G_ABS [[COPY]]
; CHECK-NEXT: $d0 = COPY [[ABS]](<8 x s8>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
+ ;
; CHECK-CSSC-LABEL: name: abs_v4s8
; CHECK-CSSC: liveins: $d0
; CHECK-CSSC-NEXT: {{ $}}
@@ -190,6 +197,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<16 x s8>) = G_ABS [[COPY]]
; CHECK-NEXT: $q0 = COPY [[ABS]](<16 x s8>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
+ ;
; CHECK-CSSC-LABEL: name: abs_v16s8
; CHECK-CSSC: liveins: $q0
; CHECK-CSSC-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir
index c9556e2..a63d8b9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir
@@ -121,10 +121,11 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $h1
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s16)
- ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY1]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32)
- ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[COPY]](s16), [[COPY1]](s16), [[DEF]](s16), [[DEF]](s16)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[BUILD_VECTOR]](<4 x s16>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[ANYEXT]](<4 x s32>)
+ ; CHECK-NEXT: $d0 = COPY [[UV]](<2 x s32>)
; CHECK-NEXT: RET_ReallyLR
%0:_(s16) = COPY $h0
%1:_(s16) = COPY $h1
@@ -141,8 +142,8 @@ body: |
; CHECK-LABEL: name: widen_v2s8
; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
- ; CHECK-NEXT: %3:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[DEF]](s32)
- ; CHECK-NEXT: $d0 = COPY %3(<2 x s32>)
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[DEF]](s32)
+ ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<2 x s32>)
; CHECK-NEXT: RET_ReallyLR
%0:_(s8) = G_IMPLICIT_DEF
%1:_(s8) = G_IMPLICIT_DEF
@@ -157,12 +158,14 @@ name: widen_v4s8
body: |
bb.0:
; CHECK-LABEL: name: widen_v4s8
- ; CHECK: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY [[DEF]](s16)
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY [[DEF]](s16)
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY [[DEF]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[COPY]](s16), [[COPY1]](s16), [[COPY2]](s16), [[DEF]](s16)
- ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; CHECK: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF3:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF1]](s8), [[DEF2]](s8), [[DEF3]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR]](<8 x s8>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT]](<8 x s16>)
+ ; CHECK-NEXT: $d0 = COPY [[UV]](<4 x s16>)
; CHECK-NEXT: RET_ReallyLR
%0:_(s8) = G_IMPLICIT_DEF
%1:_(s8) = G_IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir
index 6a6e0b6..26230ef 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir
@@ -12,22 +12,6 @@ body: |
liveins: $x0, $x1, $x2, $x3, $x4
- ; CHECK-LABEL: name: compare_swap_128
- ; CHECK: liveins: $x0_x1, $x1, $x0, $x1, $x2, $x3, $x4
- ; CHECK: [[COPY:%[0-9]+]]:gpr64(p0) = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
- ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
- ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
- ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
- ; CHECK: [[COPY5:%[0-9]+]]:gpr64(s64) = COPY [[COPY1]](s64)
- ; CHECK: [[COPY6:%[0-9]+]]:gpr64(s64) = COPY [[COPY2]](s64)
- ; CHECK: [[COPY7:%[0-9]+]]:gpr64(s64) = COPY [[COPY3]](s64)
- ; CHECK: [[COPY8:%[0-9]+]]:gpr64(s64) = COPY [[COPY4]](s64)
- ; CHECK: early-clobber %13:gpr64(s64), early-clobber %14:gpr64(s64), early-clobber %16:gpr32common = CMP_SWAP_128_ACQUIRE [[COPY]](p0), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64), [[COPY8]](s64) :: (load store acquire acquire 16)
- ; CHECK: [[COPY9:%[0-9]+]]:gpr64 = COPY %16
- ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES %13(s64), %14(s64)
- ; CHECK: G_STORE [[MV]](s128), [[COPY]](p0) :: (store 16)
- ; CHECK: RET_ReallyLR
; CHECK-NOLSE-LABEL: name: compare_swap_128
; CHECK-NOLSE: liveins: $x0_x1, $x1, $x0, $x1, $x2, $x3, $x4
; CHECK-NOLSE-NEXT: {{ $}}
@@ -40,11 +24,13 @@ body: |
; CHECK-NOLSE-NEXT: [[COPY6:%[0-9]+]]:gpr64(s64) = COPY [[COPY2]](s64)
; CHECK-NOLSE-NEXT: [[COPY7:%[0-9]+]]:gpr64(s64) = COPY [[COPY3]](s64)
; CHECK-NOLSE-NEXT: [[COPY8:%[0-9]+]]:gpr64(s64) = COPY [[COPY4]](s64)
- ; CHECK-NOLSE-NEXT: early-clobber %13:gpr64common(s64), early-clobber %14:gpr64common(s64), early-clobber %16:gpr32common = CMP_SWAP_128_ACQUIRE [[COPY]](p0), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64), [[COPY8]](s64) :: (load store acquire acquire (s128))
- ; CHECK-NOLSE-NEXT: [[COPY9:%[0-9]+]]:gpr64 = COPY %16
- ; CHECK-NOLSE-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES %13(s64), %14(s64)
- ; CHECK-NOLSE-NEXT: G_STORE [[MV]](s128), [[COPY]](p0) :: (store (s128))
+ ; CHECK-NOLSE-NEXT: early-clobber %14:gpr64common(s64), early-clobber %15:gpr64common(s64), early-clobber %17:gpr32common = CMP_SWAP_128_ACQUIRE [[COPY]](p0), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64), [[COPY8]](s64) :: (load store acquire acquire (s128))
+ ; CHECK-NOLSE-NEXT: [[COPY9:%[0-9]+]]:gpr64 = COPY %17
+ ; CHECK-NOLSE-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES %14(s64), %15(s64)
+ ; CHECK-NOLSE-NEXT: [[COPY10:%[0-9]+]]:_(s128) = COPY [[MV]](s128)
+ ; CHECK-NOLSE-NEXT: G_STORE [[COPY10]](s128), [[COPY]](p0) :: (store (s128))
; CHECK-NOLSE-NEXT: RET_ReallyLR
+ ;
; CHECK-LSE-LABEL: name: compare_swap_128
; CHECK-LSE: liveins: $x0_x1, $x1, $x0, $x1, $x2, $x3, $x4
; CHECK-LSE-NEXT: {{ $}}
@@ -59,7 +45,8 @@ body: |
; CHECK-LSE-NEXT: [[EXTRACT:%[0-9]+]]:_(s64) = G_EXTRACT [[CASPAX]](s128), 0
; CHECK-LSE-NEXT: [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT [[CASPAX]](s128), 64
; CHECK-LSE-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[EXTRACT]](s64), [[EXTRACT1]](s64)
- ; CHECK-LSE-NEXT: G_STORE [[MV]](s128), [[COPY]](p0) :: (store (s128))
+ ; CHECK-LSE-NEXT: [[COPY5:%[0-9]+]]:_(s128) = COPY [[MV]](s128)
+ ; CHECK-LSE-NEXT: G_STORE [[COPY5]](s128), [[COPY]](p0) :: (store (s128))
; CHECK-LSE-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
%3:_(s64) = COPY $x1
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
index 3c01078..05e6212 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
@@ -16,13 +16,16 @@ body: |
liveins: $x0
; CHECK-LABEL: name: cmpxchg_i32
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s64) on %ir.addr)
- ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s32), [[C]]
- ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[ATOMIC_CMPXCHG]], [[ICMP]]
- ; CHECK: $w0 = COPY [[MUL]](s32)
+ ; CHECK: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s64) on %ir.addr)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s32), [[C]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ATOMIC_CMPXCHG]](s32)
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY1]], [[ICMP]]
+ ; CHECK-NEXT: $w0 = COPY [[MUL]](s32)
%0:_(p0) = COPY $x0
%1:_(s32) = G_CONSTANT i32 0
%2:_(s32) = G_CONSTANT i32 1
@@ -40,14 +43,17 @@ body: |
liveins: $x0
; CHECK-LABEL: name: cmpxchg_i64
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s64) on %ir.addr)
- ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s64), [[C]]
- ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ICMP]](s32)
- ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ATOMIC_CMPXCHG]], [[ANYEXT]]
- ; CHECK: $x0 = COPY [[MUL]](s64)
+ ; CHECK: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s64) on %ir.addr)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s64), [[C]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ATOMIC_CMPXCHG]](s64)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ICMP]](s32)
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY1]], [[ANYEXT]]
+ ; CHECK-NEXT: $x0 = COPY [[MUL]](s64)
%0:_(p0) = COPY $x0
%1:_(s64) = G_CONSTANT i64 0
%2:_(s64) = G_CONSTANT i64 1
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ctpop-no-implicit-float.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ctpop-no-implicit-float.mir
index d2352be..27f2f0b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ctpop-no-implicit-float.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ctpop-no-implicit-float.mir
@@ -37,6 +37,7 @@ body: |
; CHECK-NEXT: %ctpop:_(s32) = G_LSHR [[MUL]], [[C7]](s64)
; CHECK-NEXT: $w0 = COPY %ctpop(s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
+ ;
; CHECK-CSSC-LABEL: name: s32
; CHECK-CSSC: liveins: $w0
; CHECK-CSSC-NEXT: {{ $}}
@@ -77,11 +78,12 @@ body: |
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C5]]
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C6]]
; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C6]]
; CHECK-NEXT: %ctpop:_(s64) = G_LSHR [[MUL]], [[C7]](s64)
; CHECK-NEXT: $x0 = COPY %ctpop(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
+ ;
; CHECK-CSSC-LABEL: name: s64
; CHECK-CSSC: liveins: $x0
; CHECK-CSSC-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
index 5662de4..f7550ce 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
@@ -48,7 +48,7 @@ define void @bar() personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: $x0 = COPY [[LOAD]](p0)
; CHECK-NEXT: BL @_Unwind_Resume, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
%exn.slot = alloca ptr
%ehselector.slot = alloca i32
%1 = invoke i32 @foo(i32 42) to label %continue unwind label %cleanup
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
index e12353c..d3db243 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
@@ -235,31 +235,32 @@ body: |
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
+ ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[COPY3]](s32)
; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
- ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
- ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16), [[TRUNC2]](s16), [[DEF2]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[TRUNC]](s8), [[TRUNC1]](s8), [[TRUNC2]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR]](<8 x s8>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT]](<8 x s16>)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
- ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s16>) = G_INSERT_VECTOR_ELT [[BUILD_VECTOR]], [[C2]](s16), [[C1]](s64)
- ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[IVEC]](<4 x s16>)
- ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[UV]](s16)
- ; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[UV1]](s16)
- ; CHECK-NEXT: [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
+ ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s16>) = G_INSERT_VECTOR_ELT [[UV]], [[C2]](s16), [[C1]](s64)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[IVEC]](<4 x s16>)
+ ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
+ ; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[UV3]](s16)
+ ; CHECK-NEXT: [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[UV4]](s16)
; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[TRUNC3]](s8), [[TRUNC4]](s8), [[TRUNC5]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<16 x s8>) = G_SHUFFLE_VECTOR [[BUILD_VECTOR1]](<16 x s8>), [[BUILD_VECTOR2]], shufflemask(0, 16, 16, 16, 1, 16, 16, 16, 2, 16, 16, 16, undef, undef, undef, undef)
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[SHUF]](<16 x s8>)
; CHECK-NEXT: [[UITOFP:%[0-9]+]]:_(<4 x s32>) = G_UITOFP [[BITCAST]](<4 x s32>)
- ; CHECK-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UITOFP]](<4 x s32>)
- ; CHECK-NEXT: G_STORE [[UV4]](s32), [[COPY]](p0) :: (store (s32), align 16)
+ ; CHECK-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UITOFP]](<4 x s32>)
+ ; CHECK-NEXT: G_STORE [[UV6]](s32), [[COPY]](p0) :: (store (s32), align 16)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
- ; CHECK-NEXT: G_STORE [[UV5]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 4)
+ ; CHECK-NEXT: G_STORE [[UV7]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 4)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
- ; CHECK-NEXT: G_STORE [[UV6]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 8, align 8)
+ ; CHECK-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 8, align 8)
; CHECK-NEXT: G_BR %bb.1
bb.1:
liveins: $w1, $w2, $w3, $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
index 5cbb864..b8328ed 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
@@ -607,9 +607,11 @@ body: |
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD1]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32)
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[BUILD_VECTOR]](<2 x s32>)
- ; CHECK-NEXT: $s0 = COPY [[TRUNC]](<2 x s16>)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[DEF]](s32), [[DEF]](s32)
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[BUILD_VECTOR]](<4 x s32>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[TRUNC]](<4 x s16>)
+ ; CHECK-NEXT: $s0 = COPY [[UV]](<2 x s16>)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
%1(<2 x s16>) = G_LOAD %0(p0) :: (load (<2 x s16>))
@@ -711,33 +713,24 @@ body: |
; CHECK: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %ptr:_(p0) = COPY $x0
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD %ptr(p0) :: (load (p0), align 64)
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr(p0) :: (load (<2 x s64>), align 64)
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD]](<2 x s64>)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD]](p0) :: (load (p0) from unknown-address + 8)
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
+ ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD1]](<2 x s64>)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C1]](s64)
- ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD1]](p0) :: (load (p0) from unknown-address + 16, align 16)
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C2]](s64)
- ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD2]](p0) :: (load (p0) from unknown-address + 24)
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C3]](s64)
- ; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD3]](p0) :: (load (p0) from unknown-address + 32, align 32)
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C4]](s64)
- ; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD4]](p0) :: (load (p0) from unknown-address + 40)
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p0>) = G_BUILD_VECTOR [[LOAD]](p0), [[LOAD1]](p0)
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x p0>) = G_BUILD_VECTOR [[LOAD2]](p0), [[LOAD3]](p0)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x p0>) = G_BUILD_VECTOR [[LOAD4]](p0), [[LOAD5]](p0)
- ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BUILD_VECTOR]](<2 x p0>)
- ; CHECK-NEXT: G_STORE [[BITCAST]](<2 x s64>), %ptr(p0) :: (store (<2 x s64>), align 64)
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C1]](s64)
- ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BUILD_VECTOR1]](<2 x p0>)
- ; CHECK-NEXT: G_STORE [[BITCAST1]](<2 x s64>), [[PTR_ADD5]](p0) :: (store (<2 x s64>) into unknown-address + 16)
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C3]](s64)
- ; CHECK-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BUILD_VECTOR2]](<2 x p0>)
- ; CHECK-NEXT: G_STORE [[BITCAST2]](<2 x s64>), [[PTR_ADD6]](p0) :: (store (<2 x s64>) into unknown-address + 32, align 32)
+ ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<2 x s64>) from unknown-address + 32, align 32)
+ ; CHECK-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD2]](<2 x s64>)
+ ; CHECK-NEXT: [[BITCAST3:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BITCAST]](<2 x p0>)
+ ; CHECK-NEXT: G_STORE [[BITCAST3]](<2 x s64>), %ptr(p0) :: (store (<2 x s64>), align 64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[BITCAST4:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BITCAST1]](<2 x p0>)
+ ; CHECK-NEXT: G_STORE [[BITCAST4]](<2 x s64>), [[PTR_ADD2]](p0) :: (store (<2 x s64>) into unknown-address + 16)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C1]](s64)
+ ; CHECK-NEXT: [[BITCAST5:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BITCAST2]](<2 x p0>)
+ ; CHECK-NEXT: G_STORE [[BITCAST5]](<2 x s64>), [[PTR_ADD3]](p0) :: (store (<2 x s64>) into unknown-address + 32, align 32)
; CHECK-NEXT: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%val:_(<6 x p0>) = G_LOAD %ptr(p0) :: (load (<6 x p0>))
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
index 63a26dc..e49a94c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
@@ -293,41 +293,44 @@ body: |
; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), %w0(s32), [[C]]
; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ICMP2]], 1
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY [[DEF1]](s16)
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY [[DEF1]](s16)
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY [[DEF1]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[COPY]](s16), [[COPY1]](s16), [[COPY2]](s16), [[DEF1]](s16)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR]](<8 x s8>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT]](<8 x s16>)
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[SEXT_INREG]](s32)
- ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s16>) = G_INSERT_VECTOR_ELT [[BUILD_VECTOR]], [[TRUNC]](s16), [[C1]](s64)
- ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[IVEC]](<4 x s16>)
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[UV]](s16)
- ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[UV1]](s16)
- ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
- ; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[UV3]](s16)
- ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[TRUNC1]](s8), [[TRUNC2]](s8), [[TRUNC3]](s8), [[TRUNC4]](s8), [[DEF2]](s8), [[DEF2]](s8), [[DEF2]](s8), [[DEF2]](s8)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF2]](s8), [[DEF2]](s8), [[DEF2]](s8), [[DEF2]](s8)
+ ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s16>) = G_INSERT_VECTOR_ELT [[UV]], [[TRUNC]](s16), [[C1]](s64)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[IVEC]](<4 x s16>)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
+ ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[UV3]](s16)
+ ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[UV4]](s16)
+ ; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[UV5]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[TRUNC1]](s8), [[TRUNC2]](s8), [[TRUNC3]](s8), [[TRUNC4]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<8 x s8>) = G_SHUFFLE_VECTOR [[BUILD_VECTOR1]](<8 x s8>), [[BUILD_VECTOR2]], shufflemask(0, 0, 0, 0, undef, undef, undef, undef)
- ; CHECK-NEXT: [[UV4:%[0-9]+]]:_(<4 x s8>), [[UV5:%[0-9]+]]:_(<4 x s8>) = G_UNMERGE_VALUES [[SHUF]](<8 x s8>)
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s16) = COPY [[C2]](s16)
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s16) = COPY [[C2]](s16)
- ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s16) = COPY [[C2]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[COPY3]](s16), [[COPY4]](s16), [[COPY5]](s16), [[C2]](s16)
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<4 x s16>) = G_ANYEXT [[UV4]](<4 x s8>)
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s16>) = G_XOR [[ANYEXT]], [[BUILD_VECTOR3]]
+ ; CHECK-NEXT: [[UV6:%[0-9]+]]:_(<4 x s8>), [[UV7:%[0-9]+]]:_(<4 x s8>) = G_UNMERGE_VALUES [[SHUF]](<8 x s8>)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+ ; CHECK-NEXT: [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[UV6]](<4 x s8>)
+ ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[UV8]](s8), [[UV9]](s8), [[UV10]](s8), [[UV11]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR3]](<8 x s8>)
+ ; CHECK-NEXT: [[UV12:%[0-9]+]]:_(<4 x s16>), [[UV13:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT1]](<8 x s16>)
+ ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR4]](<8 x s8>)
+ ; CHECK-NEXT: [[UV14:%[0-9]+]]:_(<4 x s16>), [[UV15:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT2]](<8 x s16>)
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s16>) = G_XOR [[UV12]], [[UV14]]
; CHECK-NEXT: [[TRUNC5:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[ICMP]](<4 x s32>)
- ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(<4 x s16>) = G_ANYEXT [[UV4]](<4 x s8>)
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s16>) = G_AND [[TRUNC5]], [[ANYEXT1]]
+ ; CHECK-NEXT: [[UV16:%[0-9]+]]:_(s8), [[UV17:%[0-9]+]]:_(s8), [[UV18:%[0-9]+]]:_(s8), [[UV19:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[UV6]](<4 x s8>)
+ ; CHECK-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[UV16]](s8), [[UV17]](s8), [[UV18]](s8), [[UV19]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR5]](<8 x s8>)
+ ; CHECK-NEXT: [[UV20:%[0-9]+]]:_(<4 x s16>), [[UV21:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT3]](<8 x s16>)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s16>) = G_AND [[TRUNC5]], [[UV20]]
; CHECK-NEXT: [[TRUNC6:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[ICMP1]](<4 x s32>)
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<4 x s16>) = G_AND [[TRUNC6]], [[XOR]]
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<4 x s16>) = G_OR [[AND]], [[AND1]]
- ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[OR]](<4 x s16>)
+ ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[OR]](<4 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C3]](s32), [[C3]](s32), [[C3]](s32), [[C3]](s32)
- ; CHECK-NEXT: %zext_select:_(<4 x s32>) = G_AND [[ANYEXT2]], [[BUILD_VECTOR4]]
+ ; CHECK-NEXT: [[BUILD_VECTOR6:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C3]](s32), [[C3]](s32), [[C3]](s32), [[C3]](s32)
+ ; CHECK-NEXT: %zext_select:_(<4 x s32>) = G_AND [[ANYEXT4]], [[BUILD_VECTOR6]]
; CHECK-NEXT: $q0 = COPY %zext_select(<4 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%w0:_(s32) = COPY $w0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll
index 42a8f51..f7efaea 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll
@@ -1,16 +1,24 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -global-isel-abort=2 -global-isel -o - %s | FileCheck %s
+; RUN: llc -global-isel -o - %s | FileCheck %s
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "arm64-apple-macosx11.0.0"
declare i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32>) #0
-; This test currently falls back but ensures we don't crash.
-
define i32 @bar() {
; CHECK-LABEL: bar:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: movi.2d v0, #0000000000000000
+; CHECK-NEXT: mov b1, v0[1]
+; CHECK-NEXT: mov b2, v0[2]
+; CHECK-NEXT: mov b3, v0[3]
+; CHECK-NEXT: mov.h v0[1], v1[0]
+; CHECK-NEXT: mov.h v2[1], v3[0]
+; CHECK-NEXT: ushll.4s v0, v0, #0
+; CHECK-NEXT: ushll.4s v1, v2, #0
+; CHECK-NEXT: mov.d v0[1], v1[0]
+; CHECK-NEXT: movi.4s v1, #1
+; CHECK-NEXT: and.16b v0, v0, v1
; CHECK-NEXT: addv.4s s0, v0
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir
index 6612651..e729f02 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir
@@ -540,10 +540,14 @@ body: |
; CHECK: liveins: $d0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<2 x s8>) = G_TRUNC [[COPY]](<2 x s32>)
- ; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s8>) = G_CONCAT_VECTORS [[TRUNC]](<2 x s8>), [[TRUNC]](<2 x s8>)
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<4 x s16>) = G_ANYEXT [[CONCAT_VECTORS]](<4 x s8>)
- ; CHECK-NEXT: $d0 = COPY [[ANYEXT]](<4 x s16>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV]](s32)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[UV1]](s32)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[TRUNC]](s8), [[TRUNC1]](s8), [[TRUNC]](s8), [[TRUNC1]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR]](<8 x s8>)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(<4 x s16>), [[UV3:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT]](<8 x s16>)
+ ; CHECK-NEXT: $d0 = COPY [[UV2]](<4 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
%0:_(<2 x s32>) = COPY $d0
%1:_(<2 x s8>) = G_TRUNC %0(<2 x s32>)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index c9e5f89..ac3c47c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -752,6 +752,15 @@
# DEBUG-NEXT: G_BZERO (opcode {{[0-9]+}}): 2 type indices, 1 imm index
# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: G_TRAP (opcode {{[0-9]+}}): 0 type indices, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: G_DEBUGTRAP (opcode {{[0-9]+}}): 0 type indices, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: G_UBSANTRAP (opcode {{[0-9]+}}): 0 type indices, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: G_VECREDUCE_SEQ_FADD (opcode {{[0-9]+}}): 3 type indices, 0 imm indices
# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
index 0cf9602..499c08f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
@@ -40,11 +40,12 @@ body: |
; CHECK-LABEL: name: ldrxrox_breg_oreg
; CHECK: liveins: $x0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
- ; CHECK: $x0 = COPY [[LDRXroX]]
- ; CHECK: RET_ReallyLR implicit $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $x0 = COPY [[LDRXroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -65,11 +66,12 @@ body: |
liveins: $d0, $x1
; CHECK-LABEL: name: ldrdrox_breg_oreg
; CHECK: liveins: $d0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
- ; CHECK: $d0 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d0 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -78,6 +80,9 @@ body: |
RET_ReallyLR implicit $d0
...
---
+# This shouldn't be folded, since we reuse the result of the G_PTR_ADD outside
+# the G_LOAD
+
name: more_than_one_use
alignment: 4
legalized: true
@@ -87,18 +92,17 @@ machineFunctionInfo: {}
body: |
bb.0:
liveins: $x0, $x1
- ; This shouldn't be folded, since we reuse the result of the G_PTR_ADD outside
- ; the G_LOAD
; CHECK-LABEL: name: more_than_one_use
; CHECK: liveins: $x0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY]], [[COPY1]]
- ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
- ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
- ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[LDRXui]]
- ; CHECK: $x0 = COPY [[ADDXrr1]]
- ; CHECK: RET_ReallyLR implicit $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
+ ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[LDRXui]]
+ ; CHECK-NEXT: $x0 = COPY [[ADDXrr1]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -121,11 +125,12 @@ body: |
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_shl
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $x2 = COPY [[LDRXroX]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -148,11 +153,12 @@ body: |
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrdrox_shl
; CHECK: liveins: $x0, $x1, $d2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $d2 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -175,11 +181,12 @@ body: |
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_mul_rhs
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $x2 = COPY [[LDRXroX]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %0, %1(s64)
@@ -202,11 +209,12 @@ body: |
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrdrox_mul_rhs
; CHECK: liveins: $x0, $x1, $d2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $d2 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %0, %1(s64)
@@ -229,11 +237,12 @@ body: |
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_mul_lhs
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $x2 = COPY [[LDRXroX]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %1, %0(s64)
@@ -256,11 +265,12 @@ body: |
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrdrox_mul_lhs
; CHECK: liveins: $x0, $x1, $d2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $d2 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %1, %0(s64)
@@ -272,6 +282,9 @@ body: |
...
---
+# Show that we don't get a shifted load from a mul when we don't have a
+# power of 2. (The bit isn't set on the load.)
+
name: mul_not_pow_2
alignment: 4
legalized: true
@@ -280,19 +293,18 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that we don't get a shifted load from a mul when we don't have a
- ; power of 2. (The bit isn't set on the load.)
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: mul_not_pow_2
; CHECK: liveins: $x0, $x1, $d2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 7
- ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
- ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
- ; CHECK: $d2 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 7
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
+ ; CHECK-NEXT: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 7
%2:gpr(s64) = G_MUL %1, %0(s64)
@@ -304,6 +316,9 @@ body: |
...
---
+# Show that we don't get a shifted load from a mul when we don't have
+# the right power of 2. (The bit isn't set on the load.)
+
name: mul_wrong_pow_2
alignment: 4
legalized: true
@@ -312,19 +327,18 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that we don't get a shifted load from a mul when we don't have
- ; the right power of 2. (The bit isn't set on the load.)
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: mul_wrong_pow_2
; CHECK: liveins: $x0, $x1, $d2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 16
- ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
- ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
- ; CHECK: $d2 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 16
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
+ ; CHECK-NEXT: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 16
%2:gpr(s64) = G_MUL %1, %0(s64)
@@ -336,6 +350,9 @@ body: |
...
---
+# Show that we can still fall back to the register-register addressing
+# mode when we fail to pull in the shift.
+
name: more_than_one_use_shl_1
alignment: 4
legalized: true
@@ -344,19 +361,18 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that we can still fall back to the register-register addressing
- ; mode when we fail to pull in the shift.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_1
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load (s64) from %ir.addr)
- ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
- ; CHECK: $x2 = COPY [[ADDXrr]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
+ ; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -370,6 +386,9 @@ body: |
...
---
+# Show that when the GEP is used outside a memory op, we don't do any
+# folding at all.
+
name: more_than_one_use_shl_2
alignment: 4
legalized: true
@@ -378,22 +397,21 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that when the GEP is used outside a memory op, we don't do any
- ; folding at all.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_2
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
- ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
- ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
- ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[ADDXri]]
- ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
- ; CHECK: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[ADDXrr1]]
- ; CHECK: $x2 = COPY [[ADDXrr2]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
+ ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
+ ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[ADDXri]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
+ ; CHECK-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[ADDXrr1]]
+ ; CHECK-NEXT: $x2 = COPY [[ADDXrr2]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -409,6 +427,9 @@ body: |
...
---
+# Show that when we have a fastpath for shift-left, we perform the folding
+# if it has more than one use.
+
name: more_than_one_use_shl_lsl_fast
alignment: 4
legalized: true
@@ -417,18 +438,17 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that when we have a fastpath for shift-left, we perform the folding
- ; if it has more than one use.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_lsl_fast
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
- ; CHECK: $x2 = COPY [[ADDXrr]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
+ ; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -442,6 +462,9 @@ body: |
...
---
+# Show that we don't fold into multiple memory ops when we don't have a
+# fastpath for shift-left.
+
name: more_than_one_use_shl_lsl_slow
alignment: 4
legalized: true
@@ -450,19 +473,18 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that we don't fold into multiple memory ops when we don't have a
- ; fastpath for shift-left.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_lsl_slow
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64common = ADDXrs [[COPY1]], [[COPY]], 3
- ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
- ; CHECK: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[LDRXui1]]
- ; CHECK: $x2 = COPY [[ADDXrr]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[ADDXrs:%[0-9]+]]:gpr64common = ADDXrs [[COPY1]], [[COPY]], 3
+ ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[LDRXui1]]
+ ; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -476,6 +498,9 @@ body: |
...
---
+# Show that when we're optimizing for size, we'll do the folding no matter
+# what.
+
name: more_than_one_use_shl_minsize
alignment: 4
legalized: true
@@ -484,22 +509,21 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that when we're optimizing for size, we'll do the folding no matter
- ; what.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_minsize
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
- ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
- ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY2]], [[COPY]], 3
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
- ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrs]], [[ADDXrr]]
- ; CHECK: $x2 = COPY [[ADDXrr1]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY2]], [[COPY]], 3
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
+ ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrs]], [[ADDXrr]]
+ ; CHECK-NEXT: $x2 = COPY [[ADDXrr1]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -525,11 +549,12 @@ body: |
liveins: $x0, $x1
; CHECK-LABEL: name: ldrwrox
; CHECK: liveins: $x0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
- ; CHECK: $w2 = COPY [[LDRWroX]]
- ; CHECK: RET_ReallyLR implicit $w2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
+ ; CHECK-NEXT: $w2 = COPY [[LDRWroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $w2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -549,11 +574,12 @@ body: |
liveins: $d0, $x1
; CHECK-LABEL: name: ldrsrox
; CHECK: liveins: $d0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
- ; CHECK: $s2 = COPY [[LDRSroX]]
- ; CHECK: RET_ReallyLR implicit $h2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
+ ; CHECK-NEXT: $s2 = COPY [[LDRSroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $h2
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -573,11 +599,12 @@ body: |
liveins: $x0, $x1
; CHECK-LABEL: name: ldrhrox
; CHECK: liveins: $x0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRHroX:%[0-9]+]]:fpr16 = LDRHroX [[COPY]], [[COPY1]], 0, 0 :: (load (s16) from %ir.addr)
- ; CHECK: $h2 = COPY [[LDRHroX]]
- ; CHECK: RET_ReallyLR implicit $h2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRHroX:%[0-9]+]]:fpr16 = LDRHroX [[COPY]], [[COPY1]], 0, 0 :: (load (s16) from %ir.addr)
+ ; CHECK-NEXT: $h2 = COPY [[LDRHroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $h2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -597,11 +624,12 @@ body: |
liveins: $x0, $x1
; CHECK-LABEL: name: ldbbrox
; CHECK: liveins: $x0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRBBroX:%[0-9]+]]:gpr32 = LDRBBroX [[COPY]], [[COPY1]], 0, 0 :: (load (s8) from %ir.addr)
- ; CHECK: $w2 = COPY [[LDRBBroX]]
- ; CHECK: RET_ReallyLR implicit $w2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRBBroX:%[0-9]+]]:gpr32 = LDRBBroX [[COPY]], [[COPY1]], 0, 0 :: (load (s8) from %ir.addr)
+ ; CHECK-NEXT: $w2 = COPY [[LDRBBroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $w2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -621,11 +649,12 @@ body: |
liveins: $d0, $x1
; CHECK-LABEL: name: ldrqrox
; CHECK: liveins: $d0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY]], [[COPY1]], 0, 0 :: (load (<2 x s64>) from %ir.addr)
- ; CHECK: $q0 = COPY [[LDRQroX]]
- ; CHECK: RET_ReallyLR implicit $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY]], [[COPY1]], 0, 0 :: (load (<2 x s64>) from %ir.addr)
+ ; CHECK-NEXT: $q0 = COPY [[LDRQroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-addo-zero.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-addo-zero.mir
index 94f56e5..9483cbf 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-addo-zero.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-addo-zero.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple aarch64 -debugify-and-strip-all-safe -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="addo_by_0" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple aarch64 -debugify-and-strip-all-safe -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="match_addos" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
# REQUIRES: asserts
# (G_*ADDO x, 0) -> x + no carry
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir
index ad66fa5..25ecce4 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir
@@ -26,7 +26,7 @@ body: |
; CHECK-LABEL: name: foo
; CHECK: BRK 1
; CHECK: RET_ReallyLR
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
RET_ReallyLR
...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
index bcdd77a..b3613f5 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
# RUN: llc -O0 -mtriple=aarch64-apple-ios -run-pass=instruction-select -global-isel-abort=1 -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=IOS
# RUN: llc -O0 -mtriple=aarch64-linux-gnu -relocation-model=pic -run-pass=instruction-select -global-isel-abort=1 -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=LINUX-PIC
@@ -26,40 +27,35 @@
...
---
-# CHECK-LABEL: name: frame_index
name: frame_index
legalized: true
regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp, preferred-register: '' }
-registers:
- - { id: 0, class: gpr }
-
stack:
- { id: 0, name: ptr0, offset: 0, size: 8, alignment: 8 }
-
-# CHECK: body:
-# CHECK: %0:gpr64sp = ADDXri %stack.0.ptr0, 0, 0
body: |
bb.0:
- %0(p0) = G_FRAME_INDEX %stack.0.ptr0
+ ; CHECK-LABEL: name: frame_index
+ ; CHECK: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0.ptr0, 0, 0
+ ; CHECK-NEXT: $x0 = COPY [[ADDXri]]
+ %0:gpr(p0) = G_FRAME_INDEX %stack.0.ptr0
$x0 = COPY %0(p0)
...
---
---
-# CHECK-LABEL: name: ptr_mask
name: ptr_mask
legalized: true
regBankSelected: true
-
-# CHECK: body:
-# CHECK: %2:gpr64sp = ANDXri %0, 8060
body: |
bb.0:
liveins: $x0
+ ; CHECK-LABEL: name: ptr_mask
+ ; CHECK: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[COPY]], 8060
+ ; CHECK-NEXT: $x0 = COPY [[ANDXri]]
%0:gpr(p0) = COPY $x0
%const:gpr(s64) = G_CONSTANT i64 -8
%1:gpr(p0) = G_PTRMASK %0, %const
@@ -68,180 +64,171 @@ body: |
---
# Global defined in the same linkage unit so no GOT is needed
-# CHECK-LABEL: name: global_local
name: global_local
legalized: true
regBankSelected: true
-registers:
- - { id: 0, class: gpr }
-
-# CHECK: body:
-# IOS: %0:gpr64common = MOVaddr target-flags(aarch64-page) @var_local, target-flags(aarch64-pageoff, aarch64-nc) @var_local
-# LINUX-PIC: %0:gpr64common = LOADgot target-flags(aarch64-got) @var_local
body: |
bb.0:
- %0(p0) = G_GLOBAL_VALUE @var_local
+ ; IOS-LABEL: name: global_local
+ ; IOS: [[MOVaddr:%[0-9]+]]:gpr64common = MOVaddr target-flags(aarch64-page) @var_local, target-flags(aarch64-pageoff, aarch64-nc) @var_local
+ ; IOS-NEXT: $x0 = COPY [[MOVaddr]]
+ ;
+ ; LINUX-PIC-LABEL: name: global_local
+ ; LINUX-PIC: [[LOADgot:%[0-9]+]]:gpr64common = LOADgot target-flags(aarch64-got) @var_local
+ ; LINUX-PIC-NEXT: $x0 = COPY [[LOADgot]]
+ %0:gpr(p0) = G_GLOBAL_VALUE @var_local
$x0 = COPY %0(p0)
...
---
-# CHECK-LABEL: name: global_got
name: global_got
legalized: true
regBankSelected: true
-registers:
- - { id: 0, class: gpr }
-
-# CHECK: body:
-# IOS: %0:gpr64common = LOADgot target-flags(aarch64-got) @var_got
-# LINUX-PIC: %0:gpr64common = LOADgot target-flags(aarch64-got) @var_got
body: |
bb.0:
- %0(p0) = G_GLOBAL_VALUE @var_got
+ ; CHECK-LABEL: name: global_got
+ ; CHECK: [[LOADgot:%[0-9]+]]:gpr64common = LOADgot target-flags(aarch64-got) @var_got
+ ; CHECK-NEXT: $x0 = COPY [[LOADgot]]
+ %0:gpr(p0) = G_GLOBAL_VALUE @var_got
$x0 = COPY %0(p0)
...
---
-# CHECK-LABEL: name: icmp
name: icmp
legalized: true
regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 1, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 2, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 3, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 4, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 5, class: gpr32, preferred-register: '' }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
- - { id: 3, class: gpr }
- - { id: 4, class: gpr }
- - { id: 5, class: gpr }
- - { id: 6, class: gpr }
- - { id: 7, class: gpr }
- - { id: 8, class: gpr }
- - { id: 9, class: gpr }
- - { id: 10, class: gpr }
- - { id: 11, class: gpr }
-
-# CHECK: body:
-# CHECK: SUBSWrr %0, %0, implicit-def $nzcv
-# CHECK: %1:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-
-# CHECK: SUBSXrr %2, %2, implicit-def $nzcv
-# CHECK: %3:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
-
-# CHECK: SUBSXrr %4, %4, implicit-def $nzcv
-# CHECK: %5:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
-
body: |
bb.0:
liveins: $w0, $x0
- %0(s32) = COPY $w0
- %1(s32) = G_ICMP intpred(eq), %0, %0
- %6(s8) = G_TRUNC %1(s32)
- %9(s32) = G_ANYEXT %6
+ ; CHECK-LABEL: name: icmp
+ ; CHECK: liveins: $w0, $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK-NEXT: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[COPY]], implicit-def $nzcv
+ ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[CSINCWr]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY2]], [[COPY2]], implicit-def $nzcv
+ ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32all = COPY [[CSINCWr1]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY3]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr [[COPY4]], [[COPY4]], implicit-def $nzcv
+ ; CHECK-NEXT: [[CSINCWr2:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr32all = COPY [[CSINCWr2]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY5]]
+ %0:gpr(s32) = COPY $w0
+ %1:gpr(s32) = G_ICMP intpred(eq), %0, %0
+ %6:gpr(s8) = G_TRUNC %1(s32)
+ %9:gpr(s32) = G_ANYEXT %6
$w0 = COPY %9(s32)
- %2(s64) = COPY $x0
- %3(s32) = G_ICMP intpred(uge), %2, %2
- %7(s8) = G_TRUNC %3(s32)
- %10(s32) = G_ANYEXT %7
+ %2:gpr(s64) = COPY $x0
+ %3:gpr(s32) = G_ICMP intpred(uge), %2, %2
+ %7:gpr(s8) = G_TRUNC %3(s32)
+ %10:gpr(s32) = G_ANYEXT %7
$w0 = COPY %10(s32)
- %4(p0) = COPY $x0
- %5(s32) = G_ICMP intpred(ne), %4, %4
- %8(s8) = G_TRUNC %5(s32)
- %11(s32) = G_ANYEXT %8
+ %4:gpr(p0) = COPY $x0
+ %5:gpr(s32) = G_ICMP intpred(ne), %4, %4
+ %8:gpr(s8) = G_TRUNC %5(s32)
+ %11:gpr(s32) = G_ANYEXT %8
$w0 = COPY %11(s32)
...
---
-# CHECK-LABEL: name: fcmp
name: fcmp
legalized: true
regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 1, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 2, class: fpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 3, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 4, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 5, class: gpr32, preferred-register: '' }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
- - { id: 2, class: fpr }
- - { id: 3, class: gpr }
- - { id: 4, class: gpr }
- - { id: 5, class: gpr }
- - { id: 6, class: gpr }
- - { id: 7, class: gpr }
-
-# CHECK: body:
-# CHECK: nofpexcept FCMPSrr %0, %0, implicit-def $nzcv
-# CHECK: [[TST_MI:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 5, implicit $nzcv
-# CHECK: [[TST_GT:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
-# CHECK: %1:gpr32 = ORRWrr [[TST_MI]], [[TST_GT]]
-
-# CHECK: nofpexcept FCMPDrr %2, %2, implicit-def $nzcv
-# CHECK: %3:gpr32 = CSINCWr $wzr, $wzr, 4, implicit $nzcv
-
body: |
bb.0:
liveins: $w0, $x0
- %0(s32) = COPY $s0
- %1(s32) = G_FCMP floatpred(one), %0, %0
- %4(s8) = G_TRUNC %1(s32)
- %6(s32) = G_ANYEXT %4
- $w0 = COPY %6(s32)
+ ; CHECK-LABEL: name: fcmp
+ ; CHECK: liveins: $w0, $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+ ; CHECK-NEXT: nofpexcept FCMPSrr [[COPY]], [[COPY]], implicit-def $nzcv, implicit $fpcr
+ ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 5, implicit $nzcv
+ ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
+ ; CHECK-NEXT: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr [[CSINCWr]], [[CSINCWr1]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[ORRWrr]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+ ; CHECK-NEXT: nofpexcept FCMPDrr [[COPY2]], [[COPY2]], implicit-def $nzcv, implicit $fpcr
+ ; CHECK-NEXT: [[CSINCWr2:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 4, implicit $nzcv
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32all = COPY [[CSINCWr2]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY3]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:fpr32 = COPY $s0
+ ; CHECK-NEXT: nofpexcept FCMPSrr [[COPY4]], [[COPY4]], implicit-def $nzcv, implicit $fpcr
+ ; CHECK-NEXT: [[CSINCWr3:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 15, implicit $nzcv
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr32all = COPY [[CSINCWr3]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY5]]
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:fpr64 = COPY $d0
+ ; CHECK-NEXT: nofpexcept FCMPDrr [[COPY6]], [[COPY6]], implicit-def $nzcv, implicit $fpcr
+ ; CHECK-NEXT: [[CSINCWr4:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 14, implicit $nzcv
+ ; CHECK-NEXT: [[COPY7:%[0-9]+]]:gpr32all = COPY [[CSINCWr4]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY7]]
+ %0:fpr(s32) = COPY $s0
+ %1:gpr(s32) = G_FCMP floatpred(one), %0, %0
+ %2:gpr(s8) = G_TRUNC %1(s32)
+ %3:gpr(s32) = G_ANYEXT %2
+ $w0 = COPY %3(s32)
- %2(s64) = COPY $d0
- %3(s32) = G_FCMP floatpred(uge), %2, %2
- %5(s8) = G_TRUNC %3(s32)
- %7(s32) = G_ANYEXT %5
+ %4:fpr(s64) = COPY $d0
+ %5:gpr(s32) = G_FCMP floatpred(uge), %4, %4
+ %6:gpr(s8) = G_TRUNC %5(s32)
+ %7:gpr(s32) = G_ANYEXT %6
$w0 = COPY %7(s32)
+ %8:fpr(s32) = COPY $s0
+ %9:gpr(s32) = G_FCMP floatpred(true), %8, %8
+ %10:gpr(s8) = G_TRUNC %9(s32)
+ %11:gpr(s32) = G_ANYEXT %10
+ $w0 = COPY %11(s32)
+
+ %12:fpr(s64) = COPY $d0
+ %13:gpr(s32) = G_FCMP floatpred(false), %12, %12
+ %14:gpr(s8) = G_TRUNC %13(s32)
+ %15:gpr(s32) = G_ANYEXT %14
+ $w0 = COPY %15(s32)
+
...
---
-# CHECK-LABEL: name: phi
name: phi
legalized: true
regBankSelected: true
tracksRegLiveness: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 1, class: gpr, preferred-register: '' }
-# CHECK-NEXT: - { id: 2, class: fpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 3, class: gpr32, preferred-register: '' }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
- - { id: 2, class: fpr }
-
-# CHECK: body:
-# CHECK: bb.1:
-# CHECK: %2:fpr32 = PHI %0, %bb.0, %2, %bb.1
-
body: |
+ ; CHECK-LABEL: name: phi
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $s0, $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:fpr32 = PHI [[COPY]], %bb.0, [[PHI]], %bb.1
+ ; CHECK-NEXT: TBNZW [[COPY1]], 0, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $s0 = COPY [[PHI]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $s0
bb.0:
liveins: $s0, $w0
successors: %bb.1
- %0(s32) = COPY $s0
+ %0:fpr(s32) = COPY $s0
%3:gpr(s32) = COPY $w0
bb.1:
successors: %bb.1, %bb.2
- %2(s32) = PHI %0, %bb.0, %2, %bb.1
+ %2:fpr(s32) = PHI %0, %bb.0, %2, %bb.1
G_BRCOND %3, %bb.1
bb.2:
@@ -250,60 +237,46 @@ body: |
...
---
-# CHECK-LABEL: name: select
name: select
legalized: true
regBankSelected: true
tracksRegLiveness: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
-# CHECK-NEXT: - { id: 1, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 2, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 3, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 4, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 5, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 6, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 7, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 8, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 9, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 10, class: gpr32, preferred-register: '' }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
- - { id: 3, class: gpr }
- - { id: 4, class: gpr }
- - { id: 5, class: gpr }
- - { id: 6, class: gpr }
- - { id: 7, class: gpr }
- - { id: 8, class: gpr }
- - { id: 9, class: gpr }
-
-# CHECK: body:
-# CHECK: ANDSWri %10, 0, implicit-def $nzcv
-# CHECK: %3:gpr32 = CSELWr %1, %2, 1, implicit $nzcv
-# CHECK: ANDSWri %10, 0, implicit-def $nzcv
-# CHECK: %6:gpr64 = CSELXr %4, %5, 1, implicit $nzcv
-# CHECK: ANDSWri %10, 0, implicit-def $nzcv
-# CHECK: %9:gpr64 = CSELXr %7, %8, 1, implicit $nzcv
body: |
bb.0:
liveins: $w0, $w1, $w2
+ ; CHECK-LABEL: name: select
+ ; CHECK: liveins: $w0, $w1, $w2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY $w2
+ ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 0, implicit-def $nzcv
+ ; CHECK-NEXT: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[COPY1]], [[COPY2]], 1, implicit $nzcv
+ ; CHECK-NEXT: $w0 = COPY [[CSELWr]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[ANDSWri1:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 0, implicit-def $nzcv
+ ; CHECK-NEXT: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[COPY3]], [[COPY4]], 1, implicit $nzcv
+ ; CHECK-NEXT: $x0 = COPY [[CSELXr]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[ANDSWri2:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 0, implicit-def $nzcv
+ ; CHECK-NEXT: [[CSELXr1:%[0-9]+]]:gpr64 = CSELXr [[COPY5]], [[COPY6]], 1, implicit $nzcv
+ ; CHECK-NEXT: $x0 = COPY [[CSELXr1]]
%10:gpr(s32) = COPY $w0
- %1(s32) = COPY $w1
- %2(s32) = COPY $w2
- %3(s32) = G_SELECT %10, %1, %2
+ %1:gpr(s32) = COPY $w1
+ %2:gpr(s32) = COPY $w2
+ %3:gpr(s32) = G_SELECT %10, %1, %2
$w0 = COPY %3(s32)
- %4(s64) = COPY $x0
- %5(s64) = COPY $x1
- %6(s64) = G_SELECT %10, %4, %5
+ %4:gpr(s64) = COPY $x0
+ %5:gpr(s64) = COPY $x1
+ %6:gpr(s64) = G_SELECT %10, %4, %5
$x0 = COPY %6(s64)
- %7(p0) = COPY $x0
- %8(p0) = COPY $x1
- %9(p0) = G_SELECT %10, %7, %8
+ %7:gpr(p0) = COPY $x0
+ %8:gpr(p0) = COPY $x1
+ %9:gpr(p0) = G_SELECT %10, %7, %8
$x0 = COPY %9(p0)
...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/uaddo-8-16-bits.mir b/llvm/test/CodeGen/AArch64/GlobalISel/uaddo-8-16-bits.mir
index f4366fb..b242c68 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/uaddo-8-16-bits.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/uaddo-8-16-bits.mir
@@ -26,7 +26,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
@@ -48,7 +48,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%8:_(s32) = G_ZEXT %6(s8)
@@ -80,7 +80,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
@@ -102,7 +102,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%8:_(s32) = G_ZEXT %6(s16)
@@ -134,7 +134,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: liveins: $x2
@@ -165,7 +165,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
liveins: $x2
@@ -206,7 +206,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
@@ -228,7 +228,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%8:_(s32) = G_ANYEXT %6(s16)
@@ -261,7 +261,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
@@ -284,7 +284,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%8:_(s32) = G_ZEXT %6(s16)
@@ -317,7 +317,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
@@ -340,7 +340,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%8:_(s32) = G_ZEXT %6(s16)
@@ -377,7 +377,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: liveins: $x2
@@ -410,7 +410,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
liveins: $x2
@@ -512,7 +512,7 @@ body: |
; CHECK-NEXT: bb.2:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3:
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s16)
@@ -544,7 +544,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%9:_(s32) = G_ZEXT %6(s16)
@@ -577,7 +577,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s8)
@@ -601,7 +601,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%9:_(s32) = G_ZEXT %7(s8)
@@ -634,7 +634,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s8)
@@ -658,7 +658,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%9:_(s32) = G_ZEXT %7(s8)
@@ -692,7 +692,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s8)
@@ -717,7 +717,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%10:_(s32) = G_ZEXT %8(s8)
@@ -783,7 +783,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UADDO]](s16)
@@ -804,7 +804,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%6:_(s32) = G_ANYEXT %4(s16)
@@ -839,7 +839,7 @@ body: |
; CHECK-NEXT: RET_ReallyLR implicit $w0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
bb.1:
successors: %bb.2(0x7ffff800), %bb.3(0x00000800)
liveins: $w0, $w1
@@ -860,6 +860,6 @@ body: |
RET_ReallyLR implicit $w0
bb.3:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
...
diff --git a/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll b/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll
index 5829969..d4d803a 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll
@@ -3,7 +3,7 @@
define void @UphPNR(target("aarch64.svcount") %predcnt) {
entry:
; CHECK: %0:ppr = COPY $p0
-; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store unknown-size into %ir.predcnt.addr, align 2)
+; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store (<vscale x 1 x s16>) into %ir.predcnt.addr)
; CHECK: %1:pnr_p8to15 = COPY %0
; CHECK: INLINEASM &"ld1w {z0.s,z1.s,z2.s,z3.s}, $0/z, [x10]", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:PNR_p8to15 */, %1
; CHECK: RET_ReallyLR
@@ -17,7 +17,7 @@ entry:
define void @UpaPNR(target("aarch64.svcount") %predcnt) {
entry:
; CHECK: %0:ppr = COPY $p0
-; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store unknown-size into %ir.predcnt.addr, align 2)
+; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store (<vscale x 1 x s16>) into %ir.predcnt.addr)
; CHECK: %1:pnr = COPY %0
; CHECK: INLINEASM &"ld1w {z0.s,z1.s,z2.s,z3.s}, $0/z, [x10]", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:PNR */, %1
; CHECK: RET_ReallyLR
@@ -31,7 +31,7 @@ entry:
define void @UplPNR(target("aarch64.svcount") %predcnt) {
entry:
; CHECK: %0:ppr = COPY $p0
-; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store unknown-size into %ir.predcnt.addr, align 2)
+; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store (<vscale x 1 x s16>) into %ir.predcnt.addr)
; CHECK: %1:pnr_3b = COPY %0
; CHECK: INLINEASM &"fadd z0.h, $0/m, z0.h, #0.5", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:PNR_3b */, %1
; CHECK: RET_ReallyLR
diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
index dbc5417..61a4f64 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
@@ -3,8 +3,7 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+sve < %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SVE
; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
-; CHECK-GI: warning: Instruction selection used fallback path for smull_zext_v4i16_v4i32
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for pmlsl2_v8i16_uzp1
+; CHECK-GI: warning: Instruction selection used fallback path for pmlsl2_v8i16_uzp1
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for smlsl2_v8i16_uzp1
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for umlsl2_v8i16_uzp1
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for smlsl2_v4i32_uzp1
@@ -189,13 +188,49 @@ define <8 x i32> @smull_zext_v8i8_v8i32_top_bit_is_1(ptr %A, ptr %B) nounwind {
}
define <4 x i32> @smull_zext_v4i16_v4i32(ptr %A, ptr %B) nounwind {
-; CHECK-LABEL: smull_zext_v4i16_v4i32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr s0, [x0]
-; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-NEXT: smull v0.4s, v0.4h, v1.4h
-; CHECK-NEXT: ret
+; CHECK-NEON-LABEL: smull_zext_v4i16_v4i32:
+; CHECK-NEON: // %bb.0:
+; CHECK-NEON-NEXT: ldr s0, [x0]
+; CHECK-NEON-NEXT: ldr d1, [x1]
+; CHECK-NEON-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-NEON-NEXT: smull v0.4s, v0.4h, v1.4h
+; CHECK-NEON-NEXT: ret
+;
+; CHECK-SVE-LABEL: smull_zext_v4i16_v4i32:
+; CHECK-SVE: // %bb.0:
+; CHECK-SVE-NEXT: ldr s0, [x0]
+; CHECK-SVE-NEXT: ldr d1, [x1]
+; CHECK-SVE-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-SVE-NEXT: smull v0.4s, v0.4h, v1.4h
+; CHECK-SVE-NEXT: ret
+;
+; CHECK-GI-LABEL: smull_zext_v4i16_v4i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr w8, [x0]
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: fmov w9, s1
+; CHECK-GI-NEXT: fmov w10, s2
+; CHECK-GI-NEXT: fmov w11, s3
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: fmov s2, w10
+; CHECK-GI-NEXT: fmov s3, w11
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: mov v2.h[1], v3.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-GI-NEXT: ldr d2, [x1]
+; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT: sshll v1.4s, v2.4h, #0
+; CHECK-GI-NEXT: mul v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: ret
%load.A = load <4 x i8>, ptr %A
%load.B = load <4 x i16>, ptr %B
%zext.A = zext <4 x i8> %load.A to <4 x i32>
diff --git a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
index cf9ed4d..573f921 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
@@ -20,7 +20,7 @@ entry:
define i8 @test2(i32 %a) {
; CHECK-LABEL: test2:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #135
+; CHECK-NEXT: mov w8, #135 // =0x87
; CHECK-NEXT: and w8, w0, w8
; CHECK-NEXT: cmp w8, #1024
; CHECK-NEXT: cset w0, eq
@@ -37,7 +37,7 @@ entry:
define i8 @test3(i32 %a) {
; CHECK-LABEL: test3:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #1024
+; CHECK-NEXT: mov w8, #1024 // =0x400
; CHECK-NEXT: movk w8, #33, lsl #16
; CHECK-NEXT: and w8, w0, w8
; CHECK-NEXT: cmp w8, #1024
@@ -84,7 +84,7 @@ entry:
define i8 @test6(i64 %a) {
; CHECK-LABEL: test6:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #135
+; CHECK-NEXT: mov w8, #135 // =0x87
; CHECK-NEXT: and x8, x0, x8
; CHECK-NEXT: cmp x8, #1024
; CHECK-NEXT: cset w0, eq
@@ -101,7 +101,7 @@ entry:
define i8 @test7(i64 %a) {
; CHECK-LABEL: test7:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #1024
+; CHECK-NEXT: mov w8, #1024 // =0x400
; CHECK-NEXT: movk w8, #33, lsl #16
; CHECK-NEXT: and x8, x0, x8
; CHECK-NEXT: cmp x8, #1024
@@ -175,7 +175,7 @@ define i32 @test9(ptr nocapture %x, ptr nocapture readonly %y, i32 %n) {
; CHECK-NEXT: cmp w2, #1
; CHECK-NEXT: b.lt .LBB8_3
; CHECK-NEXT: // %bb.1: // %for.body.preheader
-; CHECK-NEXT: mov w9, #1024
+; CHECK-NEXT: mov w9, #1024 // =0x400
; CHECK-NEXT: mov w8, w2
; CHECK-NEXT: movk w9, #32, lsl #16
; CHECK-NEXT: .LBB8_2: // %for.body
@@ -226,7 +226,7 @@ define void @test10(ptr nocapture %x, ptr nocapture readonly %y, ptr nocapture %
; CHECK-LABEL: test10:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ldr w8, [x1]
-; CHECK-NEXT: mov w9, #1024
+; CHECK-NEXT: mov w9, #1024 // =0x400
; CHECK-NEXT: movk w9, #32, lsl #16
; CHECK-NEXT: and w8, w8, w9
; CHECK-NEXT: str w8, [x0]
@@ -253,7 +253,7 @@ entry:
define i8 @test11(i64 %a) {
; CHECK-LABEL: test11:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #-1610612736
+; CHECK-NEXT: mov w8, #-1610612736 // =0xa0000000
; CHECK-NEXT: and x8, x0, x8
; CHECK-NEXT: cmp x8, #1024
; CHECK-NEXT: cset w0, eq
diff --git a/llvm/test/CodeGen/AArch64/abs.ll b/llvm/test/CodeGen/AArch64/abs.ll
index e00f70b..78c1ff7 100644
--- a/llvm/test/CodeGen/AArch64/abs.ll
+++ b/llvm/test/CodeGen/AArch64/abs.ll
@@ -15,9 +15,8 @@ define i8 @abs_i8(i8 %a){
; CHECK-GI-LABEL: abs_i8:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: sxtb w8, w0
-; CHECK-GI-NEXT: asr w8, w8, #7
-; CHECK-GI-NEXT: add w9, w0, w8
-; CHECK-GI-NEXT: eor w0, w9, w8
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: cneg w0, w0, le
; CHECK-GI-NEXT: ret
entry:
%res = call i8 @llvm.abs.i8(i8 %a, i1 0)
@@ -36,9 +35,8 @@ define i16 @abs_i16(i16 %a){
; CHECK-GI-LABEL: abs_i16:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: sxth w8, w0
-; CHECK-GI-NEXT: asr w8, w8, #15
-; CHECK-GI-NEXT: add w9, w0, w8
-; CHECK-GI-NEXT: eor w0, w9, w8
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: cneg w0, w0, le
; CHECK-GI-NEXT: ret
entry:
%res = call i16 @llvm.abs.i16(i16 %a, i1 0)
@@ -55,9 +53,8 @@ define i32 @abs_i32(i32 %a){
;
; CHECK-GI-LABEL: abs_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: asr w8, w0, #31
-; CHECK-GI-NEXT: add w9, w0, w8
-; CHECK-GI-NEXT: eor w0, w9, w8
+; CHECK-GI-NEXT: cmp w0, #0
+; CHECK-GI-NEXT: cneg w0, w0, le
; CHECK-GI-NEXT: ret
entry:
%res = call i32 @llvm.abs.i32(i32 %a, i1 0)
@@ -74,9 +71,8 @@ define i64 @abs_i64(i64 %a){
;
; CHECK-GI-LABEL: abs_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: asr x8, x0, #63
-; CHECK-GI-NEXT: add x9, x0, x8
-; CHECK-GI-NEXT: eor x0, x9, x8
+; CHECK-GI-NEXT: cmp x0, #0
+; CHECK-GI-NEXT: cneg x0, x0, le
; CHECK-GI-NEXT: ret
entry:
%res = call i64 @llvm.abs.i64(i64 %a, i1 0)
@@ -248,9 +244,9 @@ define <1 x i32> @abs_v1i32(<1 x i32> %a){
; CHECK-GI-LABEL: abs_v1i32:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: asr w9, w8, #31
-; CHECK-GI-NEXT: add w8, w8, w9
-; CHECK-GI-NEXT: eor w8, w8, w9
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: cneg w8, w9, le
; CHECK-GI-NEXT: fmov s0, w8
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
index 9a4e01a..7244ac9 100644
--- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
+++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
@@ -14,12 +14,12 @@ define void @array_1D(ptr %addr) #0 {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0]
-; CHECK-NEXT: st1d { z0.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT: st1d { z1.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT: st1d { z2.d }, p0, [sp]
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: st1d { z0.d }, p0, [sp]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT: st1d { z2.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT: addvl sp, sp, #3
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
@@ -81,18 +81,18 @@ define void @array_2D(ptr %addr) #0 {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 48 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, #5, mul vl]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #4, mul vl]
-; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0]
-; CHECK-NEXT: ld1d { z3.d }, p0/z, [x0, #3, mul vl]
-; CHECK-NEXT: ld1d { z4.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1d { z5.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT: st1d { z0.d }, p0, [sp, #5, mul vl]
-; CHECK-NEXT: st1d { z1.d }, p0, [sp, #4, mul vl]
-; CHECK-NEXT: st1d { z3.d }, p0, [sp, #3, mul vl]
-; CHECK-NEXT: st1d { z5.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT: st1d { z4.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT: st1d { z2.d }, p0, [sp]
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #5, mul vl]
+; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1d { z3.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: ld1d { z4.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1d { z5.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: st1d { z0.d }, p0, [sp]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp, #5, mul vl]
+; CHECK-NEXT: st1d { z3.d }, p0, [sp, #4, mul vl]
+; CHECK-NEXT: st1d { z5.d }, p0, [sp, #3, mul vl]
+; CHECK-NEXT: st1d { z4.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT: st1d { z2.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT: addvl sp, sp, #6
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
index 7292d52..f03a6f0 100644
--- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
+++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
@@ -13,12 +13,12 @@ define void @test(ptr %addr) #0 {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0]
-; CHECK-NEXT: st1d { z0.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT: st1d { z1.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT: st1d { z2.d }, p0, [sp]
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: st1d { z0.d }, p0, [sp]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT: st1d { z2.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT: addvl sp, sp, #3
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/allow-check.ll b/llvm/test/CodeGen/AArch64/allow-check.ll
new file mode 100644
index 0000000..9e4a473
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/allow-check.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=aarch64 -global-isel=0 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -global-isel=1 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -global-isel=0 -fast-isel=1 | FileCheck %s
+
+target triple = "aarch64-linux"
+
+define i1 @test_runtime() local_unnamed_addr {
+; CHECK-LABEL: test_runtime:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, #1 // =0x1
+; CHECK-NEXT: ret
+entry:
+ %allow = call i1 @llvm.allow.runtime.check(metadata !"test_check")
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.runtime.check(metadata) nounwind
+
+define i1 @test_ubsan() local_unnamed_addr {
+; CHECK-LABEL: test_ubsan:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, #1 // =0x1
+; CHECK-NEXT: ret
+entry:
+ %allow = call i1 @llvm.allow.ubsan.check(i8 7)
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.ubsan.check(i8) nounwind
diff --git a/llvm/test/CodeGen/AArch64/and-sink.ll b/llvm/test/CodeGen/AArch64/and-sink.ll
index 4d08586..f298a55 100644
--- a/llvm/test/CodeGen/AArch64/and-sink.ll
+++ b/llvm/test/CodeGen/AArch64/and-sink.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs < %s | FileCheck %s
; RUN: opt -S -passes='require<profile-summary>,function(codegenprepare)' -mtriple=aarch64-linux %s | FileCheck --check-prefix=CHECK-CGP %s
; RUN: opt -S -passes='require<profile-summary>,function(codegenprepare)' -cgpp-huge-func=0 -mtriple=aarch64-linux %s | FileCheck --check-prefix=CHECK-CGP %s
@@ -9,9 +10,18 @@
; Test that and is sunk into cmp block to form tbz.
define dso_local i32 @and_sink1(i32 %a, i1 %c) {
; CHECK-LABEL: and_sink1:
-; CHECK: tbz w1, #0
-; CHECK: str wzr, [x{{[0-9]+}}, :lo12:A]
-; CHECK: tbnz {{w[0-9]+}}, #2
+; CHECK: // %bb.0:
+; CHECK-NEXT: tbz w1, #0, .LBB0_3
+; CHECK-NEXT: // %bb.1: // %bb0
+; CHECK-NEXT: adrp x8, A
+; CHECK-NEXT: str wzr, [x8, :lo12:A]
+; CHECK-NEXT: tbnz w0, #2, .LBB0_3
+; CHECK-NEXT: // %bb.2:
+; CHECK-NEXT: mov w0, #1 // =0x1
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB0_3: // %bb2
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: ret
; CHECK-CGP-LABEL: @and_sink1(
; CHECK-CGP-NOT: and i32
@@ -35,12 +45,30 @@ bb2:
; Test that both 'and' and cmp get sunk to form tbz.
define dso_local i32 @and_sink2(i32 %a, i1 %c, i1 %c2) {
; CHECK-LABEL: and_sink2:
-; CHECK: str wzr, [x{{[0-9]+}}, :lo12:A]
-; CHECK: tbz w1, #0
-; CHECK: str wzr, [x{{[0-9]+}}, :lo12:B]
-; CHECK: tbz w2, #0
-; CHECK: str wzr, [x{{[0-9]+}}, :lo12:C]
-; CHECK: tbnz {{w[0-9]+}}, #2
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, wzr
+; CHECK-NEXT: adrp x9, A
+; CHECK-NEXT: str wzr, [x9, :lo12:A]
+; CHECK-NEXT: tbz w1, #0, .LBB1_5
+; CHECK-NEXT: // %bb.1: // %bb0.preheader
+; CHECK-NEXT: adrp x8, B
+; CHECK-NEXT: adrp x9, C
+; CHECK-NEXT: .LBB1_2: // %bb0
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: str wzr, [x8, :lo12:B]
+; CHECK-NEXT: tbz w2, #0, .LBB1_6
+; CHECK-NEXT: // %bb.3: // %bb1
+; CHECK-NEXT: // in Loop: Header=BB1_2 Depth=1
+; CHECK-NEXT: str wzr, [x9, :lo12:C]
+; CHECK-NEXT: tbnz w0, #2, .LBB1_2
+; CHECK-NEXT: // %bb.4:
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: .LBB1_5: // %common.ret
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB1_6:
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: ret
; CHECK-CGP-LABEL: @and_sink2(
; CHECK-CGP-NOT: and i32
@@ -71,10 +99,16 @@ bb3:
; Test that 'and' is not sunk since cbz is a better alternative.
define dso_local i32 @and_sink3(i32 %a) {
; CHECK-LABEL: and_sink3:
-; CHECK: and [[REG:w[0-9]+]], w0, #0x3
-; CHECK: [[LOOP:.L[A-Z0-9_]+]]:
-; CHECK: str wzr, [x{{[0-9]+}}, :lo12:A]
-; CHECK: cbz [[REG]], [[LOOP]]
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, A
+; CHECK-NEXT: and w9, w0, #0x3
+; CHECK-NEXT: .LBB2_1: // %bb0
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: str wzr, [x8, :lo12:A]
+; CHECK-NEXT: cbz w9, .LBB2_1
+; CHECK-NEXT: // %bb.2: // %bb2
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: ret
; CHECK-CGP-LABEL: @and_sink3(
; CHECK-CGP-NEXT: and i32
diff --git a/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll b/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
index 225d4c6..cb65867 100644
--- a/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -debug-entry-values -mtriple=arm64-apple-darwin | FileCheck %s
-; Stackmap Header: no constants - 6 callsites
+; Stackmap Header: no constants - 18 callsites
; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps
; CHECK-NEXT: __LLVM_StackMaps:
; Header
@@ -8,11 +8,11 @@
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .short 0
; Num Functions
-; CHECK-NEXT: .long 8
+; CHECK-NEXT: .long 18
; Num LargeConstants
; CHECK-NEXT: .long 0
; Num Callsites
-; CHECK-NEXT: .long 8
+; CHECK-NEXT: .long 18
; Functions and stack size
; CHECK-NEXT: .quad _test
@@ -39,6 +39,36 @@
; CHECK-NEXT: .quad _patchpoint_spillargs
; CHECK-NEXT: .quad 128
; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_i32
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_i64
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_p0
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_f16
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_f32
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_f64
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_v16i8
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_v4i32
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_v4f32
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_v2f64
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
; test
@@ -457,5 +487,194 @@ entry:
ret i64 %result
}
+; generic_test_i32
+; CHECK-LABEL: .long L{{.*}}-_generic_test_i32
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 4
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define i32 @generic_test_i32() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc i32 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i32(i64 14, i32 20, ptr null, i32 0)
+ ret i32 %ret
+}
+
+; generic_test_i64
+; CHECK-LABEL: .long L{{.*}}-_generic_test_i64
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define i64 @generic_test_i64() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 14, i32 20, ptr null, i32 0)
+ ret i64 %ret
+}
+
+; generic_test_p0
+; CHECK-LABEL: .long L{{.*}}-_generic_test_p0
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define ptr @generic_test_p0() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc ptr (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.p0(i64 14, i32 20, ptr null, i32 0)
+ ret ptr %ret
+}
+
+; generic_test_f16
+; CHECK-LABEL: .long L{{.*}}-_generic_test_f16
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define half @generic_test_f16() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc half (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f16(i64 14, i32 20, ptr null, i32 0)
+ ret half %ret
+}
+
+; generic_test_f32
+; CHECK-LABEL: .long L{{.*}}-_generic_test_f32
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 4
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define float @generic_test_f32() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc float (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f32(i64 14, i32 20, ptr null, i32 0)
+ ret float %ret
+}
+
+; generic_test_f64
+; CHECK-LABEL: .long L{{.*}}-_generic_test_f64
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define double @generic_test_f64() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc double (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f64(i64 14, i32 20, ptr null, i32 0)
+ ret double %ret
+}
+
+; generic_test_v16i8
+; CHECK-LABEL: .long L{{.*}}-_generic_test_v16i8
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 16
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define <16 x i8> @generic_test_v16i8() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc <16 x i8> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v16i8(i64 14, i32 20, ptr null, i32 0)
+ ret <16 x i8> %ret
+}
+
+; generic_test_v4i32
+; CHECK-LABEL: .long L{{.*}}-_generic_test_v4i32
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 16
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define <4 x i32> @generic_test_v4i32() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc <4 x i32> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v4i32(i64 14, i32 20, ptr null, i32 0)
+ ret <4 x i32> %ret
+}
+
+; generic_test_v4f32
+; CHECK-LABEL: .long L{{.*}}-_generic_test_v4f32
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 16
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define <4 x float> @generic_test_v4f32() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc <4 x float> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v4f32(i64 14, i32 20, ptr null, i32 0)
+ ret <4 x float> %ret
+}
+
+; generic_test_v2f64
+; CHECK-LABEL: .long L{{.*}}-_generic_test_v2f64
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 16
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define <2 x double> @generic_test_v2f64() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc <2 x double> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v2f64(i64 14, i32 20, ptr null, i32 0)
+ ret <2 x double> %ret
+}
+
declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i32 @llvm.experimental.patchpoint.i32(i64, i32, ptr, i32, ...)
declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
+declare ptr @llvm.experimental.patchpoint.p0(i64, i32, ptr, i32, ...)
+declare half @llvm.experimental.patchpoint.f16(i64, i32, ptr, i32, ...)
+declare float @llvm.experimental.patchpoint.f32(i64, i32, ptr, i32, ...)
+declare double @llvm.experimental.patchpoint.f64(i64, i32, ptr, i32, ...)
+declare <16 x i8> @llvm.experimental.patchpoint.v16i8(i64, i32, ptr, i32, ...)
+declare <4 x i32> @llvm.experimental.patchpoint.v4i32(i64, i32, ptr, i32, ...)
+declare <4 x float> @llvm.experimental.patchpoint.v4f32(i64, i32, ptr, i32, ...)
+declare <2 x double> @llvm.experimental.patchpoint.v2f64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll b/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
index 4932529..3007e7c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
@@ -8,9 +8,8 @@ define <4 x i16> @fptosi_v4f64_to_v4i16(ptr %ptr) {
; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-NEXT: xtn v1.2s, v1.2d
-; CHECK-NEXT: xtn v0.2s, v0.2d
-; CHECK-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: xtn v0.4h, v0.4s
; CHECK-NEXT: ret
%tmp1 = load <4 x double>, ptr %ptr
%tmp2 = fptosi <4 x double> %tmp1 to <4 x i16>
@@ -26,13 +25,10 @@ define <8 x i8> @fptosi_v4f64_to_v4i8(ptr %ptr) {
; CHECK-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-NEXT: fcvtzs v3.2d, v3.2d
; CHECK-NEXT: fcvtzs v2.2d, v2.2d
-; CHECK-NEXT: xtn v0.2s, v0.2d
-; CHECK-NEXT: xtn v1.2s, v1.2d
-; CHECK-NEXT: xtn v3.2s, v3.2d
-; CHECK-NEXT: xtn v2.2s, v2.2d
-; CHECK-NEXT: uzp1 v0.4h, v1.4h, v0.4h
-; CHECK-NEXT: uzp1 v1.4h, v2.4h, v3.4h
-; CHECK-NEXT: uzp1 v0.8b, v1.8b, v0.8b
+; CHECK-NEXT: uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: uzp1 v1.4s, v2.4s, v3.4s
+; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: xtn v0.8b, v0.8h
; CHECK-NEXT: ret
%tmp1 = load <8 x double>, ptr %ptr
%tmp2 = fptosi <8 x double> %tmp1 to <8 x i8>
@@ -96,9 +92,8 @@ define <4 x i16> @fptoui_v4f64_to_v4i16(ptr %ptr) {
; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-NEXT: xtn v1.2s, v1.2d
-; CHECK-NEXT: xtn v0.2s, v0.2d
-; CHECK-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: xtn v0.4h, v0.4s
; CHECK-NEXT: ret
%tmp1 = load <4 x double>, ptr %ptr
%tmp2 = fptoui <4 x double> %tmp1 to <4 x i16>
diff --git a/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll b/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll
index a1e0693..bc399c8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast -aarch64-enable-sink-fold=true | FileCheck %s --check-prefix=CHECK-SDAG
-; RUN: llc < %s -global-isel -global-isel-abort=2 -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast -aarch64-enable-sink-fold=true | FileCheck %s --check-prefix=CHECK-GISEL
+; RUN: llc < %s -global-isel -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast -aarch64-enable-sink-fold=true | FileCheck %s --check-prefix=CHECK-GISEL
define <4 x i8> @test_varidx_extract_v8s8(<8 x i8> %x, i32 %idx) {
; CHECK-SDAG-LABEL: test_varidx_extract_v8s8:
@@ -29,20 +29,20 @@ define <4 x i8> @test_varidx_extract_v8s8(<8 x i8> %x, i32 %idx) {
; CHECK-GISEL-NEXT: .cfi_def_cfa_offset 16
; CHECK-GISEL-NEXT: mov w9, w0
; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GISEL-NEXT: mov b1, v0.b[1]
; CHECK-GISEL-NEXT: add x8, sp, #8
-; CHECK-GISEL-NEXT: str d0, [sp, #8]
; CHECK-GISEL-NEXT: and x9, x9, #0x7
-; CHECK-GISEL-NEXT: mov b2, v0.b[1]
+; CHECK-GISEL-NEXT: str d0, [sp, #8]
; CHECK-GISEL-NEXT: mov b3, v0.b[2]
; CHECK-GISEL-NEXT: lsl x10, x9, #1
; CHECK-GISEL-NEXT: mov b0, v0.b[3]
; CHECK-GISEL-NEXT: sub x9, x10, x9
-; CHECK-GISEL-NEXT: ldrb w8, [x8, x9]
-; CHECK-GISEL-NEXT: fmov s1, w8
-; CHECK-GISEL-NEXT: mov v1.h[1], v2.h[0]
-; CHECK-GISEL-NEXT: mov v1.h[2], v3.h[0]
-; CHECK-GISEL-NEXT: mov v1.h[3], v0.h[0]
-; CHECK-GISEL-NEXT: fmov d0, d1
+; CHECK-GISEL-NEXT: ldr b2, [x8, x9]
+; CHECK-GISEL-NEXT: mov v2.b[1], v1.b[0]
+; CHECK-GISEL-NEXT: mov v2.b[2], v3.b[0]
+; CHECK-GISEL-NEXT: mov v2.b[3], v0.b[0]
+; CHECK-GISEL-NEXT: ushll v0.8h, v2.8b, #0
+; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GISEL-NEXT: add sp, sp, #16
; CHECK-GISEL-NEXT: ret
%tmp = extractelement <8 x i8> %x, i32 %idx
@@ -176,17 +176,15 @@ define <2 x i16> @test_varidx_extract_v4s16(<4 x i16> %x, i32 %idx) {
; CHECK-GISEL: // %bb.0:
; CHECK-GISEL-NEXT: sub sp, sp, #16
; CHECK-GISEL-NEXT: .cfi_def_cfa_offset 16
-; CHECK-GISEL-NEXT: mov w9, w0
-; CHECK-GISEL-NEXT: mov w8, #2 // =0x2
-; CHECK-GISEL-NEXT: add x10, sp, #8
-; CHECK-GISEL-NEXT: and x9, x9, #0x3
; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GISEL-NEXT: mov w9, w0
+; CHECK-GISEL-NEXT: mov h1, v0.h[1]
+; CHECK-GISEL-NEXT: add x8, sp, #8
; CHECK-GISEL-NEXT: str d0, [sp, #8]
-; CHECK-GISEL-NEXT: madd x8, x9, x8, x10
-; CHECK-GISEL-NEXT: umov w9, v0.h[1]
-; CHECK-GISEL-NEXT: fmov s1, w9
-; CHECK-GISEL-NEXT: ldr h0, [x8]
-; CHECK-GISEL-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GISEL-NEXT: and x9, x9, #0x3
+; CHECK-GISEL-NEXT: ldr h0, [x8, x9, lsl #1]
+; CHECK-GISEL-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GISEL-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GISEL-NEXT: add sp, sp, #16
; CHECK-GISEL-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll b/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
index c58f4b1..f948d78 100644
--- a/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
@@ -79,6 +79,145 @@ entry:
ret void
}
+; Test register allocation for an i32 result value of patchpoint.
+define i32 @generic_patchpoint_i32() {
+entry:
+; CHECK-LABEL: generic_patchpoint_i32:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in w0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call i32 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i32(i64 5, i32 4, ptr null, i32 0)
+ ret i32 %result
+}
+
+; Test register allocation for an i64 result value of patchpoint.
+define i64 @generic_patchpoint_i64() {
+entry:
+; CHECK-LABEL: generic_patchpoint_i64:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in x0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 4, ptr null, i32 0)
+ ret i64 %result
+}
+
+; Test register allocation for a ptr result value of patchpoint.
+define ptr @generic_patchpoint_p0() {
+entry:
+; CHECK-LABEL: generic_patchpoint_p0:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in x0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call ptr (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.p0(i64 5, i32 4, ptr null, i32 0)
+ ret ptr %result
+}
+
+; Test register allocation for a half result value of patchpoint.
+define half @generic_patchpoint_f16() {
+entry:
+; CHECK-LABEL: generic_patchpoint_f16:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in h0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call half (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f16(i64 5, i32 4, ptr null, i32 0)
+ ret half %result
+}
+
+; Test register allocation for a float result value of patchpoint.
+define float @generic_patchpoint_f32() {
+entry:
+; CHECK-LABEL: generic_patchpoint_f32:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in s0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call float (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f32(i64 5, i32 4, ptr null, i32 0)
+ ret float %result
+}
+
+; Test register allocation for a double result value of patchpoint.
+define double @generic_patchpoint_f64() {
+entry:
+; CHECK-LABEL: generic_patchpoint_f64:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in d0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call double (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f64(i64 5, i32 4, ptr null, i32 0)
+ ret double %result
+}
+
+; Test register allocation for a <16 x i8> result value of patchpoint.
+define <16 x i8> @generic_patchpoint_v16i8() {
+entry:
+; CHECK-LABEL: generic_patchpoint_v16i8:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in v0.16b.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call <16 x i8> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v16i8(i64 5, i32 4, ptr null, i32 0)
+ ret <16 x i8> %result
+}
+
+; Test register allocation for a <4 x i32> result value of patchpoint.
+define <4 x i32> @generic_patchpoint_v4i32() {
+entry:
+; CHECK-LABEL: generic_patchpoint_v4i32:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in v0.4s.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call <4 x i32> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v4i32(i64 5, i32 4, ptr null, i32 0)
+ ret <4 x i32> %result
+}
+
+; Test register allocation for a <4 x float> result value of patchpoint.
+define <4 x float> @generic_patchpoint_v4f32() {
+entry:
+; CHECK-LABEL: generic_patchpoint_v4f32:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in v0.4s.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call <4 x float> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v4f32(i64 5, i32 4, ptr null, i32 0)
+ ret <4 x float> %result
+}
+
+; Test register allocation for a <2 x double> result value of patchpoint.
+define <2 x double> @generic_patchpoint_v2f64() {
+entry:
+; CHECK-LABEL: generic_patchpoint_v2f64:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in v0.2d.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call <2 x double> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v2f64(i64 5, i32 4, ptr null, i32 0)
+ ret <2 x double> %result
+}
+
declare void @llvm.experimental.stackmap(i64, i32, ...)
declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i32 @llvm.experimental.patchpoint.i32(i64, i32, ptr, i32, ...)
declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
+declare ptr @llvm.experimental.patchpoint.p0(i64, i32, ptr, i32, ...)
+declare half @llvm.experimental.patchpoint.f16(i64, i32, ptr, i32, ...)
+declare float @llvm.experimental.patchpoint.f32(i64, i32, ptr, i32, ...)
+declare double @llvm.experimental.patchpoint.f64(i64, i32, ptr, i32, ...)
+declare <16 x i8> @llvm.experimental.patchpoint.v16i8(i64, i32, ptr, i32, ...)
+declare <4 x i32> @llvm.experimental.patchpoint.v4i32(i64, i32, ptr, i32, ...)
+declare <4 x float> @llvm.experimental.patchpoint.v4f32(i64, i32, ptr, i32, ...)
+declare <2 x double> @llvm.experimental.patchpoint.v2f64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/AArch64/arm64-xaluo.ll b/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
index 77c7066..0ec2d76 100644
--- a/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
@@ -2643,8 +2643,7 @@ define i8 @pr60530() {
;
; GISEL-LABEL: pr60530:
; GISEL: // %bb.0:
-; GISEL-NEXT: mov w8, #1 // =0x1
-; GISEL-NEXT: sbfx w0, w8, #0, #1
+; GISEL-NEXT: mov w0, #255 // =0xff
; GISEL-NEXT: ret
%1 = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 0, i8 1)
%2 = extractvalue { i8, i1 } %1, 1
diff --git a/llvm/test/CodeGen/AArch64/avoid-zero-copy.mir b/llvm/test/CodeGen/AArch64/avoid-zero-copy.mir
index 859be2d..b940734 100644
--- a/llvm/test/CodeGen/AArch64/avoid-zero-copy.mir
+++ b/llvm/test/CodeGen/AArch64/avoid-zero-copy.mir
@@ -19,6 +19,8 @@
...
---
name: foo
+frameInfo:
+ adjustsStack: true
body: |
bb.0 (%ir-block.0):
; CHECK-LABEL: name: foo
diff --git a/llvm/test/CodeGen/AArch64/bitcast.ll b/llvm/test/CodeGen/AArch64/bitcast.ll
index bccfdb9..e0851fd 100644
--- a/llvm/test/CodeGen/AArch64/bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/bitcast.ll
@@ -1,16 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; PR23065: SCALAR_TO_VECTOR implies the top elements 1 to N-1 of the N-element vector are undefined.
-; CHECK-GI: warning: Instruction selection used fallback path for bitcast_v4i8_i32
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for bitcast_i32_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for bitcast_v2i16_i32
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for bitcast_i32_v2i16
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for bitcast_v2i16_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for bitcast_v4i8_v2i16
-
define <4 x i16> @foo1(<2 x i32> %a) {
; CHECK-SD-LABEL: foo1:
; CHECK-SD: // %bb.0:
@@ -54,58 +47,104 @@ define <4 x i16> @foo2(<2 x i32> %a) {
; ===== To and From Scalar Types =====
define i32 @bitcast_v4i8_i32(<4 x i8> %a, <4 x i8> %b){
-; CHECK-LABEL: bitcast_v4i8_i32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: add v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: xtn v0.8b, v0.8h
-; CHECK-NEXT: fmov w0, s0
-; CHECK-NEXT: add sp, sp, #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_v4i8_i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: add v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v0.8b
+; CHECK-SD-NEXT: fmov w0, s0
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_v4i8_i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov h2, v0.h[2]
+; CHECK-GI-NEXT: mov h3, v0.h[3]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: mov v0.h[2], v2.h[0]
+; CHECK-GI-NEXT: mov v0.h[3], v3.h[0]
+; CHECK-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
%c = add <4 x i8> %a, %b
%d = bitcast <4 x i8> %c to i32
ret i32 %d
}
define <4 x i8> @bitcast_i32_v4i8(i32 %a, i32 %b){
-; CHECK-LABEL: bitcast_i32_v4i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, w1
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: zip1 v0.8b, v0.8b, v0.8b
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_i32_v4i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, w1
+; CHECK-SD-NEXT: fmov s0, w8
+; CHECK-SD-NEXT: zip1 v0.8b, v0.8b, v0.8b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_i32_v4i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, w1
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v3.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%c = add i32 %a, %b
%d = bitcast i32 %c to <4 x i8>
ret <4 x i8> %d
}
define i32 @bitcast_v2i16_i32(<2 x i16> %a, <2 x i16> %b){
-; CHECK-LABEL: bitcast_v2i16_i32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: add v0.2s, v0.2s, v1.2s
-; CHECK-NEXT: mov w8, v0.s[1]
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: strh w9, [sp, #12]
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: ldr w0, [sp, #12]
-; CHECK-NEXT: add sp, sp, #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_v2i16_i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: add v0.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: mov w8, v0.s[1]
+; CHECK-SD-NEXT: fmov w9, s0
+; CHECK-SD-NEXT: strh w9, [sp, #12]
+; CHECK-SD-NEXT: strh w8, [sp, #14]
+; CHECK-SD-NEXT: ldr w0, [sp, #12]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_v2i16_i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add v0.2s, v0.2s, v1.2s
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
%c = add <2 x i16> %a, %b
%d = bitcast <2 x i16> %c to i32
ret i32 %d
}
define <2 x i16> @bitcast_i32_v2i16(i32 %a, i32 %b){
-; CHECK-LABEL: bitcast_i32_v2i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, w1
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_i32_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, w1
+; CHECK-SD-NEXT: fmov s0, w8
+; CHECK-SD-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_i32_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, w1
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%c = add i32 %a, %b
%d = bitcast i32 %c to <2 x i16>
ret <2 x i16> %d
@@ -362,40 +401,72 @@ define <8 x i16> @bitcast_v16i8_v8i16(<16 x i8> %a, <16 x i8> %b){
; ===== Smaller/Larger Width Vectors with Legal Element Sizes =====
define <4 x i8> @bitcast_v2i16_v4i8(<2 x i16> %a, <2 x i16> %b){
-; CHECK-LABEL: bitcast_v2i16_v4i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: add v0.2s, v0.2s, v1.2s
-; CHECK-NEXT: mov w8, v0.s[1]
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: strh w9, [sp, #12]
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: ldr s0, [sp, #12]
-; CHECK-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: add sp, sp, #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_v2i16_v4i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: add v0.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: mov w8, v0.s[1]
+; CHECK-SD-NEXT: fmov w9, s0
+; CHECK-SD-NEXT: strh w9, [sp, #12]
+; CHECK-SD-NEXT: strh w8, [sp, #14]
+; CHECK-SD-NEXT: ldr s0, [sp, #12]
+; CHECK-SD-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_v2i16_v4i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add v0.2s, v0.2s, v1.2s
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v3.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%c = add <2 x i16> %a, %b
%d = bitcast <2 x i16> %c to <4 x i8>
ret <4 x i8> %d
}
define <2 x i16> @bitcast_v4i8_v2i16(<4 x i8> %a, <4 x i8> %b){
-; CHECK-LABEL: bitcast_v4i8_v2i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: add v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: add x8, sp, #12
-; CHECK-NEXT: xtn v0.8b, v0.8h
-; CHECK-NEXT: str s0, [sp, #12]
-; CHECK-NEXT: ld1 { v0.h }[0], [x8]
-; CHECK-NEXT: orr x8, x8, #0x2
-; CHECK-NEXT: ld1 { v0.h }[2], [x8]
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: add sp, sp, #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_v4i8_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: add v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: add x8, sp, #12
+; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v0.8b
+; CHECK-SD-NEXT: str s0, [sp, #12]
+; CHECK-SD-NEXT: ld1 { v0.h }[0], [x8]
+; CHECK-SD-NEXT: orr x8, x8, #0x2
+; CHECK-SD-NEXT: ld1 { v0.h }[2], [x8]
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_v4i8_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov h2, v0.h[2]
+; CHECK-GI-NEXT: mov h3, v0.h[3]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: mov v0.h[2], v2.h[0]
+; CHECK-GI-NEXT: mov v0.h[3], v3.h[0]
+; CHECK-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%c = add <4 x i8> %a, %b
%d = bitcast <4 x i8> %c to <2 x i16>
ret <2 x i16> %d
diff --git a/llvm/test/CodeGen/AArch64/bswap.ll b/llvm/test/CodeGen/AArch64/bswap.ll
index f4221ac..071613b 100644
--- a/llvm/test/CodeGen/AArch64/bswap.ll
+++ b/llvm/test/CodeGen/AArch64/bswap.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
-
-; CHECK-GI: warning: Instruction selection used fallback path for bswap_v2i16
+; RUN: llc -mtriple=aarch64 -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; ====== Scalar Tests =====
define i16 @bswap_i16(i16 %a){
@@ -103,11 +101,23 @@ declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
; ===== Smaller/Larger Width Vectors with Legal Element Sizes =====
define <2 x i16> @bswap_v2i16(<2 x i16> %a){
-; CHECK-LABEL: bswap_v2i16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: rev32 v0.8b, v0.8b
-; CHECK-NEXT: ushr v0.2s, v0.2s, #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bswap_v2i16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: rev32 v0.8b, v0.8b
+; CHECK-SD-NEXT: ushr v0.2s, v0.2s, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bswap_v2i16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: rev16 v0.8b, v0.8b
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
entry:
%res = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> %a)
ret <2 x i16> %res
diff --git a/llvm/test/CodeGen/AArch64/clear-dead-implicit-def-impdef.mir b/llvm/test/CodeGen/AArch64/clear-dead-implicit-def-impdef.mir
index 9040937..1592c86 100644
--- a/llvm/test/CodeGen/AArch64/clear-dead-implicit-def-impdef.mir
+++ b/llvm/test/CodeGen/AArch64/clear-dead-implicit-def-impdef.mir
@@ -3,6 +3,8 @@
---
name: func
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
body: |
bb.0:
liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add.ll
index 7b8448d..7cdb10e 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add.ll
@@ -1,23 +1,42 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s --mattr=+complxnum,+neon,+fullfp16 -o - | FileCheck %s
-; RUN: llc < %s --mattr=+complxnum,+neon,+fullfp16,+sve -o - | FileCheck %s
-; RUN: llc < %s --mattr=+complxnum,+neon,+fullfp16,+sve2 -o - | FileCheck %s
+; RUN: llc < %s --mattr=+complxnum,+neon,+fullfp16 -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s --mattr=+complxnum,+neon,+fullfp16,+sve -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s --mattr=+complxnum,+neon,+fullfp16,+sve2 -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s --global-isel --global-isel-abort=2 --mattr=+complxnum,+neon,+fullfp16 -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s --global-isel --global-isel-abort=2 --mattr=+complxnum,+neon,+fullfp16,+sve -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s --global-isel --global-isel-abort=2 --mattr=+complxnum,+neon,+fullfp16,+sve2 -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
target triple = "aarch64"
+; CHECK-GI: warning: Instruction selection used fallback path for complex_add_v16f16
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for complex_add_v32f16
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for complex_add_v16f16_with_intrinsic
+
; Expected to not transform
define <2 x half> @complex_add_v2f16(<2 x half> %a, <2 x half> %b) {
-; CHECK-LABEL: complex_add_v2f16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: mov h2, v0.h[1]
-; CHECK-NEXT: mov h3, v1.h[1]
-; CHECK-NEXT: fsub h1, h1, h2
-; CHECK-NEXT: fadd h0, h3, h0
-; CHECK-NEXT: mov v1.h[1], v0.h[0]
-; CHECK-NEXT: fmov d0, d1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: complex_add_v2f16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: mov h2, v0.h[1]
+; CHECK-SD-NEXT: mov h3, v1.h[1]
+; CHECK-SD-NEXT: fsub h1, h1, h2
+; CHECK-SD-NEXT: fadd h0, h3, h0
+; CHECK-SD-NEXT: mov v1.h[1], v0.h[0]
+; CHECK-SD-NEXT: fmov d0, d1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: complex_add_v2f16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NEXT: fsub h1, h1, h2
+; CHECK-GI-NEXT: fadd h0, h3, h0
+; CHECK-GI-NEXT: mov v1.h[1], v0.h[0]
+; CHECK-GI-NEXT: fmov d0, d1
+; CHECK-GI-NEXT: ret
entry:
%a.real = shufflevector <2 x half> %a, <2 x half> zeroinitializer, <1 x i32> <i32 0>
%a.imag = shufflevector <2 x half> %a, <2 x half> zeroinitializer, <1 x i32> <i32 1>
@@ -162,17 +181,29 @@ entry:
; Expected not to transform as it is integer
define <16 x i16> @complex_add_v16i16(<16 x i16> %a, <16 x i16> %b) {
-; CHECK-LABEL: complex_add_v16i16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: uzp1 v4.8h, v2.8h, v3.8h
-; CHECK-NEXT: uzp1 v5.8h, v0.8h, v1.8h
-; CHECK-NEXT: uzp2 v0.8h, v0.8h, v1.8h
-; CHECK-NEXT: uzp2 v1.8h, v2.8h, v3.8h
-; CHECK-NEXT: sub v2.8h, v4.8h, v0.8h
-; CHECK-NEXT: add v1.8h, v1.8h, v5.8h
-; CHECK-NEXT: zip1 v0.8h, v2.8h, v1.8h
-; CHECK-NEXT: zip2 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: complex_add_v16i16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: uzp1 v4.8h, v2.8h, v3.8h
+; CHECK-SD-NEXT: uzp1 v5.8h, v0.8h, v1.8h
+; CHECK-SD-NEXT: uzp2 v0.8h, v0.8h, v1.8h
+; CHECK-SD-NEXT: uzp2 v1.8h, v2.8h, v3.8h
+; CHECK-SD-NEXT: sub v2.8h, v4.8h, v0.8h
+; CHECK-SD-NEXT: add v1.8h, v1.8h, v5.8h
+; CHECK-SD-NEXT: zip1 v0.8h, v2.8h, v1.8h
+; CHECK-SD-NEXT: zip2 v1.8h, v2.8h, v1.8h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: complex_add_v16i16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: uzp1 v4.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT: uzp2 v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT: uzp1 v1.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: uzp2 v2.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: sub v1.8h, v1.8h, v0.8h
+; CHECK-GI-NEXT: add v2.8h, v2.8h, v4.8h
+; CHECK-GI-NEXT: zip1 v0.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: zip2 v1.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: ret
entry:
%a.real = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
%a.imag = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
diff --git a/llvm/test/CodeGen/AArch64/dllexport.ll b/llvm/test/CodeGen/AArch64/dllexport.ll
index 81ba674..580fb5f 100644
--- a/llvm/test/CodeGen/AArch64/dllexport.ll
+++ b/llvm/test/CodeGen/AArch64/dllexport.ll
@@ -1,5 +1,7 @@
; RUN: llc -mtriple aarch64-windows-gnu -filetype asm -o - %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-GNU
; RUN: llc -mtriple aarch64-windows-msvc -filetype asm -o - %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-MSVC
+; RUN: llc -mtriple arm64ec-windows-gnu -filetype asm -o - %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-GNU-EC
+; RUN: llc -mtriple arm64ec-windows-msvc -filetype asm -o - %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-MSVC-EC
define void @f() {
ret void
@@ -71,3 +73,40 @@ define weak_odr dllexport void @l() {
; CHECK-MSVC: .ascii " /EXPORT:s"
; CHECK-MSVC: .ascii " /EXPORT:t"
; CHECK-MSVC: .ascii " /EXPORT:u"
+
+; CHECK-GNU-EC-NOT: -export:f
+; CHECK-GNU-EC-NOT: -export:#f,EXPORTAS,f
+; CHECK-GNU-EC: .ascii " -export:#g,EXPORTAS,g
+; CHECK-GNU-EC: .ascii " -export:#h,EXPORTAS,h
+; CHECK-GNU-EC-NOT: -export:i
+; CHECK-GNU-EC-NOT: -export:#i,EXPORTAS,i
+; CHECK-GNU-EC: .ascii " -export:#j,EXPORTAS,j"
+; CHECK-GNU-EC: .ascii " -export:#k,EXPORTAS,k"
+; CHECK-GNU-EC: .ascii " -export:#l,EXPORTAS,l"
+; CHECK-GNU-EC: .ascii " -export:m,data"
+; CHECK-GNU-EC: .ascii " -export:n,data"
+; CHECK-GNU-EC: .ascii " -export:o,data"
+; CHECK-GNU-EC: .ascii " -export:p,data"
+; CHECK-GNU-EC: .ascii " -export:q,data"
+; CHECK-GNU-EC: .ascii " -export:r"
+; CHECK-GNU-EC: .ascii " -export:s"
+; CHECK-GNU-EC: .ascii " -export:t"
+; CHECK-GNU-EC: .ascii " -export:u"
+; CHECK-MSVC-EC-NOT: /EXPORT:f
+; CHECK-MSVC-EC-NOT: /EXPORT:#f,EXPORTAS,f
+; CHECK-MSVC-EC: .ascii " /EXPORT:#g,EXPORTAS,g"
+; CHECK-MSVC-EC: .ascii " /EXPORT:#h,EXPORTAS,h"
+; CHECK-MSVC-EC-NOT: /EXPORT:i
+; CHECK-MSVC-EC-NOT: /EXPORT:#i,EXPORTAS,i
+; CHECK-MSVC-EC: .ascii " /EXPORT:#j,EXPORTAS,j"
+; CHECK-MSVC-EC: .ascii " /EXPORT:#k,EXPORTAS,k"
+; CHECK-MSVC-EC: .ascii " /EXPORT:#l,EXPORTAS,l"
+; CHECK-MSVC-EC: .ascii " /EXPORT:m,DATA"
+; CHECK-MSVC-EC: .ascii " /EXPORT:n,DATA"
+; CHECK-MSVC-EC: .ascii " /EXPORT:o,DATA"
+; CHECK-MSVC-EC: .ascii " /EXPORT:p,DATA"
+; CHECK-MSVC-EC: .ascii " /EXPORT:q,DATA"
+; CHECK-MSVC-EC: .ascii " /EXPORT:r"
+; CHECK-MSVC-EC: .ascii " /EXPORT:s"
+; CHECK-MSVC-EC: .ascii " /EXPORT:t"
+; CHECK-MSVC-EC: .ascii " /EXPORT:u"
diff --git a/llvm/test/CodeGen/AArch64/extbinopload.ll b/llvm/test/CodeGen/AArch64/extbinopload.ll
index 1f68c77..dff4831 100644
--- a/llvm/test/CodeGen/AArch64/extbinopload.ll
+++ b/llvm/test/CodeGen/AArch64/extbinopload.ll
@@ -650,7 +650,7 @@ define <16 x i32> @extrause_load(ptr %p, ptr %q, ptr %r, ptr %s, ptr %z) {
; CHECK-NEXT: add x11, x3, #12
; CHECK-NEXT: str s1, [x4]
; CHECK-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-NEXT: ldp s0, s5, [x2]
+; CHECK-NEXT: ldp s0, s4, [x2]
; CHECK-NEXT: ushll v2.8h, v0.8b, #0
; CHECK-NEXT: umov w9, v2.h[0]
; CHECK-NEXT: umov w10, v2.h[1]
@@ -662,24 +662,25 @@ define <16 x i32> @extrause_load(ptr %p, ptr %q, ptr %r, ptr %s, ptr %z) {
; CHECK-NEXT: ushll v2.8h, v2.8b, #0
; CHECK-NEXT: mov v0.b[10], w9
; CHECK-NEXT: add x9, x1, #4
-; CHECK-NEXT: uzp1 v1.8b, v1.8b, v2.8b
+; CHECK-NEXT: mov v1.d[1], v2.d[0]
; CHECK-NEXT: mov v0.b[11], w10
; CHECK-NEXT: add x10, x1, #12
+; CHECK-NEXT: bic v1.8h, #255, lsl #8
; CHECK-NEXT: ld1 { v0.s }[3], [x3], #4
-; CHECK-NEXT: ldr s4, [x0, #12]
-; CHECK-NEXT: ldp s3, s16, [x0, #4]
-; CHECK-NEXT: ld1 { v5.s }[1], [x3]
-; CHECK-NEXT: ldp s6, s7, [x2, #8]
-; CHECK-NEXT: ld1 { v4.s }[1], [x10]
-; CHECK-NEXT: ld1 { v3.s }[1], [x9]
-; CHECK-NEXT: ld1 { v6.s }[1], [x8]
-; CHECK-NEXT: ld1 { v7.s }[1], [x11]
+; CHECK-NEXT: ldr s3, [x0, #12]
+; CHECK-NEXT: ldp s2, s7, [x0, #4]
+; CHECK-NEXT: ld1 { v4.s }[1], [x3]
+; CHECK-NEXT: ldp s5, s6, [x2, #8]
+; CHECK-NEXT: ld1 { v3.s }[1], [x10]
+; CHECK-NEXT: ld1 { v2.s }[1], [x9]
+; CHECK-NEXT: ld1 { v5.s }[1], [x8]
+; CHECK-NEXT: ld1 { v6.s }[1], [x11]
; CHECK-NEXT: add x8, x1, #8
-; CHECK-NEXT: ld1 { v16.s }[1], [x8]
-; CHECK-NEXT: uaddl v2.8h, v3.8b, v4.8b
-; CHECK-NEXT: ushll v3.8h, v6.8b, #0
-; CHECK-NEXT: uaddl v4.8h, v5.8b, v7.8b
-; CHECK-NEXT: uaddl v1.8h, v1.8b, v16.8b
+; CHECK-NEXT: ld1 { v7.s }[1], [x8]
+; CHECK-NEXT: uaddl v2.8h, v2.8b, v3.8b
+; CHECK-NEXT: ushll v3.8h, v5.8b, #0
+; CHECK-NEXT: uaddl v4.8h, v4.8b, v6.8b
+; CHECK-NEXT: uaddw v1.8h, v1.8h, v7.8b
; CHECK-NEXT: uaddw2 v5.8h, v3.8h, v0.16b
; CHECK-NEXT: ushll v0.4s, v2.4h, #3
; CHECK-NEXT: ushll2 v2.4s, v2.8h, #3
diff --git a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
new file mode 100644
index 0000000..504222e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
@@ -0,0 +1,1114 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
+; CHECK-GI: warning: Instruction selection used fallback path for extract_v4i32_vector_insert
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for extract_v4i32_vector_insert_const
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for extract_v4i32_vector_extract
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for extract_v4i32_vector_extract_const
+
+define i64 @extract_v2i64_undef_index(<2 x i64> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v2i64_undef_index:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov x0, d0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2i64_undef_index:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: str q0, [sp, #-16]!
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: ldr x0, [sp], #16
+; CHECK-GI-NEXT: ret
+entry:
+ %d = extractelement <2 x i64> %a, i32 undef
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_undef_vector(<2 x i64> %a, i32 %c) {
+; CHECK-LABEL: extract_v2i64_undef_vector:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ret
+entry:
+ %d = extractelement <2 x i64> undef, i32 %c
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_opaque(<2 x i64> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v2i64_opaque:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: bfi x8, x0, #3, #1
+; CHECK-SD-NEXT: ldr x0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2i64_opaque:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: mov w9, w0
+; CHECK-GI-NEXT: mov x8, sp
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: and x9, x9, #0x1
+; CHECK-GI-NEXT: ldr x0, [x8, x9, lsl #3]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %d = extractelement <2 x i64> %a, i32 %c
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_oob(<2 x i64> %a, i32 %c) {
+; CHECK-LABEL: extract_v2i64_oob:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ret
+entry:
+ %d = extractelement <2 x i64> %a, i32 5
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_freeze(<2 x i64> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v2i64_freeze:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: bfi x8, x0, #3, #1
+; CHECK-SD-NEXT: ldr x0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2i64_freeze:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: mov w9, w0
+; CHECK-GI-NEXT: mov x8, sp
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: and x9, x9, #0x1
+; CHECK-GI-NEXT: ldr x0, [x8, x9, lsl #3]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %fvector = freeze <2 x i64> %a
+ %d = extractelement <2 x i64> %fvector, i32 %c
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_extract_of_insert(<2 x i64> %a, i64 %element, i64 %c) {
+; CHECK-LABEL: extract_v2i64_extract_of_insert:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ret
+entry:
+ %vector = insertelement <2 x i64> %a, i64 %element, i64 %c
+ %d = extractelement <2 x i64> %vector, i64 %c
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_extract_of_insert_different_const(<2 x i64> %a, i64 %element) {
+; CHECK-SD-LABEL: extract_v2i64_extract_of_insert_different_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: mov x0, v0.d[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2i64_extract_of_insert_different_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov d0, v0.d[1]
+; CHECK-GI-NEXT: fmov x0, d0
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = insertelement <2 x i64> %a, i64 %element, i64 0
+ %d = extractelement <2 x i64> %vector, i64 1
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_extract_build_vector_const(<2 x i64> %a, i32 %c) {
+; CHECK-LABEL: extract_v2i64_extract_build_vector_const:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, #11 // =0xb
+; CHECK-NEXT: ret
+entry:
+ %d = extractelement <2 x i64> <i64 42, i64 11>, i32 1
+ ret i64 %d
+}
+
+define i64 @extract_v2i64_extract_build_vector_opaque(<2 x i64> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v2i64_extract_build_vector_opaque:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: adrp x8, .LCPI8_0
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: ldr q0, [x8, :lo12:.LCPI8_0]
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: bfi x8, x0, #3, #1
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr x0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2i64_extract_build_vector_opaque:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: adrp x8, .LCPI8_0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI8_0]
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x1
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr x0, [x9, x8, lsl #3]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %d = extractelement <2 x i64> <i64 42, i64 11>, i32 %c
+ ret i64 %d
+}
+
+
+define i64 @extract_v2i32_zext(<2 x i32> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v2i32_zext:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: ushll v0.2d, v0.2s, #0
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #3, #1
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr x0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2i32_zext:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: ushll v0.2d, v0.2s, #0
+; CHECK-GI-NEXT: mov w9, w0
+; CHECK-GI-NEXT: mov x8, sp
+; CHECK-GI-NEXT: and x9, x9, #0x1
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr x0, [x8, x9, lsl #3]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %zvector = zext <2 x i32> %a to <2 x i64>
+ %d = extractelement <2 x i64> %zvector, i32 %c
+ ret i64 %d
+}
+
+define i64 @extract_v2double_fptosi(<2 x double> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v2double_fptosi:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #3, #1
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr x0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2double_fptosi:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d
+; CHECK-GI-NEXT: mov w9, w0
+; CHECK-GI-NEXT: mov x8, sp
+; CHECK-GI-NEXT: and x9, x9, #0x1
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr x0, [x8, x9, lsl #3]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = fptosi <2 x double> %a to <2 x i64>
+ %d = extractelement <2 x i64> %vector, i32 %c
+ ret i64 %d
+}
+
+define double @extract_v2double_fneg(<2 x double> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v2double_fneg:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: fneg v0.2d, v0.2d
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #3, #1
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr d0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v2double_fneg:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: fneg v0.2d, v0.2d
+; CHECK-GI-NEXT: mov w9, w0
+; CHECK-GI-NEXT: mov x8, sp
+; CHECK-GI-NEXT: and x9, x9, #0x1
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr d0, [x8, x9, lsl #3]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = fneg <2 x double> %a
+ %d = extractelement <2 x double> %vector, i32 %c
+ ret double %d
+}
+
+define i32 @extract_v4i32_add(<4 x i32> %a, <4 x i32> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_add:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: adrp x8, .LCPI12_0
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI12_0]
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_add:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: adrp x8, .LCPI12_0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI12_0]
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = add <4 x i32> %a, <i32 42, i32 11, i32 17, i32 6>
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
+
+define float @extract_v4i32_minimum(<4 x float> %a, <4 x float> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_minimum:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: fmin v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr s0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_minimum:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: fmin v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr s0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = call <4 x float> @llvm.minimum.v4float(<4 x float> %a, <4 x float> %b)
+ %d = extractelement <4 x float> %vector, i32 %c
+ ret float %d
+}
+
+define float @extract_v4i32_minimum_build_vector(<4 x float> %a, <4 x float> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_minimum_build_vector:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: adrp x8, .LCPI14_0
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI14_0]
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: fmin v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr s0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_minimum_build_vector:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: adrp x8, .LCPI14_0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI14_0]
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: fmin v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr s0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = call <4 x float> @llvm.minimum.v4float(<4 x float> %a, <4 x float> <float 42.0, float 11.0, float 17.0, float 6.0>)
+ %d = extractelement <4 x float> %vector, i32 %c
+ ret float %d
+}
+
+define float @extract_v4i32_minimum_build_vector_const(<4 x float> %a, <4 x float> %b, i32 %c) {
+; CHECK-LABEL: extract_v4i32_minimum_build_vector_const:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: adrp x8, .LCPI15_0
+; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI15_0]
+; CHECK-NEXT: fmin v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: mov s0, v0.s[1]
+; CHECK-NEXT: ret
+entry:
+ %vector = call <4 x float> @llvm.minimum.v4float(<4 x float> %a, <4 x float> <float 42.0, float 11.0, float 17.0, float 6.0>)
+ %d = extractelement <4 x float> %vector, i32 1
+ ret float %d
+}
+
+define float @extract_v4i32_copysign_build_vector(<4 x float> %a, <4 x float> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_copysign_build_vector:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: adrp x8, .LCPI16_0
+; CHECK-SD-NEXT: mvni v1.4s, #128, lsl #24
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: ldr q2, [x8, :lo12:.LCPI16_0]
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: bif v0.16b, v2.16b, v1.16b
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr s0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_copysign_build_vector:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: mvni v1.4s, #128, lsl #24
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr s0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = call <4 x float> @llvm.copysign.v4float(<4 x float> %a, <4 x float> <float 42.0, float 11.0, float 17.0, float 6.0>)
+ %d = extractelement <4 x float> %vector, i32 %c
+ ret float %d
+}
+
+define float @extract_v4i32_copysign_build_vector_const(<4 x float> %a, <4 x float> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_copysign_build_vector_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: adrp x8, .LCPI17_0
+; CHECK-SD-NEXT: mvni v1.4s, #128, lsl #24
+; CHECK-SD-NEXT: ldr q2, [x8, :lo12:.LCPI17_0]
+; CHECK-SD-NEXT: bif v0.16b, v2.16b, v1.16b
+; CHECK-SD-NEXT: mov s0, v0.s[2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_copysign_build_vector_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mvni v1.4s, #128, lsl #24
+; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT: mov s0, v0.s[2]
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = call <4 x float> @llvm.copysign.v4float(<4 x float> %a, <4 x float> <float 42.0, float 11.0, float 17.0, float 6.0>)
+ %d = extractelement <4 x float> %vector, i32 2
+ ret float %d
+}
+
+
+define i32 @extract_v4i32_icmp(<4 x i32> %a, <4 x i32> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_icmp:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: adrp x8, .LCPI18_0
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI18_0]
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: cmge v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_icmp:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: adrp x8, .LCPI18_0
+; CHECK-GI-NEXT: movi v2.4s, #1
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI18_0]
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: cmge v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = icmp sle <4 x i32> %a, <i32 42, i32 11, i32 17, i32 6>
+ %zvector = zext <4 x i1> %vector to <4 x i32>
+ %d = extractelement <4 x i32> %zvector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_icmp_const(<4 x i32> %a, <4 x i32> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_icmp_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: adrp x8, .LCPI19_0
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI19_0]
+; CHECK-SD-NEXT: cmge v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-SD-NEXT: mov w0, v0.s[2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_icmp_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, .LCPI19_0
+; CHECK-GI-NEXT: movi v2.4s, #1
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI19_0]
+; CHECK-GI-NEXT: cmge v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT: mov s0, v0.s[2]
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = icmp sle <4 x i32> %a, <i32 42, i32 11, i32 17, i32 6>
+ %zvector = zext <4 x i1> %vector to <4 x i32>
+ %d = extractelement <4 x i32> %zvector, i32 2
+ ret i32 %d
+}
+
+define i32 @extract_v4float_fcmp(<4 x float> %a, <4 x float> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4float_fcmp:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: movi v1.4s, #1
+; CHECK-SD-NEXT: fcmeq v0.4s, v0.4s, v0.4s
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: bic v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4float_fcmp:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: fmov v1.4s, #1.00000000
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: fcmge v2.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT: movi v1.4s, #1
+; CHECK-GI-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT: bic v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = fcmp uno <4 x float> %a, <float 1.0, float 1.0, float 1.0, float 1.0>
+ %zvector = zext <4 x i1> %vector to <4 x i32>
+ %d = extractelement <4 x i32> %zvector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4float_fcmp_const(<4 x float> %a, <4 x float> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4float_fcmp_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: movi v1.4s, #1
+; CHECK-SD-NEXT: fcmeq v0.4s, v0.4s, v0.4s
+; CHECK-SD-NEXT: bic v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: mov w0, v0.s[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4float_fcmp_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fmov v1.4s, #1.00000000
+; CHECK-GI-NEXT: fcmge v2.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT: movi v1.4s, #1
+; CHECK-GI-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT: bic v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: mov s0, v0.s[1]
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = fcmp uno <4 x float> %a, <float 1.0, float 1.0, float 1.0, float 1.0>
+ %zvector = zext <4 x i1> %vector to <4 x i32>
+ %d = extractelement <4 x i32> %zvector, i32 1
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_select(<4 x i32> %a, <4 x i32> %b, i32 %c, <4 x i1> %cond) {
+; CHECK-SD-LABEL: extract_v4i32_select:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-SD-NEXT: adrp x8, .LCPI22_0
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: ldr q2, [x8, :lo12:.LCPI22_0]
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: shl v1.4s, v1.4s, #31
+; CHECK-SD-NEXT: cmlt v1.4s, v1.4s, #0
+; CHECK-SD-NEXT: bif v0.16b, v2.16b, v1.16b
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_select:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-GI-NEXT: adrp x8, .LCPI22_0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI22_0]
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = select <4 x i1> %cond, <4 x i32> %a, <4 x i32> <i32 42, i32 11, i32 17, i32 6>
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_select_const(<4 x i32> %a, <4 x i32> %b, i32 %c, <4 x i1> %cond) {
+; CHECK-SD-LABEL: extract_v4i32_select_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-SD-NEXT: movi v2.4s, #17
+; CHECK-SD-NEXT: shl v1.4s, v1.4s, #31
+; CHECK-SD-NEXT: cmlt v1.4s, v1.4s, #0
+; CHECK-SD-NEXT: bif v0.16b, v2.16b, v1.16b
+; CHECK-SD-NEXT: mov w0, v0.s[2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_select_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-GI-NEXT: adrp x8, .LCPI23_0
+; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI23_0]
+; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
+; CHECK-GI-NEXT: mov s0, v0.s[2]
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = select <4 x i1> %cond, <4 x i32> %a, <4 x i32> <i32 42, i32 11, i32 17, i32 6>
+ %d = extractelement <4 x i32> %vector, i32 2
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_abs(<4 x float> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_abs:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: frintp v0.4s, v0.4s
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: frintm v0.4s, v0.4s
+; CHECK-SD-NEXT: fabs v0.4s, v0.4s
+; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-SD-NEXT: abs v0.4s, v0.4s
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_abs:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: frintp v0.4s, v0.4s
+; CHECK-GI-NEXT: mov w9, w0
+; CHECK-GI-NEXT: mov x8, sp
+; CHECK-GI-NEXT: and x9, x9, #0x3
+; CHECK-GI-NEXT: frintm v0.4s, v0.4s
+; CHECK-GI-NEXT: fabs v0.4s, v0.4s
+; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-NEXT: abs v0.4s, v0.4s
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x8, x9, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %ceil = call <4 x float> @llvm.ceil.v4float(<4 x float> %a)
+ %floor = call <4 x float> @llvm.floor.v4float(<4 x float> %ceil)
+ %fabs = call <4 x float> @llvm.fabs.v4float(<4 x float> %floor)
+ %abs = fptosi <4 x float> %fabs to <4 x i32>
+ %vector = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %abs, i1 0)
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_abs_const(<4 x float> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_abs_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: mov w0, #4 // =0x4
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_abs_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, .LCPI25_0
+; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI25_0]
+; CHECK-GI-NEXT: frintp v0.4s, v0.4s
+; CHECK-GI-NEXT: frintm v0.4s, v0.4s
+; CHECK-GI-NEXT: fabs v0.4s, v0.4s
+; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-NEXT: abs v0.4s, v0.4s
+; CHECK-GI-NEXT: mov s0, v0.s[1]
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
+entry:
+ %ceil = call <4 x float> @llvm.ceil.v4float(<4 x float> <float 1.0, float 4.0, float 3.0, float 2.0>)
+ %floor = call <4 x float> @llvm.floor.v4float(<4 x float> %ceil)
+ %fabs = call <4 x float> @llvm.fabs.v4float(<4 x float> %floor)
+ %abs = fptosi <4 x float> %fabs to <4 x i32>
+ %vector = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %abs, i1 0)
+ %d = extractelement <4 x i32> %vector, i32 1
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_abs_half_const(<4 x float> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_abs_half_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: adrp x8, .LCPI26_0
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: ldr q0, [x8, :lo12:.LCPI26_0]
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_abs_half_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: adrp x8, .LCPI26_0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI26_0]
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: frintp v0.4s, v0.4s
+; CHECK-GI-NEXT: frintm v0.4s, v0.4s
+; CHECK-GI-NEXT: fabs v0.4s, v0.4s
+; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-NEXT: abs v0.4s, v0.4s
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %ceil = call <4 x float> @llvm.ceil.v4float(<4 x float> <float 1.0, float 4.0, float 3.0, float 2.0>)
+ %floor = call <4 x float> @llvm.floor.v4float(<4 x float> %ceil)
+ %fabs = call <4 x float> @llvm.fabs.v4float(<4 x float> %floor)
+ %abs = fptosi <4 x float> %fabs to <4 x i32>
+ %vector = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %abs, i1 0)
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_vector_insert(<4 x i32> %a, <2 x i32> %b, i32 %c) {
+; CHECK-LABEL: extract_v4i32_vector_insert:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: mov x8, sp
+; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT: bfi x8, x0, #2, #2
+; CHECK-NEXT: mov v1.d[1], v0.d[0]
+; CHECK-NEXT: str q1, [sp]
+; CHECK-NEXT: ldr w0, [x8]
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: ret
+entry:
+ %vector = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> %a, <2 x i32> %b, i64 0)
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_vector_insert_const(<4 x i32> %a, <2 x i32> %b, i32 %c) {
+; CHECK-LABEL: extract_v4i32_vector_insert_const:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: mov w0, v1.s[1]
+; CHECK-NEXT: ret
+entry:
+ %vector = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> %a, <2 x i32> %b, i64 0)
+ %d = extractelement <4 x i32> %vector, i32 1
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_vector_extract(<4 x i32> %a, <2 x i32> %b, i32 %c) {
+; CHECK-LABEL: extract_v4i32_vector_extract:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: mov x8, sp
+; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT: str q0, [sp]
+; CHECK-NEXT: bfi x8, x0, #2, #2
+; CHECK-NEXT: ldr w0, [x8]
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: ret
+entry:
+ %vector = call <4 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32> %a, i64 0)
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_vector_extract_const(<4 x i32> %a, <2 x i32> %b, i32 %c) {
+; CHECK-LABEL: extract_v4i32_vector_extract_const:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+entry:
+ %vector = call <4 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32> %a, i64 0)
+ %d = extractelement <4 x i32> %vector, i32 0
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_load(<4 x i32> %a, <2 x i32> %b, i32 %c, ptr %arg) {
+; CHECK-SD-LABEL: extract_v4i32_load:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: and x8, x0, #0x3
+; CHECK-SD-NEXT: ldr w0, [x1, x8, lsl #2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_load:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: ldr w0, [x1, x8, lsl #2]
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = load <4 x i32>, ptr %arg
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_load_const(<4 x i32> %a, <2 x i32> %b, i32 %c, ptr %arg) {
+; CHECK-LABEL: extract_v4i32_load_const:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr w0, [x1]
+; CHECK-NEXT: ret
+entry:
+ %vector = load <4 x i32>, ptr %arg
+ %d = extractelement <4 x i32> %vector, i32 0
+ ret i32 %d
+}
+
+define double @extract_v4i32_bitcast(<4 x i32> %a, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_bitcast:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: bfi x8, x0, #3, #1
+; CHECK-SD-NEXT: ldr d0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_bitcast:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: mov w9, w0
+; CHECK-GI-NEXT: mov x8, sp
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: and x9, x9, #0x1
+; CHECK-GI-NEXT: ldr d0, [x8, x9, lsl #3]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = bitcast <4 x i32> %a to <2 x double>
+ %d = extractelement <2 x double> %vector, i32 %c
+ ret double %d
+}
+
+define double @extract_v4i32_bitcast_const(<4 x i32> %a, i32 %c) {
+; CHECK-LABEL: extract_v4i32_bitcast_const:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+entry:
+ %vector = bitcast <4 x i32> %a to <2 x double>
+ %d = extractelement <2 x double> %vector, i32 0
+ ret double %d
+}
+
+define i32 @extract_v4i32_shuffle(<4 x i32> %a, <4 x i32> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_shuffle:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: uzp1 v1.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: mov v1.s[3], v0.s[3]
+; CHECK-SD-NEXT: str q1, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_shuffle:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: adrp x8, .LCPI35_0
+; CHECK-GI-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI35_0]
+; CHECK-GI-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 3>
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_shuffle_const(<4 x i32> %a, <4 x i32> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_shuffle_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov w0, s1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_shuffle_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, .LCPI36_0
+; CHECK-GI-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI36_0]
+; CHECK-GI-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
+; CHECK-GI-NEXT: mov s0, v0.s[2]
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 3>
+ %d = extractelement <4 x i32> %vector, i32 2
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_splat(<4 x i32> %a, <2 x i32> %b, i32 %c) {
+; CHECK-SD-LABEL: extract_v4i32_splat:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: movi v0.4s, #11
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_splat:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: movi v0.4s, #11
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %d = extractelement <4 x i32> splat (i32 11), i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_splat_const(<4 x i32> %a, <2 x i32> %b, i32 %c) {
+; CHECK-LABEL: extract_v4i32_splat_const:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, #11 // =0xb
+; CHECK-NEXT: ret
+entry:
+ %d = extractelement <4 x i32> splat (i32 11), i32 0
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_vp_add(<4 x i32> %a, <4 x i32> %b, i32 %c, <4 x i1> %mask, i32 %evl) {
+; CHECK-SD-LABEL: extract_v4i32_vp_add:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: mov x8, sp
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT: bfi x8, x0, #2, #2
+; CHECK-SD-NEXT: str q0, [sp]
+; CHECK-SD-NEXT: ldr w0, [x8]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_vp_add:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #16
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: mov w8, w0
+; CHECK-GI-NEXT: mov x9, sp
+; CHECK-GI-NEXT: and x8, x8, #0x3
+; CHECK-GI-NEXT: str q0, [sp]
+; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
+; CHECK-GI-NEXT: add sp, sp, #16
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %mask, i32 %evl)
+ %d = extractelement <4 x i32> %vector, i32 %c
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_vp_add_const(<4 x i32> %a, <4 x i32> %b, i32 %c, <4 x i1> %mask, i32 %evl) {
+; CHECK-SD-LABEL: extract_v4i32_vp_add_const:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: mov w0, v0.s[3]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_vp_add_const:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: mov s0, v0.s[3]
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
+entry:
+ %vector = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %mask, i32 %evl)
+ %d = extractelement <4 x i32> %vector, i32 3
+ ret i32 %d
+}
+
+define i32 @extract_v4i32_phi(i64 %val, i32 %limit, ptr %ptr) {
+; CHECK-SD-LABEL: extract_v4i32_phi:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: dup v1.2s, w0
+; CHECK-SD-NEXT: adrp x8, .LCPI41_0
+; CHECK-SD-NEXT: movi v0.2s, #16
+; CHECK-SD-NEXT: ldr d2, [x8, :lo12:.LCPI41_0]
+; CHECK-SD-NEXT: add v1.2s, v1.2s, v2.2s
+; CHECK-SD-NEXT: .LBB41_1: // %loop
+; CHECK-SD-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-SD-NEXT: fmov w8, s1
+; CHECK-SD-NEXT: add v1.2s, v1.2s, v0.2s
+; CHECK-SD-NEXT: cmp w8, w1
+; CHECK-SD-NEXT: add w0, w8, #10
+; CHECK-SD-NEXT: str w0, [x2, w8, sxtw #2]
+; CHECK-SD-NEXT: b.lo .LBB41_1
+; CHECK-SD-NEXT: // %bb.2: // %ret
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: extract_v4i32_phi:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, .LCPI41_0
+; CHECK-GI-NEXT: dup v0.2d, x0
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI41_0]
+; CHECK-GI-NEXT: add v1.2d, v0.2d, v1.2d
+; CHECK-GI-NEXT: movi v0.2s, #16
+; CHECK-GI-NEXT: xtn v1.2s, v1.2d
+; CHECK-GI-NEXT: .LBB41_1: // %loop
+; CHECK-GI-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: fmov w9, s1
+; CHECK-GI-NEXT: add v1.2s, v1.2s, v0.2s
+; CHECK-GI-NEXT: cmp w8, w1
+; CHECK-GI-NEXT: add w0, w9, #10
+; CHECK-GI-NEXT: str w0, [x2, w8, sxtw #2]
+; CHECK-GI-NEXT: b.lo .LBB41_1
+; CHECK-GI-NEXT: // %bb.2: // %ret
+; CHECK-GI-NEXT: ret
+entry:
+ %tempvector = insertelement <2 x i64> undef, i64 %val, i32 0
+ %vector = shufflevector <2 x i64> %tempvector, <2 x i64> undef, <2 x i32> zeroinitializer
+ %0 = add <2 x i64> %vector, <i64 1, i64 2>
+ %1 = trunc <2 x i64> %0 to <2 x i32>
+ br label %loop
+
+loop:
+ %2 = phi <2 x i32> [ %1, %entry ], [ %inc, %loop ]
+ %elt = extractelement <2 x i32> %2, i32 0
+ %end = icmp ult i32 %elt, %limit
+ %3 = add i32 10, %elt
+ %4 = sext i32 %elt to i64
+ %5 = getelementptr i32, ptr %ptr, i64 %4
+ store i32 %3, ptr %5
+ %inc = add <2 x i32> %2, <i32 16, i32 16>
+ br i1 %end, label %loop, label %ret
+
+ret:
+ ret i32 %3
+}
+
+
diff --git a/llvm/test/CodeGen/AArch64/extractvector-oob-load.mir b/llvm/test/CodeGen/AArch64/extractvector-oob-load.mir
index e8c5819..e7e8c93 100644
--- a/llvm/test/CodeGen/AArch64/extractvector-oob-load.mir
+++ b/llvm/test/CodeGen/AArch64/extractvector-oob-load.mir
@@ -22,11 +22,8 @@ body: |
; CHECK-LABEL: name: f
; CHECK: liveins: $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64))
- ; CHECK-NEXT: $x0 = COPY [[LOAD]](s64)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $x0 = COPY [[DEF]](s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%0:_(p0) = COPY $x0
%3:_(s64) = G_CONSTANT i64 224567957
diff --git a/llvm/test/CodeGen/AArch64/fcmp.ll b/llvm/test/CodeGen/AArch64/fcmp.ll
index 2d0b557..9916aee 100644
--- a/llvm/test/CodeGen/AArch64/fcmp.ll
+++ b/llvm/test/CodeGen/AArch64/fcmp.ll
@@ -1108,61 +1108,54 @@ define <7 x i32> @v7f16_i32(<7 x half> %a, <7 x half> %b, <7 x i32> %d, <7 x i32
;
; CHECK-GI-FP16-LABEL: v7f16_i32:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: fcmgt v1.8h, v1.8h, v0.8h
-; CHECK-GI-FP16-NEXT: mov w12, #31 // =0x1f
-; CHECK-GI-FP16-NEXT: ldr s4, [sp]
-; CHECK-GI-FP16-NEXT: fmov s2, w12
+; CHECK-GI-FP16-NEXT: fcmgt v0.8h, v1.8h, v0.8h
+; CHECK-GI-FP16-NEXT: mov w10, #31 // =0x1f
+; CHECK-GI-FP16-NEXT: ldr s3, [sp]
+; CHECK-GI-FP16-NEXT: fmov s1, w10
; CHECK-GI-FP16-NEXT: fmov s6, w0
-; CHECK-GI-FP16-NEXT: ldr s5, [sp, #8]
+; CHECK-GI-FP16-NEXT: ldr s4, [sp, #8]
; CHECK-GI-FP16-NEXT: ldr s7, [sp, #24]
; CHECK-GI-FP16-NEXT: ldr s16, [sp, #32]
-; CHECK-GI-FP16-NEXT: umov w9, v1.h[4]
-; CHECK-GI-FP16-NEXT: umov w8, v1.h[0]
-; CHECK-GI-FP16-NEXT: umov w11, v1.h[5]
-; CHECK-GI-FP16-NEXT: umov w10, v1.h[1]
-; CHECK-GI-FP16-NEXT: mov v2.s[1], w12
-; CHECK-GI-FP16-NEXT: umov w13, v1.h[2]
+; CHECK-GI-FP16-NEXT: umov w8, v0.h[4]
+; CHECK-GI-FP16-NEXT: umov w9, v0.h[5]
+; CHECK-GI-FP16-NEXT: mov v1.s[1], w10
; CHECK-GI-FP16-NEXT: mov v6.s[1], w1
; CHECK-GI-FP16-NEXT: mov v7.s[1], v16.s[0]
; CHECK-GI-FP16-NEXT: ldr s16, [sp, #40]
-; CHECK-GI-FP16-NEXT: fmov s3, w9
-; CHECK-GI-FP16-NEXT: fmov s0, w8
-; CHECK-GI-FP16-NEXT: umov w8, v1.h[6]
-; CHECK-GI-FP16-NEXT: mov v2.s[2], w12
-; CHECK-GI-FP16-NEXT: umov w9, v1.h[3]
+; CHECK-GI-FP16-NEXT: fmov s2, w8
+; CHECK-GI-FP16-NEXT: umov w8, v0.h[6]
+; CHECK-GI-FP16-NEXT: mov v1.s[2], w10
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: mov v6.s[2], w2
; CHECK-GI-FP16-NEXT: mov v7.s[2], v16.s[0]
-; CHECK-GI-FP16-NEXT: mov v3.s[1], w11
-; CHECK-GI-FP16-NEXT: mov v0.s[1], w10
-; CHECK-GI-FP16-NEXT: mov w10, #-1 // =0xffffffff
-; CHECK-GI-FP16-NEXT: fmov s1, w10
-; CHECK-GI-FP16-NEXT: neg v17.4s, v2.4s
+; CHECK-GI-FP16-NEXT: mov v2.s[1], w9
+; CHECK-GI-FP16-NEXT: mov w9, #-1 // =0xffffffff
+; CHECK-GI-FP16-NEXT: fmov s5, w9
+; CHECK-GI-FP16-NEXT: neg v17.4s, v1.4s
+; CHECK-GI-FP16-NEXT: shl v0.4s, v0.4s, #31
; CHECK-GI-FP16-NEXT: mov v6.s[3], w3
+; CHECK-GI-FP16-NEXT: mov v2.s[2], w8
+; CHECK-GI-FP16-NEXT: fmov w8, s3
+; CHECK-GI-FP16-NEXT: fmov s3, w7
+; CHECK-GI-FP16-NEXT: mov v5.s[1], w9
+; CHECK-GI-FP16-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-FP16-NEXT: mov v3.s[1], w8
+; CHECK-GI-FP16-NEXT: fmov w8, s4
+; CHECK-GI-FP16-NEXT: ldr s4, [sp, #16]
+; CHECK-GI-FP16-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-GI-FP16-NEXT: fmov s2, w4
+; CHECK-GI-FP16-NEXT: mov v5.s[2], w9
+; CHECK-GI-FP16-NEXT: mov v2.s[1], w5
; CHECK-GI-FP16-NEXT: mov v3.s[2], w8
+; CHECK-GI-FP16-NEXT: sshl v1.4s, v1.4s, v17.4s
; CHECK-GI-FP16-NEXT: fmov w8, s4
-; CHECK-GI-FP16-NEXT: fmov s4, w7
-; CHECK-GI-FP16-NEXT: mov v0.s[2], w13
-; CHECK-GI-FP16-NEXT: mov v1.s[1], w10
-; CHECK-GI-FP16-NEXT: mov v4.s[1], w8
-; CHECK-GI-FP16-NEXT: fmov w8, s5
-; CHECK-GI-FP16-NEXT: ldr s5, [sp, #16]
-; CHECK-GI-FP16-NEXT: ushl v2.4s, v3.4s, v2.4s
-; CHECK-GI-FP16-NEXT: fmov s3, w4
-; CHECK-GI-FP16-NEXT: mov v0.s[3], w9
-; CHECK-GI-FP16-NEXT: mov v1.s[2], w10
-; CHECK-GI-FP16-NEXT: mov v3.s[1], w5
-; CHECK-GI-FP16-NEXT: mov v4.s[2], w8
-; CHECK-GI-FP16-NEXT: sshl v2.4s, v2.4s, v17.4s
-; CHECK-GI-FP16-NEXT: fmov w8, s5
-; CHECK-GI-FP16-NEXT: shl v0.4s, v0.4s, #31
-; CHECK-GI-FP16-NEXT: eor v1.16b, v2.16b, v1.16b
-; CHECK-GI-FP16-NEXT: mov v3.s[2], w6
-; CHECK-GI-FP16-NEXT: mov v4.s[3], w8
-; CHECK-GI-FP16-NEXT: sshr v0.4s, v0.4s, #31
-; CHECK-GI-FP16-NEXT: and v1.16b, v7.16b, v1.16b
-; CHECK-GI-FP16-NEXT: and v2.16b, v3.16b, v2.16b
-; CHECK-GI-FP16-NEXT: bsl v0.16b, v6.16b, v4.16b
-; CHECK-GI-FP16-NEXT: orr v1.16b, v2.16b, v1.16b
+; CHECK-GI-FP16-NEXT: eor v4.16b, v1.16b, v5.16b
+; CHECK-GI-FP16-NEXT: mov v2.s[2], w6
+; CHECK-GI-FP16-NEXT: mov v3.s[3], w8
+; CHECK-GI-FP16-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-GI-FP16-NEXT: and v2.16b, v7.16b, v4.16b
+; CHECK-GI-FP16-NEXT: bsl v0.16b, v6.16b, v3.16b
+; CHECK-GI-FP16-NEXT: orr v1.16b, v1.16b, v2.16b
; CHECK-GI-FP16-NEXT: mov s2, v0.s[1]
; CHECK-GI-FP16-NEXT: mov s3, v0.s[2]
; CHECK-GI-FP16-NEXT: mov s4, v0.s[3]
diff --git a/llvm/test/CodeGen/AArch64/fexplog.ll b/llvm/test/CodeGen/AArch64/fexplog.ll
index 519a297..93d3d96 100644
--- a/llvm/test/CodeGen/AArch64/fexplog.ll
+++ b/llvm/test/CodeGen/AArch64/fexplog.ll
@@ -36,6 +36,19 @@ entry:
ret half %c
}
+define <1 x double> @exp_v1f64(<1 x double> %x) {
+; CHECK-LABEL: exp_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl exp
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.exp.v1f64(<1 x double> %x)
+ ret <1 x double> %c
+}
+
define <2 x double> @exp_v2f64(<2 x double> %a) {
; CHECK-SD-LABEL: exp_v2f64:
; CHECK-SD: // %bb.0: // %entry
@@ -1293,6 +1306,19 @@ entry:
ret half %c
}
+define <1 x double> @exp2_v1f64(<1 x double> %x) {
+; CHECK-LABEL: exp2_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl exp2
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.exp2.v1f64(<1 x double> %x)
+ ret <1 x double> %c
+}
+
define <2 x double> @exp2_v2f64(<2 x double> %a) {
; CHECK-SD-LABEL: exp2_v2f64:
; CHECK-SD: // %bb.0: // %entry
@@ -2550,6 +2576,19 @@ entry:
ret half %c
}
+define <1 x double> @log_v1f64(<1 x double> %x) {
+; CHECK-LABEL: log_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl log
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.log.v1f64(<1 x double> %x)
+ ret <1 x double> %c
+}
+
define <2 x double> @log_v2f64(<2 x double> %a) {
; CHECK-SD-LABEL: log_v2f64:
; CHECK-SD: // %bb.0: // %entry
@@ -3807,6 +3846,19 @@ entry:
ret half %c
}
+define <1 x double> @log2_v1f64(<1 x double> %x) {
+; CHECK-LABEL: log2_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl log2
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.log2.v1f64(<1 x double> %x)
+ ret <1 x double> %c
+}
+
define <2 x double> @log2_v2f64(<2 x double> %a) {
; CHECK-SD-LABEL: log2_v2f64:
; CHECK-SD: // %bb.0: // %entry
@@ -5064,6 +5116,19 @@ entry:
ret half %c
}
+define <1 x double> @log10_v1f64(<1 x double> %x) {
+; CHECK-LABEL: log10_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl log10
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.log10.v1f64(<1 x double> %x)
+ ret <1 x double> %c
+}
+
define <2 x double> @log10_v2f64(<2 x double> %a) {
; CHECK-SD-LABEL: log10_v2f64:
; CHECK-SD: // %bb.0: // %entry
diff --git a/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll b/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll
index 1b1cfea..2ad5623 100644
--- a/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll
+++ b/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll
@@ -1,29 +1,50 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define {<2 x half>, <2 x half>} @vector_deinterleave_v2f16_v4f16(<4 x half> %vec) {
-; CHECK-LABEL: vector_deinterleave_v2f16_v4f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: dup v2.2s, v0.s[1]
-; CHECK-NEXT: mov v1.16b, v2.16b
-; CHECK-NEXT: mov v1.h[0], v0.h[1]
-; CHECK-NEXT: mov v0.h[1], v2.h[0]
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vector_deinterleave_v2f16_v4f16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: dup v2.2s, v0.s[1]
+; CHECK-SD-NEXT: mov v1.16b, v2.16b
+; CHECK-SD-NEXT: mov v1.h[0], v0.h[1]
+; CHECK-SD-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vector_deinterleave_v2f16_v4f16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: uzp1 v2.4h, v0.4h, v0.4h
+; CHECK-GI-NEXT: uzp2 v1.4h, v0.4h, v0.4h
+; CHECK-GI-NEXT: mov h0, v2.h[1]
+; CHECK-GI-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NEXT: mov v2.h[1], v0.h[0]
+; CHECK-GI-NEXT: mov v1.h[1], v3.h[0]
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-GI-NEXT: fmov d0, d2
+; CHECK-GI-NEXT: ret
%retval = call {<2 x half>, <2 x half>} @llvm.experimental.vector.deinterleave2.v4f16(<4 x half> %vec)
ret {<2 x half>, <2 x half>} %retval
}
define {<4 x half>, <4 x half>} @vector_deinterleave_v4f16_v8f16(<8 x half> %vec) {
-; CHECK-LABEL: vector_deinterleave_v4f16_v8f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: uzp1 v2.4h, v0.4h, v1.4h
-; CHECK-NEXT: uzp2 v1.4h, v0.4h, v1.4h
-; CHECK-NEXT: fmov d0, d2
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vector_deinterleave_v4f16_v8f16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT: uzp1 v2.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp2 v1.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: fmov d0, d2
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vector_deinterleave_v4f16_v8f16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: uzp1 v2.8h, v0.8h, v0.8h
+; CHECK-GI-NEXT: uzp2 v1.8h, v0.8h, v0.8h
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-GI-NEXT: fmov d0, d2
+; CHECK-GI-NEXT: ret
%retval = call {<4 x half>, <4 x half>} @llvm.experimental.vector.deinterleave2.v8f16(<8 x half> %vec)
ret {<4 x half>, <4 x half>} %retval
}
@@ -40,13 +61,21 @@ define {<8 x half>, <8 x half>} @vector_deinterleave_v8f16_v16f16(<16 x half> %v
}
define {<2 x float>, <2 x float>} @vector_deinterleave_v2f32_v4f32(<4 x float> %vec) {
-; CHECK-LABEL: vector_deinterleave_v2f32_v4f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: zip1 v2.2s, v0.2s, v1.2s
-; CHECK-NEXT: zip2 v1.2s, v0.2s, v1.2s
-; CHECK-NEXT: fmov d0, d2
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vector_deinterleave_v2f32_v4f32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT: zip1 v2.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: zip2 v1.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: fmov d0, d2
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vector_deinterleave_v2f32_v4f32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: uzp1 v2.4s, v0.4s, v0.4s
+; CHECK-GI-NEXT: uzp2 v1.4s, v0.4s, v0.4s
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-GI-NEXT: fmov d0, d2
+; CHECK-GI-NEXT: ret
%retval = call {<2 x float>, <2 x float>} @llvm.experimental.vector.deinterleave2.v4f32(<4 x float> %vec)
ret {<2 x float>, <2 x float>} %retval
}
diff --git a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
index 071c1ff..eb81aff 100644
--- a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define <4 x half> @interleave2_v4f16(<2 x half> %vec0, <2 x half> %vec1) {
; CHECK-LABEL: interleave2_v4f16:
@@ -11,15 +12,22 @@ define <4 x half> @interleave2_v4f16(<2 x half> %vec0, <2 x half> %vec1) {
}
define <8 x half> @interleave2_v8f16(<4 x half> %vec0, <4 x half> %vec1) {
-; CHECK-LABEL: interleave2_v8f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: adrp x8, .LCPI1_0
-; CHECK-NEXT: mov v0.d[1], v1.d[0]
-; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI1_0]
-; CHECK-NEXT: tbl v0.16b, { v0.16b }, v1.16b
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: interleave2_v8f16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT: adrp x8, .LCPI1_0
+; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI1_0]
+; CHECK-SD-NEXT: tbl v0.16b, { v0.16b }, v1.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_v8f16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: zip1 v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT: ret
%retval = call <8 x half> @llvm.experimental.vector.interleave2.v8f16(<4 x half> %vec0, <4 x half> %vec1)
ret <8 x half> %retval
}
@@ -36,14 +44,21 @@ define <16 x half> @interleave2_v16f16(<8 x half> %vec0, <8 x half> %vec1) {
}
define <4 x float> @interleave2_v4f32(<2 x float> %vec0, <2 x float> %vec1) {
-; CHECK-LABEL: interleave2_v4f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: mov v0.d[1], v1.d[0]
-; CHECK-NEXT: rev64 v1.4s, v0.4s
-; CHECK-NEXT: uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: interleave2_v4f32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: rev64 v1.4s, v0.4s
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_v4f32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: zip1 v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: ret
%retval = call <4 x float> @llvm.experimental.vector.interleave2.v4f32(<2 x float> %vec0, <2 x float> %vec1)
ret <4 x float> %retval
}
diff --git a/llvm/test/CodeGen/AArch64/fold-global-offsets.ll b/llvm/test/CodeGen/AArch64/fold-global-offsets.ll
index 897d35a..8de0f0d 100644
--- a/llvm/test/CodeGen/AArch64/fold-global-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/fold-global-offsets.ll
@@ -131,7 +131,7 @@ define i32 @f7() {
; GISEL-NEXT: ret
entry:
- %lshr = lshr i128 bitcast (<2 x i64> <i64 undef, i64 ptrtoint (ptr getelementptr inbounds ({ [9 x ptr], [8 x ptr] }, ptr @x3, i64 0, inrange i32 1, i64 2) to i64)> to i128), 64
+ %lshr = lshr i128 bitcast (<2 x i64> <i64 undef, i64 ptrtoint (ptr getelementptr inbounds ({ [9 x ptr], [8 x ptr] }, ptr @x3, i64 0, i32 1, i64 2) to i64)> to i128), 64
%trunc = trunc i128 %lshr to i64
%inttoptr = inttoptr i64 %trunc to ptr
%gep = getelementptr i32, ptr %inttoptr, i64 5
diff --git a/llvm/test/CodeGen/AArch64/fp-conversion-to-tbl.ll b/llvm/test/CodeGen/AArch64/fp-conversion-to-tbl.ll
index 1ea87bb..0a3b9a0 100644
--- a/llvm/test/CodeGen/AArch64/fp-conversion-to-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/fp-conversion-to-tbl.ll
@@ -73,9 +73,8 @@ define void @fptoui_v8f32_to_v8i8_no_loop(ptr %A, ptr %dst) {
; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: fcvtzs.4s v1, v1
; CHECK-NEXT: fcvtzs.4s v0, v0
-; CHECK-NEXT: xtn.4h v1, v1
-; CHECK-NEXT: xtn.4h v0, v0
-; CHECK-NEXT: uzp1.8b v0, v0, v1
+; CHECK-NEXT: uzp1.8h v0, v0, v1
+; CHECK-NEXT: xtn.8b v0, v0
; CHECK-NEXT: str d0, [x1]
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
index f80a8df..685efbb 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
@@ -1477,6 +1477,61 @@ define fp128 @fpext_f128_f64(double %x) #0 {
ret fp128 %val
}
+; CHECK-LABEL: sin_v1f64:
+; CHECK: bl sin
+define <1 x double> @sin_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.sin.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+; CHECK-LABEL: cos_v1f64:
+; CHECK: bl cos
+define <1 x double> @cos_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.cos.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+; CHECK-LABEL: pow_v1f64:
+; CHECK: bl pow
+define <1 x double> @pow_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.pow.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+; CHECK-LABEL: log_v1f64:
+; CHECK: bl log
+define <1 x double> @log_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.log.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+; CHECK-LABEL: log2_v1f64:
+; CHECK: bl log2
+define <1 x double> @log2_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.log2.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+; CHECK-LABEL: log10_v1f64:
+; CHECK: bl log10
+define <1 x double> @log10_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.log10.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+; CHECK-LABEL: exp_v1f64:
+; CHECK: bl exp
+define <1 x double> @exp_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.exp.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+; CHECK-LABEL: exp2_v1f64:
+; CHECK: bl exp2
+define <1 x double> @exp2_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.exp2.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/AArch64/fpow.ll b/llvm/test/CodeGen/AArch64/fpow.ll
index c2ad1aa..8d40121 100644
--- a/llvm/test/CodeGen/AArch64/fpow.ll
+++ b/llvm/test/CodeGen/AArch64/fpow.ll
@@ -37,6 +37,21 @@ entry:
ret half %c
}
+define <1 x double> @pow_v1f64(<1 x double> %x) {
+; CHECK-LABEL: pow_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: adrp x8, .LCPI3_0
+; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI3_0]
+; CHECK-NEXT: bl pow
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.pow.v1f64(<1 x double> %x, <1 x double> <double 3.140000e+00>)
+ ret <1 x double> %c
+}
+
define <2 x double> @pow_v2f64(<2 x double> %a, <2 x double> %b) {
; CHECK-SD-LABEL: pow_v2f64:
; CHECK-SD: // %bb.0: // %entry
diff --git a/llvm/test/CodeGen/AArch64/fptoi.ll b/llvm/test/CodeGen/AArch64/fptoi.ll
index 67190e8..01585d0 100644
--- a/llvm/test/CodeGen/AArch64/fptoi.ll
+++ b/llvm/test/CodeGen/AArch64/fptoi.ll
@@ -1,13 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
-
-; CHECK-GI-FP16: warning: Instruction selection used fallback path for fptos_v2f16_v2i16
-; CHECK-GI-FP16-NEXT: warning: Instruction selection used fallback path for fptou_v2f16_v2i16
-; CHECK-GI-FP16-NEXT: warning: Instruction selection used fallback path for fptos_v2f16_v2i8
-; CHECK-GI-FP16-NEXT: warning: Instruction selection used fallback path for fptou_v2f16_v2i8
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define i64 @fptos_f64_i64(double %a) {
; CHECK-LABEL: fptos_f64_i64:
@@ -1096,30 +1091,17 @@ entry:
}
define <3 x i16> @fptos_v3f64_v3i16(<3 x double> %a) {
-; CHECK-SD-LABEL: fptos_v3f64_v3i16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
-; CHECK-SD-NEXT: fcvtzs v1.2d, v2.2d
-; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: fptos_v3f64_v3i16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
-; CHECK-GI-NEXT: fcvtzs v1.2d, v2.2d
-; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-GI-NEXT: uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: xtn v0.4h, v0.4s
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: fptos_v3f64_v3i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: fcvtzs v1.2d, v2.2d
+; CHECK-NEXT: fcvtzs v0.2d, v0.2d
+; CHECK-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: xtn v0.4h, v0.4s
+; CHECK-NEXT: ret
entry:
%c = fptosi <3 x double> %a to <3 x i16>
ret <3 x i16> %c
@@ -1134,9 +1116,8 @@ define <3 x i16> @fptou_v3f64_v3i16(<3 x double> %a) {
; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
; CHECK-SD-NEXT: fcvtzs v1.2d, v2.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptou_v3f64_v3i16:
@@ -1160,9 +1141,8 @@ define <4 x i16> @fptos_v4f64_v4i16(<4 x double> %a) {
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptos_v4f64_v4i16:
@@ -1182,9 +1162,8 @@ define <4 x i16> @fptou_v4f64_v4i16(<4 x double> %a) {
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptou_v4f64_v4i16:
@@ -1600,9 +1579,8 @@ define <3 x i8> @fptos_v3f64_v3i8(<3 x double> %a) {
; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
; CHECK-SD-NEXT: fcvtzs v1.2d, v2.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
; CHECK-SD-NEXT: umov w0, v0.h[0]
; CHECK-SD-NEXT: umov w1, v0.h[1]
; CHECK-SD-NEXT: umov w2, v0.h[2]
@@ -1638,9 +1616,8 @@ define <3 x i8> @fptou_v3f64_v3i8(<3 x double> %a) {
; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
; CHECK-SD-NEXT: fcvtzs v1.2d, v2.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
; CHECK-SD-NEXT: umov w0, v0.h[0]
; CHECK-SD-NEXT: umov w1, v0.h[1]
; CHECK-SD-NEXT: umov w2, v0.h[2]
@@ -1672,9 +1649,8 @@ define <4 x i8> @fptos_v4f64_v4i8(<4 x double> %a) {
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptos_v4f64_v4i8:
@@ -1694,9 +1670,8 @@ define <4 x i8> @fptou_v4f64_v4i8(<4 x double> %a) {
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptou_v4f64_v4i8:
@@ -1718,13 +1693,10 @@ define <8 x i8> @fptos_v8f64_v8i8(<8 x double> %a) {
; CHECK-SD-NEXT: fcvtzs v2.2d, v2.2d
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v3.2s, v3.2d
-; CHECK-SD-NEXT: xtn v2.2s, v2.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v2.4h, v2.4h, v3.4h
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v2.8b
+; CHECK-SD-NEXT: uzp1 v2.4s, v2.4s, v3.4s
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-SD-NEXT: xtn v0.8b, v0.8h
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptos_v8f64_v8i8:
@@ -1750,13 +1722,10 @@ define <8 x i8> @fptou_v8f64_v8i8(<8 x double> %a) {
; CHECK-SD-NEXT: fcvtzs v2.2d, v2.2d
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v3.2s, v3.2d
-; CHECK-SD-NEXT: xtn v2.2s, v2.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v2.4h, v2.4h, v3.4h
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v2.8b
+; CHECK-SD-NEXT: uzp1 v2.4s, v2.4s, v3.4s
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-SD-NEXT: xtn v0.8b, v0.8h
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptou_v8f64_v8i8:
@@ -1786,21 +1755,13 @@ define <16 x i8> @fptos_v16f64_v16i8(<16 x double> %a) {
; CHECK-SD-NEXT: fcvtzs v2.2d, v2.2d
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v7.2s, v7.2d
-; CHECK-SD-NEXT: xtn v6.2s, v6.2d
-; CHECK-SD-NEXT: xtn v5.2s, v5.2d
-; CHECK-SD-NEXT: xtn v4.2s, v4.2d
-; CHECK-SD-NEXT: xtn v3.2s, v3.2d
-; CHECK-SD-NEXT: xtn v2.2s, v2.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v6.4h, v6.4h, v7.4h
-; CHECK-SD-NEXT: uzp1 v4.4h, v4.4h, v5.4h
-; CHECK-SD-NEXT: uzp1 v2.4h, v2.4h, v3.4h
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: mov v4.d[1], v6.d[0]
-; CHECK-SD-NEXT: mov v0.d[1], v2.d[0]
-; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v4.16b
+; CHECK-SD-NEXT: uzp1 v6.4s, v6.4s, v7.4s
+; CHECK-SD-NEXT: uzp1 v4.4s, v4.4s, v5.4s
+; CHECK-SD-NEXT: uzp1 v2.4s, v2.4s, v3.4s
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: uzp1 v1.8h, v4.8h, v6.8h
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v1.16b
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptos_v16f64_v16i8:
@@ -1837,21 +1798,13 @@ define <16 x i8> @fptou_v16f64_v16i8(<16 x double> %a) {
; CHECK-SD-NEXT: fcvtzs v2.2d, v2.2d
; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d
; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT: xtn v7.2s, v7.2d
-; CHECK-SD-NEXT: xtn v6.2s, v6.2d
-; CHECK-SD-NEXT: xtn v5.2s, v5.2d
-; CHECK-SD-NEXT: xtn v4.2s, v4.2d
-; CHECK-SD-NEXT: xtn v3.2s, v3.2d
-; CHECK-SD-NEXT: xtn v2.2s, v2.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: uzp1 v6.4h, v6.4h, v7.4h
-; CHECK-SD-NEXT: uzp1 v4.4h, v4.4h, v5.4h
-; CHECK-SD-NEXT: uzp1 v2.4h, v2.4h, v3.4h
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: mov v4.d[1], v6.d[0]
-; CHECK-SD-NEXT: mov v0.d[1], v2.d[0]
-; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v4.16b
+; CHECK-SD-NEXT: uzp1 v6.4s, v6.4s, v7.4s
+; CHECK-SD-NEXT: uzp1 v4.4s, v4.4s, v5.4s
+; CHECK-SD-NEXT: uzp1 v2.4s, v2.4s, v3.4s
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: uzp1 v1.8h, v4.8h, v6.8h
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v1.16b
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptou_v16f64_v16i8:
@@ -1900,36 +1853,20 @@ define <32 x i8> @fptos_v32f64_v32i8(<32 x double> %a) {
; CHECK-SD-NEXT: fcvtzs v18.2d, v18.2d
; CHECK-SD-NEXT: fcvtzs v17.2d, v17.2d
; CHECK-SD-NEXT: fcvtzs v16.2d, v16.2d
-; CHECK-SD-NEXT: xtn v7.2s, v7.2d
-; CHECK-SD-NEXT: xtn v6.2s, v6.2d
-; CHECK-SD-NEXT: xtn v5.2s, v5.2d
-; CHECK-SD-NEXT: xtn v4.2s, v4.2d
-; CHECK-SD-NEXT: xtn v3.2s, v3.2d
-; CHECK-SD-NEXT: xtn v2.2s, v2.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: xtn v23.2s, v23.2d
-; CHECK-SD-NEXT: xtn v22.2s, v22.2d
-; CHECK-SD-NEXT: xtn v21.2s, v21.2d
-; CHECK-SD-NEXT: xtn v20.2s, v20.2d
-; CHECK-SD-NEXT: xtn v19.2s, v19.2d
-; CHECK-SD-NEXT: xtn v18.2s, v18.2d
-; CHECK-SD-NEXT: xtn v17.2s, v17.2d
-; CHECK-SD-NEXT: xtn v16.2s, v16.2d
-; CHECK-SD-NEXT: uzp1 v6.4h, v6.4h, v7.4h
-; CHECK-SD-NEXT: uzp1 v4.4h, v4.4h, v5.4h
-; CHECK-SD-NEXT: uzp1 v2.4h, v2.4h, v3.4h
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: uzp1 v1.4h, v22.4h, v23.4h
-; CHECK-SD-NEXT: uzp1 v3.4h, v20.4h, v21.4h
-; CHECK-SD-NEXT: uzp1 v5.4h, v18.4h, v19.4h
-; CHECK-SD-NEXT: uzp1 v7.4h, v16.4h, v17.4h
-; CHECK-SD-NEXT: mov v4.d[1], v6.d[0]
-; CHECK-SD-NEXT: mov v0.d[1], v2.d[0]
-; CHECK-SD-NEXT: mov v3.d[1], v1.d[0]
-; CHECK-SD-NEXT: mov v7.d[1], v5.d[0]
+; CHECK-SD-NEXT: uzp1 v6.4s, v6.4s, v7.4s
+; CHECK-SD-NEXT: uzp1 v4.4s, v4.4s, v5.4s
+; CHECK-SD-NEXT: uzp1 v2.4s, v2.4s, v3.4s
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: uzp1 v3.4s, v20.4s, v21.4s
+; CHECK-SD-NEXT: uzp1 v1.4s, v22.4s, v23.4s
+; CHECK-SD-NEXT: uzp1 v5.4s, v18.4s, v19.4s
+; CHECK-SD-NEXT: uzp1 v7.4s, v16.4s, v17.4s
+; CHECK-SD-NEXT: uzp1 v4.8h, v4.8h, v6.8h
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-SD-NEXT: uzp1 v1.8h, v3.8h, v1.8h
+; CHECK-SD-NEXT: uzp1 v2.8h, v7.8h, v5.8h
; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v4.16b
-; CHECK-SD-NEXT: uzp1 v1.16b, v7.16b, v3.16b
+; CHECK-SD-NEXT: uzp1 v1.16b, v2.16b, v1.16b
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptos_v32f64_v32i8:
@@ -1997,36 +1934,20 @@ define <32 x i8> @fptou_v32f64_v32i8(<32 x double> %a) {
; CHECK-SD-NEXT: fcvtzs v18.2d, v18.2d
; CHECK-SD-NEXT: fcvtzs v17.2d, v17.2d
; CHECK-SD-NEXT: fcvtzs v16.2d, v16.2d
-; CHECK-SD-NEXT: xtn v7.2s, v7.2d
-; CHECK-SD-NEXT: xtn v6.2s, v6.2d
-; CHECK-SD-NEXT: xtn v5.2s, v5.2d
-; CHECK-SD-NEXT: xtn v4.2s, v4.2d
-; CHECK-SD-NEXT: xtn v3.2s, v3.2d
-; CHECK-SD-NEXT: xtn v2.2s, v2.2d
-; CHECK-SD-NEXT: xtn v1.2s, v1.2d
-; CHECK-SD-NEXT: xtn v0.2s, v0.2d
-; CHECK-SD-NEXT: xtn v23.2s, v23.2d
-; CHECK-SD-NEXT: xtn v22.2s, v22.2d
-; CHECK-SD-NEXT: xtn v21.2s, v21.2d
-; CHECK-SD-NEXT: xtn v20.2s, v20.2d
-; CHECK-SD-NEXT: xtn v19.2s, v19.2d
-; CHECK-SD-NEXT: xtn v18.2s, v18.2d
-; CHECK-SD-NEXT: xtn v17.2s, v17.2d
-; CHECK-SD-NEXT: xtn v16.2s, v16.2d
-; CHECK-SD-NEXT: uzp1 v6.4h, v6.4h, v7.4h
-; CHECK-SD-NEXT: uzp1 v4.4h, v4.4h, v5.4h
-; CHECK-SD-NEXT: uzp1 v2.4h, v2.4h, v3.4h
-; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: uzp1 v1.4h, v22.4h, v23.4h
-; CHECK-SD-NEXT: uzp1 v3.4h, v20.4h, v21.4h
-; CHECK-SD-NEXT: uzp1 v5.4h, v18.4h, v19.4h
-; CHECK-SD-NEXT: uzp1 v7.4h, v16.4h, v17.4h
-; CHECK-SD-NEXT: mov v4.d[1], v6.d[0]
-; CHECK-SD-NEXT: mov v0.d[1], v2.d[0]
-; CHECK-SD-NEXT: mov v3.d[1], v1.d[0]
-; CHECK-SD-NEXT: mov v7.d[1], v5.d[0]
+; CHECK-SD-NEXT: uzp1 v6.4s, v6.4s, v7.4s
+; CHECK-SD-NEXT: uzp1 v4.4s, v4.4s, v5.4s
+; CHECK-SD-NEXT: uzp1 v2.4s, v2.4s, v3.4s
+; CHECK-SD-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: uzp1 v3.4s, v20.4s, v21.4s
+; CHECK-SD-NEXT: uzp1 v1.4s, v22.4s, v23.4s
+; CHECK-SD-NEXT: uzp1 v5.4s, v18.4s, v19.4s
+; CHECK-SD-NEXT: uzp1 v7.4s, v16.4s, v17.4s
+; CHECK-SD-NEXT: uzp1 v4.8h, v4.8h, v6.8h
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-SD-NEXT: uzp1 v1.8h, v3.8h, v1.8h
+; CHECK-SD-NEXT: uzp1 v2.8h, v7.8h, v5.8h
; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v4.16b
-; CHECK-SD-NEXT: uzp1 v1.16b, v7.16b, v3.16b
+; CHECK-SD-NEXT: uzp1 v1.16b, v2.16b, v1.16b
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptou_v32f64_v32i8:
@@ -3026,9 +2947,8 @@ define <8 x i8> @fptos_v8f32_v8i8(<8 x float> %a) {
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: fcvtzs v1.4s, v1.4s
; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-SD-NEXT: xtn v1.4h, v1.4s
-; CHECK-SD-NEXT: xtn v0.4h, v0.4s
-; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-SD-NEXT: xtn v0.8b, v0.8h
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptos_v8f32_v8i8:
@@ -3048,9 +2968,8 @@ define <8 x i8> @fptou_v8f32_v8i8(<8 x float> %a) {
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: fcvtzs v1.4s, v1.4s
; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-SD-NEXT: xtn v1.4h, v1.4s
-; CHECK-SD-NEXT: xtn v0.4h, v0.4s
-; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-SD-NEXT: xtn v0.8b, v0.8h
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptou_v8f32_v8i8:
@@ -3072,12 +2991,8 @@ define <16 x i8> @fptos_v16f32_v16i8(<16 x float> %a) {
; CHECK-SD-NEXT: fcvtzs v2.4s, v2.4s
; CHECK-SD-NEXT: fcvtzs v1.4s, v1.4s
; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-SD-NEXT: xtn v3.4h, v3.4s
-; CHECK-SD-NEXT: xtn v2.4h, v2.4s
-; CHECK-SD-NEXT: xtn v1.4h, v1.4s
-; CHECK-SD-NEXT: xtn v0.4h, v0.4s
-; CHECK-SD-NEXT: mov v2.d[1], v3.d[0]
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: uzp1 v2.8h, v2.8h, v3.8h
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v2.16b
; CHECK-SD-NEXT: ret
;
@@ -3134,20 +3049,12 @@ define <32 x i8> @fptos_v32f32_v32i8(<32 x float> %a) {
; CHECK-SD-NEXT: fcvtzs v6.4s, v6.4s
; CHECK-SD-NEXT: fcvtzs v5.4s, v5.4s
; CHECK-SD-NEXT: fcvtzs v4.4s, v4.4s
-; CHECK-SD-NEXT: xtn v3.4h, v3.4s
-; CHECK-SD-NEXT: xtn v2.4h, v2.4s
-; CHECK-SD-NEXT: xtn v1.4h, v1.4s
-; CHECK-SD-NEXT: xtn v0.4h, v0.4s
-; CHECK-SD-NEXT: xtn v7.4h, v7.4s
-; CHECK-SD-NEXT: xtn v6.4h, v6.4s
-; CHECK-SD-NEXT: xtn v5.4h, v5.4s
-; CHECK-SD-NEXT: xtn v4.4h, v4.4s
-; CHECK-SD-NEXT: mov v2.d[1], v3.d[0]
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
-; CHECK-SD-NEXT: mov v6.d[1], v7.d[0]
-; CHECK-SD-NEXT: mov v4.d[1], v5.d[0]
+; CHECK-SD-NEXT: uzp1 v2.8h, v2.8h, v3.8h
+; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-SD-NEXT: uzp1 v1.8h, v6.8h, v7.8h
+; CHECK-SD-NEXT: uzp1 v3.8h, v4.8h, v5.8h
; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v2.16b
-; CHECK-SD-NEXT: uzp1 v1.16b, v4.16b, v6.16b
+; CHECK-SD-NEXT: uzp1 v1.16b, v3.16b, v1.16b
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: fptos_v32f32_v32i8:
@@ -5272,8 +5179,13 @@ define <2 x i16> @fptos_v2f16_v2i16(<2 x half> %a) {
;
; CHECK-GI-FP16-LABEL: fptos_v2f16_v2i16:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-GI-FP16-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: fcvtzs v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-FP16-NEXT: ret
entry:
@@ -5300,8 +5212,13 @@ define <2 x i16> @fptou_v2f16_v2i16(<2 x half> %a) {
;
; CHECK-GI-FP16-LABEL: fptou_v2f16_v2i16:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-GI-FP16-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: fcvtzu v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-FP16-NEXT: ret
entry:
@@ -5746,8 +5663,13 @@ define <2 x i8> @fptos_v2f16_v2i8(<2 x half> %a) {
;
; CHECK-GI-FP16-LABEL: fptos_v2f16_v2i8:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-GI-FP16-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: fcvtzs v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-FP16-NEXT: ret
entry:
@@ -5774,8 +5696,13 @@ define <2 x i8> @fptou_v2f16_v2i8(<2 x half> %a) {
;
; CHECK-GI-FP16-LABEL: fptou_v2f16_v2i8:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-GI-FP16-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: fcvtzu v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-FP16-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/fsincos.ll b/llvm/test/CodeGen/AArch64/fsincos.ll
index 2ab1610..0b34f95 100644
--- a/llvm/test/CodeGen/AArch64/fsincos.ll
+++ b/llvm/test/CodeGen/AArch64/fsincos.ll
@@ -36,6 +36,19 @@ entry:
ret half %c
}
+define <1 x double> @sin_v1f64(<1 x double> %x) {
+; CHECK-LABEL: sin_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl sin
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.sin.v1f64(<1 x double> %x)
+ ret <1 x double> %c
+}
+
define <2 x double> @sin_v2f64(<2 x double> %a) {
; CHECK-SD-LABEL: sin_v2f64:
; CHECK-SD: // %bb.0: // %entry
@@ -1293,6 +1306,19 @@ entry:
ret half %c
}
+define <1 x double> @cos_v1f64(<1 x double> %x) {
+; CHECK-LABEL: cos_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl cos
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = call <1 x double> @llvm.cos.v1f64(<1 x double> %x)
+ ret <1 x double> %c
+}
+
define <2 x double> @cos_v2f64(<2 x double> %a) {
; CHECK-SD-LABEL: cos_v2f64:
; CHECK-SD: // %bb.0: // %entry
diff --git a/llvm/test/CodeGen/AArch64/hadd-combine.ll b/llvm/test/CodeGen/AArch64/hadd-combine.ll
index 2269d75..491bf40 100644
--- a/llvm/test/CodeGen/AArch64/hadd-combine.ll
+++ b/llvm/test/CodeGen/AArch64/hadd-combine.ll
@@ -329,9 +329,29 @@ define <8 x i16> @hadds_i_undef(<8 x i16> %t, <8 x i16> %src1) {
ret <8 x i16> %result
}
+define <8 x i16> @sub_fixedwidth_v4i32(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: sub_fixedwidth_v4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: urhadd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %or = or <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a0, %a1
+ %srl = lshr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <8 x i16> %or, %srl
+ ret <8 x i16> %res
+}
-
-
+define <8 x i16> @srhadd_fixedwidth_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: srhadd_fixedwidth_v8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: srhadd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %or = or <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a0, %a1
+ %srl = ashr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <8 x i16> %or, %srl
+ ret <8 x i16> %res
+}
define <8 x i16> @rhaddu_base(<8 x i16> %src1, <8 x i16> %src2) {
; CHECK-LABEL: rhaddu_base:
@@ -859,6 +879,30 @@ define <4 x i32> @urhadd_v4i32(<4 x i32> %x) {
ret <4 x i32> %r
}
+define <8 x i16> @uhadd_fixedwidth_v4i32(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: uhadd_fixedwidth_v4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uhadd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %and = and <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a0, %a1
+ %srl = lshr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <8 x i16> %and, %srl
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @shadd_fixedwidth_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: shadd_fixedwidth_v8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shadd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %and = and <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a0, %a1
+ %srl = ashr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <8 x i16> %and, %srl
+ ret <8 x i16> %res
+}
+
declare <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8>, <8 x i8>)
declare <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16>, <4 x i16>)
declare <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/AArch64/implicit-def-remat-requires-impdef-check.mir b/llvm/test/CodeGen/AArch64/implicit-def-remat-requires-impdef-check.mir
index aa94a037..47aa34e 100644
--- a/llvm/test/CodeGen/AArch64/implicit-def-remat-requires-impdef-check.mir
+++ b/llvm/test/CodeGen/AArch64/implicit-def-remat-requires-impdef-check.mir
@@ -22,6 +22,7 @@
name: inst_stores_to_dead_spill_implicit_def_impdef
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0:
@@ -59,6 +60,7 @@ body: |
name: inst_stores_to_dead_spill_movimm_impdef
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0:
diff --git a/llvm/test/CodeGen/AArch64/implicit-def-with-impdef-greedy-assert.mir b/llvm/test/CodeGen/AArch64/implicit-def-with-impdef-greedy-assert.mir
index e5395b2..a5d74ef 100644
--- a/llvm/test/CodeGen/AArch64/implicit-def-with-impdef-greedy-assert.mir
+++ b/llvm/test/CodeGen/AArch64/implicit-def-with-impdef-greedy-assert.mir
@@ -4,6 +4,8 @@
---
name: widget
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
jumpTable:
kind: label-difference32
entries:
diff --git a/llvm/test/CodeGen/AArch64/insert-subvector.ll b/llvm/test/CodeGen/AArch64/insert-subvector.ll
index d7656e1..6828fa9 100644
--- a/llvm/test/CodeGen/AArch64/insert-subvector.ll
+++ b/llvm/test/CodeGen/AArch64/insert-subvector.ll
@@ -374,18 +374,115 @@ define <16 x i8> @load_v16i8_8_2(float %tmp, <16 x i8> %b, ptr %a) {
ret <16 x i8> %s2
}
+define <8 x i8> @load_v8i8_2_1(float %tmp, <8 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v8i8_2_1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ldr h2, [x0]
+; CHECK-NEXT: mov v0.h[0], v2.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %s2
+}
+
+define <8 x i8> @load_v8i8_2_15(float %tmp, <8 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v8i8_2_15:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr h0, [x0]
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: adrp x8, .LCPI33_0
+; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI33_0]
+; CHECK-NEXT: tbl v0.8b, { v0.16b }, v1.8b
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 8, i32 0, i32 1, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %s2
+}
+
+define <8 x i8> @load_v8i8_2_2(float %tmp, <8 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v8i8_2_2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ldr h2, [x0]
+; CHECK-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 8, i32 9, i32 0, i32 1, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %s2
+}
+
+define <8 x i8> @load_v8i8_2_3(float %tmp, <8 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v8i8_2_3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ldr h2, [x0]
+; CHECK-NEXT: mov v0.h[2], v2.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 0, i32 1, i32 14, i32 15>
+ ret <8 x i8> %s2
+}
+
+define <8 x i8> @load_v8i8_2_4(float %tmp, <8 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v8i8_2_4:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ldr h2, [x0]
+; CHECK-NEXT: mov v0.h[3], v2.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 0, i32 1>
+ ret <8 x i8> %s2
+}
+
+define <4 x i8> @load_v4i8_2_1(float %tmp, <4 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v4i8_2_1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr h0, [x0]
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: zip1 v0.8b, v0.8b, v0.8b
+; CHECK-NEXT: mov v0.s[1], v1.s[1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %s2 = shufflevector <4 x i8> %s1, <4 x i8> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+ ret <4 x i8> %s2
+}
+
+define <4 x i8> @load_v4i8_2_2(float %tmp, <4 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v4i8_2_2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr h0, [x0]
+; CHECK-NEXT: zip1 v2.8b, v0.8b, v0.8b
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: mov v0.s[1], v2.s[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %s2 = shufflevector <4 x i8> %s1, <4 x i8> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
+ ret <4 x i8> %s2
+}
+
; i16
define <8 x i16> @load_v8i16_2_1(float %tmp, <8 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v8i16_2_1:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: add x9, x0, #2
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ld1 { v0.h }[2], [x9]
-; CHECK-NEXT: xtn v2.4h, v0.4s
; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.s[0], v2.s[0]
+; CHECK-NEXT: ld1 { v0.s }[0], [x0]
; CHECK-NEXT: ret
%l = load <2 x i16>, ptr %a
%s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -396,14 +493,10 @@ define <8 x i16> @load_v8i16_2_1(float %tmp, <8 x i16> %b, ptr %a) {
define <8 x i16> @load_v8i16_2_15(float %tmp, <8 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v8i16_2_15:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: add x9, x0, #2
; CHECK-NEXT: // kill: def $q1 killed $q1 def $q0_q1
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: adrp x8, .LCPI33_0
-; CHECK-NEXT: ld1 { v2.h }[2], [x9]
-; CHECK-NEXT: xtn v0.4h, v2.4s
-; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI33_0]
+; CHECK-NEXT: adrp x8, .LCPI40_0
+; CHECK-NEXT: ldr s0, [x0]
+; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI40_0]
; CHECK-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
; CHECK-NEXT: ret
%l = load <2 x i16>, ptr %a
@@ -415,13 +508,8 @@ define <8 x i16> @load_v8i16_2_15(float %tmp, <8 x i16> %b, ptr %a) {
define <8 x i16> @load_v8i16_2_2(float %tmp, <8 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v8i16_2_2:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: add x9, x0, #2
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ld1 { v0.h }[2], [x9]
-; CHECK-NEXT: xtn v2.4h, v0.4s
; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.s[1], v2.s[0]
+; CHECK-NEXT: ld1 { v0.s }[1], [x0]
; CHECK-NEXT: ret
%l = load <2 x i16>, ptr %a
%s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -432,13 +520,8 @@ define <8 x i16> @load_v8i16_2_2(float %tmp, <8 x i16> %b, ptr %a) {
define <8 x i16> @load_v8i16_2_3(float %tmp, <8 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v8i16_2_3:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: add x9, x0, #2
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ld1 { v0.h }[2], [x9]
-; CHECK-NEXT: xtn v2.4h, v0.4s
; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.s[2], v2.s[0]
+; CHECK-NEXT: ld1 { v0.s }[2], [x0]
; CHECK-NEXT: ret
%l = load <2 x i16>, ptr %a
%s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -449,13 +532,8 @@ define <8 x i16> @load_v8i16_2_3(float %tmp, <8 x i16> %b, ptr %a) {
define <8 x i16> @load_v8i16_2_4(float %tmp, <8 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v8i16_2_4:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: add x9, x0, #2
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ld1 { v0.h }[2], [x9]
-; CHECK-NEXT: xtn v2.4h, v0.4s
; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.s[3], v2.s[0]
+; CHECK-NEXT: ld1 { v0.s }[3], [x0]
; CHECK-NEXT: ret
%l = load <2 x i16>, ptr %a
%s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -466,11 +544,8 @@ define <8 x i16> @load_v8i16_2_4(float %tmp, <8 x i16> %b, ptr %a) {
define <4 x i16> @load_v4i16_2_1(float %tmp, <4 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v4i16_2_1:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1 { v0.h }[0], [x0]
-; CHECK-NEXT: add x8, x0, #2
+; CHECK-NEXT: ldr s0, [x0]
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: ld1 { v0.h }[2], [x8]
-; CHECK-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-NEXT: mov v0.s[1], v1.s[1]
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: ret
@@ -483,11 +558,8 @@ define <4 x i16> @load_v4i16_2_1(float %tmp, <4 x i16> %b, ptr %a) {
define <4 x i16> @load_v4i16_2_2(float %tmp, <4 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v4i16_2_2:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1 { v0.h }[0], [x0]
-; CHECK-NEXT: add x8, x0, #2
-; CHECK-NEXT: ld1 { v0.h }[2], [x8]
-; CHECK-NEXT: uzp1 v2.4h, v0.4h, v0.4h
; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ldr s2, [x0]
; CHECK-NEXT: mov v0.s[1], v2.s[0]
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/isinf.ll b/llvm/test/CodeGen/AArch64/isinf.ll
index 458bd7e..834417b 100644
--- a/llvm/test/CodeGen/AArch64/isinf.ll
+++ b/llvm/test/CodeGen/AArch64/isinf.ll
@@ -58,22 +58,14 @@ define i32 @replace_isinf_call_f64(double %x) {
define i32 @replace_isinf_call_f128(fp128 %x) {
; CHECK-LABEL: replace_isinf_call_f128:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #32
-; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: str q0, [sp]
-; CHECK-NEXT: ldrb w8, [sp, #15]
-; CHECK-NEXT: and w8, w8, #0x7f
-; CHECK-NEXT: strb w8, [sp, #15]
-; CHECK-NEXT: adrp x8, .LCPI3_0
-; CHECK-NEXT: ldr q0, [sp]
-; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI3_0]
-; CHECK-NEXT: bl __eqtf2
-; CHECK-NEXT: cmp w0, #0
-; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: str q0, [sp, #-16]!
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: ldp x9, x8, [sp], #16
+; CHECK-NEXT: and x8, x8, #0x7fffffffffffffff
+; CHECK-NEXT: eor x8, x8, #0x7fff000000000000
+; CHECK-NEXT: orr x8, x9, x8
+; CHECK-NEXT: cmp x8, #0
; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%abs = tail call fp128 @llvm.fabs.f128(fp128 %x)
%cmpinf = fcmp oeq fp128 %abs, 0xL00000000000000007FFF000000000000
diff --git a/llvm/test/CodeGen/AArch64/itofp.ll b/llvm/test/CodeGen/AArch64/itofp.ll
index 2164c2a..f5a7b5d 100644
--- a/llvm/test/CodeGen/AArch64/itofp.ll
+++ b/llvm/test/CodeGen/AArch64/itofp.ll
@@ -4,13 +4,6 @@
; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
-; CHECK-GI: warning: Instruction selection used fallback path for stofp_v3i8_v3f64
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v3i8_v3f64
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v3i8_v3f32
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v3i8_v3f32
-; CHECK-GI-NOFP16-NEXT: warning: Instruction selection used fallback path for stofp_v3i8_v3f16
-; CHECK-GI-NOFP16-NEXT: warning: Instruction selection used fallback path for utofp_v3i8_v3f16
-
define double @stofp_i64_f64(i64 %a) {
; CHECK-LABEL: stofp_i64_f64:
; CHECK: // %bb.0: // %entry
@@ -1754,47 +1747,109 @@ entry:
}
define <3 x double> @stofp_v3i8_v3f64(<3 x i8> %a) {
-; CHECK-LABEL: stofp_v3i8_v3f64:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: fmov s1, w2
-; CHECK-NEXT: mov v0.s[1], w1
-; CHECK-NEXT: shl v1.2s, v1.2s, #24
-; CHECK-NEXT: sshr v1.2s, v1.2s, #24
-; CHECK-NEXT: shl v0.2s, v0.2s, #24
-; CHECK-NEXT: sshll v1.2d, v1.2s, #0
-; CHECK-NEXT: sshr v0.2s, v0.2s, #24
-; CHECK-NEXT: scvtf v2.2d, v1.2d
-; CHECK-NEXT: sshll v0.2d, v0.2s, #0
-; CHECK-NEXT: // kill: def $d2 killed $d2 killed $q2
-; CHECK-NEXT: scvtf v0.2d, v0.2d
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: stofp_v3i8_v3f64:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: fmov s1, w2
+; CHECK-SD-NEXT: mov v0.s[1], w1
+; CHECK-SD-NEXT: shl v1.2s, v1.2s, #24
+; CHECK-SD-NEXT: sshr v1.2s, v1.2s, #24
+; CHECK-SD-NEXT: shl v0.2s, v0.2s, #24
+; CHECK-SD-NEXT: sshll v1.2d, v1.2s, #0
+; CHECK-SD-NEXT: sshr v0.2s, v0.2s, #24
+; CHECK-SD-NEXT: scvtf v2.2d, v1.2d
+; CHECK-SD-NEXT: sshll v0.2d, v0.2s, #0
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 killed $q2
+; CHECK-SD-NEXT: scvtf v0.2d, v0.2d
+; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: stofp_v3i8_v3f64:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fmov s0, w0
+; CHECK-GI-NEXT: fmov s1, w1
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: fmov s1, w2
+; CHECK-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov h2, v1.h[1]
+; CHECK-GI-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
+; CHECK-GI-NEXT: smov x8, v0.s[0]
+; CHECK-GI-NEXT: smov x9, v0.s[1]
+; CHECK-GI-NEXT: sshll v0.4s, v1.4h, #0
+; CHECK-GI-NEXT: fmov d1, x8
+; CHECK-GI-NEXT: smov x8, v0.s[0]
+; CHECK-GI-NEXT: mov v1.d[1], x9
+; CHECK-GI-NEXT: smov x9, v0.s[1]
+; CHECK-GI-NEXT: fmov d2, x8
+; CHECK-GI-NEXT: scvtf v0.2d, v1.2d
+; CHECK-GI-NEXT: mov v2.d[1], x9
+; CHECK-GI-NEXT: mov d1, v0.d[1]
+; CHECK-GI-NEXT: scvtf v2.2d, v2.2d
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 killed $q2
+; CHECK-GI-NEXT: ret
entry:
%c = sitofp <3 x i8> %a to <3 x double>
ret <3 x double> %c
}
define <3 x double> @utofp_v3i8_v3f64(<3 x i8> %a) {
-; CHECK-LABEL: utofp_v3i8_v3f64:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: movi d1, #0x0000ff000000ff
-; CHECK-NEXT: fmov s2, w2
-; CHECK-NEXT: mov v0.s[1], w1
-; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
-; CHECK-NEXT: and v1.8b, v2.8b, v1.8b
-; CHECK-NEXT: ushll v0.2d, v0.2s, #0
-; CHECK-NEXT: ushll v1.2d, v1.2s, #0
-; CHECK-NEXT: ucvtf v0.2d, v0.2d
-; CHECK-NEXT: ucvtf v2.2d, v1.2d
-; CHECK-NEXT: // kill: def $d2 killed $d2 killed $q2
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: utofp_v3i8_v3f64:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: movi d1, #0x0000ff000000ff
+; CHECK-SD-NEXT: fmov s2, w2
+; CHECK-SD-NEXT: mov v0.s[1], w1
+; CHECK-SD-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT: and v1.8b, v2.8b, v1.8b
+; CHECK-SD-NEXT: ushll v0.2d, v0.2s, #0
+; CHECK-SD-NEXT: ushll v1.2d, v1.2s, #0
+; CHECK-SD-NEXT: ucvtf v0.2d, v0.2d
+; CHECK-SD-NEXT: ucvtf v2.2d, v1.2d
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 killed $q2
+; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: utofp_v3i8_v3f64:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fmov s0, w0
+; CHECK-GI-NEXT: fmov s1, w1
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: fmov s1, w2
+; CHECK-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NEXT: movi d1, #0xff00ff00ff00ff
+; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov h2, v1.h[1]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov w8, v0.s[0]
+; CHECK-GI-NEXT: mov w9, v0.s[1]
+; CHECK-GI-NEXT: ushll v0.4s, v1.4h, #0
+; CHECK-GI-NEXT: fmov d1, x8
+; CHECK-GI-NEXT: mov w8, v0.s[0]
+; CHECK-GI-NEXT: mov v1.d[1], x9
+; CHECK-GI-NEXT: mov w9, v0.s[1]
+; CHECK-GI-NEXT: fmov d2, x8
+; CHECK-GI-NEXT: ucvtf v0.2d, v1.2d
+; CHECK-GI-NEXT: mov v2.d[1], x9
+; CHECK-GI-NEXT: mov d1, v0.d[1]
+; CHECK-GI-NEXT: ucvtf v2.2d, v2.2d
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 killed $q2
+; CHECK-GI-NEXT: ret
entry:
%c = uitofp <3 x i8> %a to <3 x double>
ret <3 x double> %c
@@ -3372,31 +3427,71 @@ entry:
}
define <3 x float> @stofp_v3i8_v3f32(<3 x i8> %a) {
-; CHECK-LABEL: stofp_v3i8_v3f32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: mov v0.h[1], w1
-; CHECK-NEXT: mov v0.h[2], w2
-; CHECK-NEXT: shl v0.4h, v0.4h, #8
-; CHECK-NEXT: sshr v0.4h, v0.4h, #8
-; CHECK-NEXT: sshll v0.4s, v0.4h, #0
-; CHECK-NEXT: scvtf v0.4s, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: stofp_v3i8_v3f32:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: mov v0.h[1], w1
+; CHECK-SD-NEXT: mov v0.h[2], w2
+; CHECK-SD-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-SD-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-SD-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-SD-NEXT: scvtf v0.4s, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: stofp_v3i8_v3f32:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fmov s0, w0
+; CHECK-GI-NEXT: fmov s1, w1
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: fmov s1, w2
+; CHECK-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v1.h[1], v3.h[0]
+; CHECK-GI-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: sshll v1.4s, v1.4h, #0
+; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT: scvtf v0.4s, v0.4s
+; CHECK-GI-NEXT: ret
entry:
%c = sitofp <3 x i8> %a to <3 x float>
ret <3 x float> %c
}
define <3 x float> @utofp_v3i8_v3f32(<3 x i8> %a) {
-; CHECK-LABEL: utofp_v3i8_v3f32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: mov v0.h[1], w1
-; CHECK-NEXT: mov v0.h[2], w2
-; CHECK-NEXT: bic v0.4h, #255, lsl #8
-; CHECK-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: utofp_v3i8_v3f32:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: mov v0.h[1], w1
+; CHECK-SD-NEXT: mov v0.h[2], w2
+; CHECK-SD-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-SD-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: utofp_v3i8_v3f32:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fmov s0, w0
+; CHECK-GI-NEXT: fmov s1, w1
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: fmov s1, w2
+; CHECK-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NEXT: movi d1, #0xff00ff00ff00ff
+; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v1.h[1], v3.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
+; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-GI-NEXT: ret
entry:
%c = uitofp <3 x i8> %a to <3 x float>
ret <3 x float> %c
@@ -5521,7 +5616,8 @@ define <2 x half> @stofp_v2i8_v2f16(<2 x i8> %a) {
; CHECK-GI-FP16: // %bb.0: // %entry
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-GI-FP16-NEXT: mov s1, v0.s[1]
-; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-FP16-NEXT: xtn v0.4h, v0.4s
; CHECK-GI-FP16-NEXT: shl v0.4h, v0.4h, #8
; CHECK-GI-FP16-NEXT: sshr v0.4h, v0.4h, #8
; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
@@ -5580,7 +5676,14 @@ define <2 x half> @utofp_v2i8_v2f16(<2 x i8> %a) {
;
; CHECK-GI-FP16-LABEL: utofp_v2i8_v2f16:
; CHECK-GI-FP16: // %bb.0: // %entry
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov s1, v0.s[1]
+; CHECK-GI-FP16-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-FP16-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
; CHECK-GI-FP16-NEXT: movi d1, #0x0000ff000000ff
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: and v0.8b, v0.8b, v1.8b
; CHECK-GI-FP16-NEXT: mov s1, v0.s[1]
; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
@@ -5620,11 +5723,20 @@ define <3 x half> @stofp_v3i8_v3f16(<3 x i8> %a) {
; CHECK-GI-NOFP16-LABEL: stofp_v3i8_v3f16:
; CHECK-GI-NOFP16: // %bb.0: // %entry
; CHECK-GI-NOFP16-NEXT: fmov s0, w0
-; CHECK-GI-NOFP16-NEXT: mov v0.h[1], w1
-; CHECK-GI-NOFP16-NEXT: mov v0.h[2], w2
+; CHECK-GI-NOFP16-NEXT: fmov s1, w1
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NOFP16-NEXT: fmov s1, w2
+; CHECK-GI-NOFP16-NEXT: mov v0.h[2], v1.h[0]
; CHECK-GI-NOFP16-NEXT: shl v0.4h, v0.4h, #8
; CHECK-GI-NOFP16-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-NOFP16-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NOFP16-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NOFP16-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NOFP16-NEXT: mov v1.h[1], v3.h[0]
; CHECK-GI-NOFP16-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-GI-NOFP16-NEXT: sshll v1.4s, v1.4h, #0
+; CHECK-GI-NOFP16-NEXT: mov v0.d[1], v1.d[0]
; CHECK-GI-NOFP16-NEXT: scvtf v0.4s, v0.4s
; CHECK-GI-NOFP16-NEXT: fcvtn v0.4h, v0.4s
; CHECK-GI-NOFP16-NEXT: ret
@@ -5633,11 +5745,10 @@ define <3 x half> @stofp_v3i8_v3f16(<3 x i8> %a) {
; CHECK-GI-FP16: // %bb.0: // %entry
; CHECK-GI-FP16-NEXT: fmov s0, w0
; CHECK-GI-FP16-NEXT: fmov s1, w1
-; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: mov v0.b[1], v1.b[0]
; CHECK-GI-FP16-NEXT: fmov s1, w2
-; CHECK-GI-FP16-NEXT: mov v0.h[2], v1.h[0]
-; CHECK-GI-FP16-NEXT: shl v0.4h, v0.4h, #8
-; CHECK-GI-FP16-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-FP16-NEXT: mov v0.b[2], v1.b[0]
+; CHECK-GI-FP16-NEXT: sshll v0.8h, v0.8b, #0
; CHECK-GI-FP16-NEXT: scvtf v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: ret
entry:
@@ -5669,10 +5780,20 @@ define <3 x half> @utofp_v3i8_v3f16(<3 x i8> %a) {
; CHECK-GI-NOFP16-LABEL: utofp_v3i8_v3f16:
; CHECK-GI-NOFP16: // %bb.0: // %entry
; CHECK-GI-NOFP16-NEXT: fmov s0, w0
-; CHECK-GI-NOFP16-NEXT: mov v0.h[1], w1
-; CHECK-GI-NOFP16-NEXT: mov v0.h[2], w2
-; CHECK-GI-NOFP16-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-GI-NOFP16-NEXT: fmov s1, w1
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NOFP16-NEXT: fmov s1, w2
+; CHECK-GI-NOFP16-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NOFP16-NEXT: movi d1, #0xff00ff00ff00ff
+; CHECK-GI-NOFP16-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-GI-NOFP16-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NOFP16-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NOFP16-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NOFP16-NEXT: mov v1.h[1], v3.h[0]
; CHECK-GI-NOFP16-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NOFP16-NEXT: ushll v1.4s, v1.4h, #0
+; CHECK-GI-NOFP16-NEXT: mov v0.d[1], v1.d[0]
; CHECK-GI-NOFP16-NEXT: ucvtf v0.4s, v0.4s
; CHECK-GI-NOFP16-NEXT: fcvtn v0.4h, v0.4s
; CHECK-GI-NOFP16-NEXT: ret
@@ -5681,11 +5802,10 @@ define <3 x half> @utofp_v3i8_v3f16(<3 x i8> %a) {
; CHECK-GI-FP16: // %bb.0: // %entry
; CHECK-GI-FP16-NEXT: fmov s0, w0
; CHECK-GI-FP16-NEXT: fmov s1, w1
-; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: mov v0.b[1], v1.b[0]
; CHECK-GI-FP16-NEXT: fmov s1, w2
-; CHECK-GI-FP16-NEXT: mov v0.h[2], v1.h[0]
-; CHECK-GI-FP16-NEXT: movi d1, #0xff00ff00ff00ff
-; CHECK-GI-FP16-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-GI-FP16-NEXT: mov v0.b[2], v1.b[0]
+; CHECK-GI-FP16-NEXT: ushll v0.8h, v0.8b, #0
; CHECK-GI-FP16-NEXT: ucvtf v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/llvm.exp10.ll b/llvm/test/CodeGen/AArch64/llvm.exp10.ll
index 56f4272..51d17ad 100644
--- a/llvm/test/CodeGen/AArch64/llvm.exp10.ll
+++ b/llvm/test/CodeGen/AArch64/llvm.exp10.ll
@@ -532,11 +532,18 @@ define double @exp10_f64(double %x) {
ret double %r
}
-; FIXME: Broken
-; define <1 x double> @exp10_v1f64(<1 x double> %x) {
-; %r = call <1 x double> @llvm.exp10.v1f64(<1 x double> %x)
-; ret <1 x double> %r
-; }
+define <1 x double> @exp10_v1f64(<1 x double> %x) {
+; CHECK-LABEL: exp10_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl exp10
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %r = call <1 x double> @llvm.exp10.v1f64(<1 x double> %x)
+ ret <1 x double> %r
+}
define <2 x double> @exp10_v2f64(<2 x double> %x) {
; SDAG-LABEL: exp10_v2f64:
diff --git a/llvm/test/CodeGen/AArch64/load.ll b/llvm/test/CodeGen/AArch64/load.ll
index 39143e5..c3c0ec5 100644
--- a/llvm/test/CodeGen/AArch64/load.ll
+++ b/llvm/test/CodeGen/AArch64/load.ll
@@ -159,7 +159,8 @@ define <2 x i16> @load_v2i16(ptr %ptr){
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldr h0, [x0]
; CHECK-GI-NEXT: ldr h1, [x0, #2]
-; CHECK-GI-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-NEXT: ret
%a = load <2 x i16>, ptr %ptr
diff --git a/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir b/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir
index 23cf1dc..5b379c2 100644
--- a/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir
+++ b/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir
@@ -10,7 +10,6 @@ body: |
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: liveins: $w0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $x8 = ORRXrs $xzr, $x0, 0, implicit $w0
; CHECK-NEXT: $w8 = ORRWrs $wzr, $w0, 0, implicit-def $x8
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
diff --git a/llvm/test/CodeGen/AArch64/misched-bundle.mir b/llvm/test/CodeGen/AArch64/misched-bundle.mir
new file mode 100644
index 0000000..a947c04
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/misched-bundle.mir
@@ -0,0 +1,195 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a510 -run-pass=machine-scheduler -debug-only=machine-scheduler %s -o - 2>&1 | FileCheck %s
+# REQUIRES: asserts
+
+# CHECK: SU(0): renamable $z0 = LD1H renamable $p0, renamable $x1, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 4
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 7
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(6): Out Latency=1
+# CHECK-NEXT: SU(6): Data Latency=3 Reg=$z0
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(1): renamable $z1 = LD1H renamable $p0, renamable $x2, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 4
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 7
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(7): Out Latency=1
+# CHECK-NEXT: SU(6): Data Latency=3 Reg=$z1
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(2): renamable $z2 = LD1H renamable $p0, renamable $x0, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 3
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 7
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(6): Data Latency=3 Reg=$z2
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(3): renamable $z3 = LD1H renamable $p0, renamable $x11, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 3
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(7): Data Latency=0 Reg=$z3
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(4): renamable $z4 = LD1H renamable $p0, renamable $x12, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 3
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(7): Data Latency=0 Reg=$z4
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(5): renamable $z5 = LD1H renamable $p0, renamable $x13, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 3
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(7): Data Latency=0 Reg=$z5
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(6): $z0 = FMAD_ZPmZZ_H renamable $p0, killed $z0(tied-def 0), killed renamable $z1, killed renamable $z2
+# CHECK-NEXT: # preds left : 4
+# CHECK-NEXT: # succs left : 2
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 4
+# CHECK-NEXT: Depth : 3
+# CHECK-NEXT: Height : 4
+# CHECK-NEXT: Predecessors:
+# CHECK-NEXT: SU(2): Data Latency=3 Reg=$z2
+# CHECK-NEXT: SU(1): Data Latency=3 Reg=$z1
+# CHECK-NEXT: SU(0): Out Latency=1
+# CHECK-NEXT: SU(0): Data Latency=3 Reg=$z0
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(8): Data Latency=4 Reg=$z0
+# CHECK-NEXT: SU(7): Anti Latency=0
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(7): BUNDLE implicit-def $z1, implicit-def $q1, implicit-def $d1, implicit-def $s1, implicit-def $h1, implicit-def $b1, implicit $z5, implicit $p0, implicit killed $z4, implicit killed $z3
+# CHECK-NEXT: # preds left : 5
+# CHECK-NEXT: # succs left : 1
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 1
+# CHECK-NEXT: Depth : 3
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Predecessors:
+# CHECK-NEXT: SU(6): Anti Latency=0
+# CHECK-NEXT: SU(5): Data Latency=0 Reg=$z5
+# CHECK-NEXT: SU(4): Data Latency=0 Reg=$z4
+# CHECK-NEXT: SU(3): Data Latency=0 Reg=$z3
+# CHECK-NEXT: SU(1): Out Latency=1
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(9): Data Latency=0 Reg=$z1
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(8): ST1H killed renamable $z0, renamable $p0, renamable $x0, renamable $x10 :: (store unknown-size, align 1)
+# CHECK-NEXT: # preds left : 7
+# CHECK-NEXT: # succs left : 1
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 1
+# CHECK-NEXT: Depth : 7
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Predecessors:
+# CHECK-NEXT: SU(6): Data Latency=4 Reg=$z0
+# CHECK-NEXT: SU(5): Ord Latency=0 Memory
+# CHECK-NEXT: SU(4): Ord Latency=0 Memory
+# CHECK-NEXT: SU(3): Ord Latency=0 Memory
+# CHECK-NEXT: SU(2): Ord Latency=0 Memory
+# CHECK-NEXT: SU(1): Ord Latency=0 Memory
+# CHECK-NEXT: SU(0): Ord Latency=0 Memory
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(9): ST1H killed renamable $z1, renamable $p0, renamable $x13, renamable $x10 :: (store unknown-size, align 1)
+# CHECK-NEXT: # preds left : 8
+# CHECK-NEXT: # succs left : 0
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 1
+# CHECK-NEXT: Depth : 7
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Predecessors:
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: SU(7): Data Latency=0 Reg=$z1
+# CHECK-NEXT: SU(5): Ord Latency=0 Memory
+# CHECK-NEXT: SU(4): Ord Latency=0 Memory
+# CHECK-NEXT: SU(3): Ord Latency=0 Memory
+# CHECK-NEXT: SU(2): Ord Latency=0 Memory
+# CHECK-NEXT: SU(1): Ord Latency=0 Memory
+# CHECK-NEXT: SU(0): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: ExitSU: RET_ReallyLR
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 0
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 0
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 0
+
+---
+name: test
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $p0, $x0, $x1, $x2, $x10, $x11, $x12, $x13
+
+ ; CHECK-LABEL: name: test
+ ; CHECK: liveins: $p0, $x0, $x1, $x2, $x10, $x11, $x12, $x13
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $z0 = LD1H renamable $p0, renamable $x1, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: renamable $z1 = LD1H renamable $p0, renamable $x2, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: renamable $z2 = LD1H renamable $p0, renamable $x0, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: $z0 = FMAD_ZPmZZ_H renamable $p0, killed $z0, renamable $z1, killed renamable $z2
+ ; CHECK-NEXT: renamable $z3 = LD1H renamable $p0, renamable $x11, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: renamable $z4 = LD1H renamable $p0, renamable $x12, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: renamable $z5 = LD1H renamable $p0, renamable $x13, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: ST1H killed renamable $z0, renamable $p0, renamable $x0, renamable $x10 :: (store unknown-size, align 1)
+ ; CHECK-NEXT: BUNDLE implicit-def $z1, implicit-def $q1, implicit-def $d1, implicit-def $s1, implicit-def $h1, implicit-def $b1, implicit $z5, implicit $p0, implicit $z4, implicit $z3 {
+ ; CHECK-NEXT: $z1 = MOVPRFX_ZZ $z5
+ ; CHECK-NEXT: $z1 = FMLA_ZPmZZ_H renamable $p0, internal $z1, renamable $z4, renamable $z3
+ ; CHECK-NEXT: }
+ ; CHECK-NEXT: ST1H renamable $z1, renamable $p0, renamable $x13, renamable $x10 :: (store unknown-size, align 1)
+ ; CHECK-NEXT: RET_ReallyLR
+
+ renamable $z0 = LD1H renamable $p0, renamable $x1, renamable $x10 :: (load unknown-size)
+ renamable $z1 = LD1H renamable $p0, renamable $x2, renamable $x10 :: (load unknown-size)
+ renamable $z2 = LD1H renamable $p0, renamable $x0, renamable $x10 :: (load unknown-size)
+ renamable $z3 = LD1H renamable $p0, renamable $x11, renamable $x10 :: (load unknown-size)
+ renamable $z4 = LD1H renamable $p0, renamable $x12, renamable $x10 :: (load unknown-size)
+ renamable $z5 = LD1H renamable $p0, renamable $x13, renamable $x10 :: (load unknown-size)
+ $z0 = FMAD_ZPmZZ_H renamable $p0, killed $z0, killed renamable $z1, killed renamable $z2
+ BUNDLE implicit-def $z1, implicit-def $q1, implicit-def $d1, implicit-def $s1, implicit-def $h1, implicit-def $b1, implicit $z5, implicit $p0, implicit killed $z4, implicit killed $z3 {
+ $z1 = MOVPRFX_ZZ $z5
+ $z1 = FMLA_ZPmZZ_H renamable $p0, internal killed $z1, killed renamable $z4, killed renamable $z3
+ }
+ ST1H killed renamable $z0, renamable $p0, renamable $x0, renamable $x10 :: (store unknown-size)
+ ST1H killed renamable $z1, renamable $p0, renamable $x13, renamable $x10 :: (store unknown-size)
+ RET_ReallyLR
+
+...
diff --git a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
index 0162065..57f220f 100644
--- a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
@@ -1117,8 +1117,15 @@ define <4 x i16> @vselect_constant_cond_zero_v4i16(<4 x i16> %a) {
;
; CHECK-GI-LABEL: vselect_constant_cond_zero_v4i16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI84_0
-; CHECK-GI-NEXT: ldr d1, [x8, :lo12:.LCPI84_0]
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: mov w9, #0 // =0x0
+; CHECK-GI-NEXT: fmov s1, w8
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: mov v3.16b, v1.16b
+; CHECK-GI-NEXT: mov v3.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov v3.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v3.b[3], v1.b[0]
+; CHECK-GI-NEXT: ushll v1.8h, v3.8b, #0
; CHECK-GI-NEXT: shl v1.4h, v1.4h, #15
; CHECK-GI-NEXT: sshr v1.4h, v1.4h, #15
; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b
@@ -1137,8 +1144,16 @@ define <4 x i32> @vselect_constant_cond_zero_v4i32(<4 x i32> %a) {
;
; CHECK-GI-LABEL: vselect_constant_cond_zero_v4i32:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI85_0
-; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI85_0]
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: mov w9, #0 // =0x0
+; CHECK-GI-NEXT: fmov s1, w8
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: mov v3.16b, v1.16b
+; CHECK-GI-NEXT: mov v3.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v2.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v1.4s, v3.4h, #0
+; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0
+; CHECK-GI-NEXT: mov v1.d[1], v2.d[0]
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b
@@ -1181,8 +1196,15 @@ define <4 x i16> @vselect_constant_cond_v4i16(<4 x i16> %a, <4 x i16> %b) {
;
; CHECK-GI-LABEL: vselect_constant_cond_v4i16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI87_0
-; CHECK-GI-NEXT: ldr d2, [x8, :lo12:.LCPI87_0]
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: mov w9, #0 // =0x0
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: mov v4.16b, v2.16b
+; CHECK-GI-NEXT: mov v4.b[1], v3.b[0]
+; CHECK-GI-NEXT: mov v4.b[2], v3.b[0]
+; CHECK-GI-NEXT: mov v4.b[3], v2.b[0]
+; CHECK-GI-NEXT: ushll v2.8h, v4.8b, #0
; CHECK-GI-NEXT: shl v2.4h, v2.4h, #15
; CHECK-GI-NEXT: sshr v2.4h, v2.4h, #15
; CHECK-GI-NEXT: bif v0.8b, v1.8b, v2.8b
@@ -1201,8 +1223,16 @@ define <4 x i32> @vselect_constant_cond_v4i32(<4 x i32> %a, <4 x i32> %b) {
;
; CHECK-GI-LABEL: vselect_constant_cond_v4i32:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI88_0
-; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI88_0]
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: mov w9, #0 // =0x0
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: mov v4.16b, v2.16b
+; CHECK-GI-NEXT: mov v4.h[1], v3.h[0]
+; CHECK-GI-NEXT: mov v3.h[1], v2.h[0]
+; CHECK-GI-NEXT: ushll v2.4s, v4.4h, #0
+; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0
+; CHECK-GI-NEXT: mov v2.d[1], v3.d[0]
; CHECK-GI-NEXT: shl v2.4s, v2.4s, #31
; CHECK-GI-NEXT: sshr v2.4s, v2.4s, #31
; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b
diff --git a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
index 632b6b3..dbb5dfeb 100644
--- a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
@@ -2870,6 +2870,107 @@ define <2 x i64> @fcmune2xdouble(<2 x double> %A, <2 x double> %B) {
ret <2 x i64> %tmp4
}
+define <2 x i32> @fcmal2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-SD-LABEL: fcmal2xfloat:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.2d, #0xffffffffffffffff
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: fcmal2xfloat:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: movi v0.2s, #1
+; CHECK-GI-NEXT: shl v0.2s, v0.2s, #31
+; CHECK-GI-NEXT: sshr v0.2s, v0.2s, #31
+; CHECK-GI-NEXT: ret
+ %tmp3 = fcmp true <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmal4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-SD-LABEL: fcmal4xfloat:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.2d, #0xffffffffffffffff
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: fcmal4xfloat:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: mov v1.16b, v0.16b
+; CHECK-GI-NEXT: mov v1.h[1], v0.h[0]
+; CHECK-GI-NEXT: mov v0.h[1], v0.h[0]
+; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: mov v1.d[1], v0.d[0]
+; CHECK-GI-NEXT: shl v0.4s, v1.4s, #31
+; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: ret
+ %tmp3 = fcmp true <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmal2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-SD-LABEL: fcmal2xdouble:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.2d, #0xffffffffffffffff
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: fcmal2xdouble:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: adrp x8, .LCPI221_0
+; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI221_0]
+; CHECK-GI-NEXT: shl v0.2d, v0.2d, #63
+; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #63
+; CHECK-GI-NEXT: ret
+ %tmp3 = fcmp true <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmnv2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: fcmnv2xfloat:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: ret
+ %tmp3 = fcmp false <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmnv4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-SD-LABEL: fcmnv4xfloat:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.2d, #0000000000000000
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: fcmnv4xfloat:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #0 // =0x0
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: mov v1.16b, v0.16b
+; CHECK-GI-NEXT: mov v1.h[1], v0.h[0]
+; CHECK-GI-NEXT: mov v0.h[1], v0.h[0]
+; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: mov v1.d[1], v0.d[0]
+; CHECK-GI-NEXT: shl v0.4s, v1.4s, #31
+; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: ret
+ %tmp3 = fcmp false <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmnv2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: fcmnv2xdouble:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: ret
+ %tmp3 = fcmp false <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
define <2 x i32> @fcmoeqz2xfloat(<2 x float> %A) {
; CHECK-LABEL: fcmoeqz2xfloat:
; CHECK: // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/neon-truncstore.ll b/llvm/test/CodeGen/AArch64/neon-truncstore.ll
index b677d077..5d78ad2 100644
--- a/llvm/test/CodeGen/AArch64/neon-truncstore.ll
+++ b/llvm/test/CodeGen/AArch64/neon-truncstore.ll
@@ -104,7 +104,7 @@ define void @v4i32_v4i8(<4 x i32> %a, ptr %result) {
; CHECK-LABEL: v4i32_v4i8:
; CHECK: // %bb.0:
; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: xtn v0.8b, v0.8h
+; CHECK-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-NEXT: str s0, [x0]
; CHECK-NEXT: ret
%b = trunc <4 x i32> %a to <4 x i8>
@@ -170,8 +170,7 @@ define void @v2i16_v2i8(<2 x i16> %a, ptr %result) {
define void @v4i16_v4i8(<4 x i16> %a, ptr %result) {
; CHECK-LABEL: v4i16_v4i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: xtn v0.8b, v0.8h
+; CHECK-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-NEXT: str s0, [x0]
; CHECK-NEXT: ret
%b = trunc <4 x i16> %a to <4 x i8>
diff --git a/llvm/test/CodeGen/AArch64/overflow.ll b/llvm/test/CodeGen/AArch64/overflow.ll
index 444aaeb..977141f 100644
--- a/llvm/test/CodeGen/AArch64/overflow.ll
+++ b/llvm/test/CodeGen/AArch64/overflow.ll
@@ -19,20 +19,12 @@ entry:
}
define zeroext i1 @saddo1.i32.fold(i32 %v1, i32 %v2, ptr %res) {
-; SDAG-LABEL: saddo1.i32.fold:
-; SDAG: // %bb.0: // %entry
-; SDAG-NEXT: mov w8, #20 // =0x14
-; SDAG-NEXT: mov w0, wzr
-; SDAG-NEXT: str w8, [x2]
-; SDAG-NEXT: ret
-;
-; GISEL-LABEL: saddo1.i32.fold:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: mov w8, #9 // =0x9
-; GISEL-NEXT: adds w8, w8, #11
-; GISEL-NEXT: cset w0, vs
-; GISEL-NEXT: str w8, [x2]
-; GISEL-NEXT: ret
+; CHECK-LABEL: saddo1.i32.fold:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #20 // =0x14
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: str w8, [x2]
+; CHECK-NEXT: ret
entry:
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 9, i32 11)
%val = extractvalue {i32, i1} %t, 0
@@ -72,21 +64,10 @@ entry:
}
define i32 @saddo.select.i64(i32 %v1, i32 %v2, i1 %v3, i64 %v4, i64 %v5) {
-; SDAG-LABEL: saddo.select.i64:
-; SDAG: // %bb.0: // %entry
-; SDAG-NEXT: mov w0, w1
-; SDAG-NEXT: ret
-;
-; GISEL-LABEL: saddo.select.i64:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: mov w8, #13 // =0xd
-; GISEL-NEXT: and x9, x3, #0xc
-; GISEL-NEXT: and x8, x4, x8
-; GISEL-NEXT: cmn x9, x8
-; GISEL-NEXT: cset w8, vs
-; GISEL-NEXT: tst w8, #0x1
-; GISEL-NEXT: csel w0, w0, w1, ne
-; GISEL-NEXT: ret
+; CHECK-LABEL: saddo.select.i64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, w1
+; CHECK-NEXT: ret
entry:
%lhs = and i64 %v4, 12
%rhs = and i64 %v5, 13
@@ -97,22 +78,10 @@ entry:
}
define i32 @uaddo.select.i64(i32 %v1, i32 %v2, i1 %v3, i64 %v4, i64 %v5) {
-; SDAG-LABEL: uaddo.select.i64:
-; SDAG: // %bb.0: // %entry
-; SDAG-NEXT: mov w0, w1
-; SDAG-NEXT: ret
-;
-; GISEL-LABEL: uaddo.select.i64:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: mov w8, #9 // =0x9
-; GISEL-NEXT: mov w9, #10 // =0xa
-; GISEL-NEXT: and x8, x3, x8
-; GISEL-NEXT: and x9, x4, x9
-; GISEL-NEXT: cmn x8, x9
-; GISEL-NEXT: cset w8, hs
-; GISEL-NEXT: tst w8, #0x1
-; GISEL-NEXT: csel w0, w0, w1, ne
-; GISEL-NEXT: ret
+; CHECK-LABEL: uaddo.select.i64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, w1
+; CHECK-NEXT: ret
entry:
%lhs = and i64 %v4, 9
%rhs = and i64 %v5, 10
@@ -123,18 +92,11 @@ entry:
}
define zeroext i1 @saddo.canon.i32(i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, ptr %res) {
-; SDAG-LABEL: saddo.canon.i32:
-; SDAG: // %bb.0: // %entry
-; SDAG-NEXT: mov w0, wzr
-; SDAG-NEXT: str w4, [x5]
-; SDAG-NEXT: ret
-;
-; GISEL-LABEL: saddo.canon.i32:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: adds w8, wzr, w4
-; GISEL-NEXT: cset w0, vs
-; GISEL-NEXT: str w8, [x5]
-; GISEL-NEXT: ret
+; CHECK-LABEL: saddo.canon.i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: str w4, [x5]
+; CHECK-NEXT: ret
entry:
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 0, i32 %v5)
%val = extractvalue {i32, i1} %t, 0
@@ -143,13 +105,19 @@ entry:
ret i1 %obit
}
define zeroext i1 @saddo.add.i32(i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, ptr %res) {
-; CHECK-LABEL: saddo.add.i32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: add w8, w4, #100
-; CHECK-NEXT: subs w8, w8, #100
-; CHECK-NEXT: cset w0, vs
-; CHECK-NEXT: str w8, [x5]
-; CHECK-NEXT: ret
+; SDAG-LABEL: saddo.add.i32:
+; SDAG: // %bb.0: // %entry
+; SDAG-NEXT: add w8, w4, #100
+; SDAG-NEXT: subs w8, w8, #100
+; SDAG-NEXT: cset w0, vs
+; SDAG-NEXT: str w8, [x5]
+; SDAG-NEXT: ret
+;
+; GISEL-LABEL: saddo.add.i32:
+; GISEL: // %bb.0: // %entry
+; GISEL-NEXT: mov w0, wzr
+; GISEL-NEXT: str w4, [x5]
+; GISEL-NEXT: ret
entry:
%lhs = add nsw i32 %v5, 100
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %lhs, i32 -100)
@@ -160,13 +128,20 @@ entry:
}
define zeroext i1 @uaddo.add.i32(i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, ptr %res) {
-; CHECK-LABEL: uaddo.add.i32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: add w8, w4, #5
-; CHECK-NEXT: adds w8, w8, #5
-; CHECK-NEXT: cset w0, hs
-; CHECK-NEXT: str w8, [x5]
-; CHECK-NEXT: ret
+; SDAG-LABEL: uaddo.add.i32:
+; SDAG: // %bb.0: // %entry
+; SDAG-NEXT: add w8, w4, #5
+; SDAG-NEXT: adds w8, w8, #5
+; SDAG-NEXT: cset w0, hs
+; SDAG-NEXT: str w8, [x5]
+; SDAG-NEXT: ret
+;
+; GISEL-LABEL: uaddo.add.i32:
+; GISEL: // %bb.0: // %entry
+; GISEL-NEXT: adds w8, w4, #10
+; GISEL-NEXT: cset w0, hs
+; GISEL-NEXT: str w8, [x5]
+; GISEL-NEXT: ret
entry:
%lhs = add nuw i32 %v5, 5
%t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %lhs, i32 5)
diff --git a/llvm/test/CodeGen/AArch64/peephole-movd.mir b/llvm/test/CodeGen/AArch64/peephole-movd.mir
new file mode 100644
index 0000000..bd7f0ab
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/peephole-movd.mir
@@ -0,0 +1,60 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -run-pass=aarch64-mi-peephole-opt -o - -mtriple=aarch64-unknown-linux -verify-machineinstrs %s | FileCheck %s
+
+---
+name: remove_kill_flags
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $w0
+ ; CHECK-LABEL: name: remove_kill_flags
+ ; CHECK: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[MOVIv2d_ns:%[0-9]+]]:fpr128 = MOVIv2d_ns 0
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY [[MOVIv2d_ns]].dsub
+ ; CHECK-NEXT: [[UQSHLv8i8_shift:%[0-9]+]]:fpr64 = UQSHLv8i8_shift killed [[COPY]], 1
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:fpr128 = SUBREG_TO_REG 0, [[UQSHLv8i8_shift]], %subreg.dsub
+ ; CHECK-NEXT: [[TBLv8i8One:%[0-9]+]]:fpr64 = TBLv8i8One killed [[SUBREG_TO_REG]], [[UQSHLv8i8_shift]]
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UQSHLv8i8_shift]], %subreg.dsub
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:fpr128 = MOVIv2d_ns 0
+ %1:fpr64 = COPY %0.dsub:fpr128
+ %2:fpr64 = UQSHLv8i8_shift killed %1:fpr64, 1
+ %3:fpr64 = FMOVDr %2:fpr64
+ %4:fpr128 = SUBREG_TO_REG 0, killed %3:fpr64, %subreg.dsub
+ %5:fpr64 = TBLv8i8One killed %4:fpr128, %2:fpr64
+ %7:fpr128 = IMPLICIT_DEF
+ %6:fpr128 = INSERT_SUBREG %7:fpr128, killed %2:fpr64, %subreg.dsub
+ RET_ReallyLR implicit $w0
+...
+---
+name: remove_kill_flags2
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $w0
+ ; CHECK-LABEL: name: remove_kill_flags2
+ ; CHECK: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[MOVIv2d_ns:%[0-9]+]]:fpr128 = MOVIv2d_ns 0
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY [[MOVIv2d_ns]].dsub
+ ; CHECK-NEXT: [[UQSHLv8i8_shift:%[0-9]+]]:fpr64 = UQSHLv8i8_shift killed [[COPY]], 1
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:fpr128 = SUBREG_TO_REG 0, [[UQSHLv8i8_shift]], %subreg.dsub
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UQSHLv8i8_shift]], %subreg.dsub
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[UQSHLv8i8_shift]], %subreg.dsub
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:fpr128 = MOVIv2d_ns 0
+ %1:fpr64 = COPY %0.dsub:fpr128
+ %2:fpr64 = UQSHLv8i8_shift killed %1:fpr64, 1
+ %3:fpr64 = FMOVDr %2:fpr64
+ %4:fpr128 = SUBREG_TO_REG 0, %3:fpr64, %subreg.dsub
+ %7:fpr128 = IMPLICIT_DEF
+ %6:fpr128 = INSERT_SUBREG %7:fpr128, killed %2:fpr64, %subreg.dsub
+ %9:fpr128 = IMPLICIT_DEF
+ %8:fpr128 = INSERT_SUBREG %9:fpr128, killed %3:fpr64, %subreg.dsub
+ RET_ReallyLR implicit $w0
+...
+
diff --git a/llvm/test/CodeGen/AArch64/pr86717.ll b/llvm/test/CodeGen/AArch64/pr86717.ll
new file mode 100644
index 0000000..aa8be95
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/pr86717.ll
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+
+define <16 x i8> @f(i32 %0) {
+; CHECK-LABEL: f:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: mov x9, sp
+; CHECK-NEXT: sub w8, w8, w0
+; CHECK-NEXT: bfxil x9, x8, #0, #4
+; CHECK-NEXT: mov w8, #3 // =0x3
+; CHECK-NEXT: str q0, [sp]
+; CHECK-NEXT: strb w8, [x9]
+; CHECK-NEXT: ldr q0, [sp], #16
+; CHECK-NEXT: ret
+ %2 = sub nuw i32 1, %0
+ %3 = insertelement <16 x i8> zeroinitializer, i8 3, i32 %2
+ ret <16 x i8> %3
+}
diff --git a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
index 932b230..934ff44 100644
--- a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
+++ b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
@@ -147,10 +147,10 @@ define dso_local void @run_test() local_unnamed_addr uwtable {
; CHECK-NEXT: mov v19.16b, v23.16b
; CHECK-NEXT: mov v3.d[1], x20
; CHECK-NEXT: mov v23.16b, v27.16b
-; CHECK-NEXT: mov v27.16b, v9.16b
-; CHECK-NEXT: mul x15, x4, x5
; CHECK-NEXT: add v27.2d, v9.2d, v1.2d
+; CHECK-NEXT: mul x15, x4, x5
; CHECK-NEXT: str q11, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT: mov v11.16b, v15.16b
; CHECK-NEXT: mov v4.d[1], x22
; CHECK-NEXT: add v19.2d, v19.2d, v1.2d
; CHECK-NEXT: add v7.2d, v7.2d, v1.2d
@@ -171,9 +171,7 @@ define dso_local void @run_test() local_unnamed_addr uwtable {
; CHECK-NEXT: mov v10.16b, v26.16b
; CHECK-NEXT: mov v14.d[1], x13
; CHECK-NEXT: mov v22.16b, v31.16b
-; CHECK-NEXT: mov v20.16b, v8.16b
; CHECK-NEXT: ldp q26, q31, [sp] // 32-byte Folded Reload
-; CHECK-NEXT: mov v11.16b, v15.16b
; CHECK-NEXT: mov v0.d[1], x12
; CHECK-NEXT: add v13.2d, v13.2d, v14.2d
; CHECK-NEXT: add v31.2d, v31.2d, v14.2d
diff --git a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
index 5f905d9..6f1ae02 100644
--- a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
@@ -145,7 +145,7 @@ define void @v4i8(ptr %px, ptr %py, ptr %pz) nounwind {
; CHECK-NEXT: shl v0.4h, v0.4h, #8
; CHECK-NEXT: sqadd v0.4h, v0.4h, v1.4h
; CHECK-NEXT: sshr v0.4h, v0.4h, #8
-; CHECK-NEXT: xtn v0.8b, v0.8h
+; CHECK-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-NEXT: str s0, [x2]
; CHECK-NEXT: ret
%x = load <4 x i8>, ptr %px
diff --git a/llvm/test/CodeGen/AArch64/setcc_knownbits.ll b/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
index bb9546a..8be63b0 100644
--- a/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
+++ b/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define i1 @load_bv_v4i8(i1 zeroext %a) {
; CHECK-LABEL: load_bv_v4i8:
@@ -11,18 +12,31 @@ define i1 @load_bv_v4i8(i1 zeroext %a) {
}
define noundef i1 @logger(i32 noundef %logLevel, ptr %ea, ptr %pll) {
-; CHECK-LABEL: logger:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr w8, [x2]
-; CHECK-NEXT: cmp w8, w0
-; CHECK-NEXT: b.ls .LBB1_2
-; CHECK-NEXT: // %bb.1:
-; CHECK-NEXT: mov w0, wzr
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB1_2: // %land.rhs
-; CHECK-NEXT: ldr x8, [x1]
-; CHECK-NEXT: ldrb w0, [x8]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: logger:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: ldr w8, [x2]
+; CHECK-SD-NEXT: cmp w8, w0
+; CHECK-SD-NEXT: b.ls .LBB1_2
+; CHECK-SD-NEXT: // %bb.1:
+; CHECK-SD-NEXT: mov w0, wzr
+; CHECK-SD-NEXT: ret
+; CHECK-SD-NEXT: .LBB1_2: // %land.rhs
+; CHECK-SD-NEXT: ldr x8, [x1]
+; CHECK-SD-NEXT: ldrb w0, [x8]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: logger:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: ldr w8, [x2]
+; CHECK-GI-NEXT: cmp w8, w0
+; CHECK-GI-NEXT: mov w0, wzr
+; CHECK-GI-NEXT: b.hi .LBB1_2
+; CHECK-GI-NEXT: // %bb.1: // %land.rhs
+; CHECK-GI-NEXT: ldr x8, [x1]
+; CHECK-GI-NEXT: ldrb w8, [x8]
+; CHECK-GI-NEXT: and w0, w8, #0x1
+; CHECK-GI-NEXT: .LBB1_2: // %land.end
+; CHECK-GI-NEXT: ret
entry:
%0 = load i32, ptr %pll, align 4
%cmp.not = icmp ugt i32 %0, %logLevel
@@ -44,12 +58,18 @@ land.end: ; preds = %land.rhs, %entry
declare i64 @llvm.ctlz.i64(i64 %in, i1)
define i1 @lshr_ctlz_undef_cmpeq_one_i64(i64 %in) {
-; CHECK-LABEL: lshr_ctlz_undef_cmpeq_one_i64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: clz x8, x0
-; CHECK-NEXT: lsr x0, x8, #6
-; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: lshr_ctlz_undef_cmpeq_one_i64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: clz x8, x0
+; CHECK-SD-NEXT: lsr x0, x8, #6
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: lshr_ctlz_undef_cmpeq_one_i64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: clz x8, x0
+; CHECK-GI-NEXT: lsr w0, w8, #6
+; CHECK-GI-NEXT: ret
%ctlz = call i64 @llvm.ctlz.i64(i64 %in, i1 -1)
%lshr = lshr i64 %ctlz, 6
%icmp = icmp eq i64 %lshr, 1
@@ -57,17 +77,30 @@ define i1 @lshr_ctlz_undef_cmpeq_one_i64(i64 %in) {
}
define i32 @PR17487(i1 %tobool) {
-; CHECK-LABEL: PR17487:
-; CHECK: // %bb.0:
-; CHECK-NEXT: dup v0.2s, w0
-; CHECK-NEXT: mov w8, #1 // =0x1
-; CHECK-NEXT: dup v1.2d, x8
-; CHECK-NEXT: ushll v0.2d, v0.2s, #0
-; CHECK-NEXT: bic v0.16b, v1.16b, v0.16b
-; CHECK-NEXT: mov x8, v0.d[1]
-; CHECK-NEXT: cmp x8, #1
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: PR17487:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: dup v0.2s, w0
+; CHECK-SD-NEXT: mov w8, #1 // =0x1
+; CHECK-SD-NEXT: dup v1.2d, x8
+; CHECK-SD-NEXT: ushll v0.2d, v0.2s, #0
+; CHECK-SD-NEXT: bic v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: mov x8, v0.d[1]
+; CHECK-SD-NEXT: cmp x8, #1
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: PR17487:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-GI-NEXT: mov v0.d[1], x0
+; CHECK-GI-NEXT: adrp x8, .LCPI3_0
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI3_0]
+; CHECK-GI-NEXT: bic v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: mov d0, v0.d[1]
+; CHECK-GI-NEXT: fmov x8, d0
+; CHECK-GI-NEXT: cmp x8, #1
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
%tmp = insertelement <2 x i1> undef, i1 %tobool, i32 1
%tmp1 = zext <2 x i1> %tmp to <2 x i64>
%tmp2 = xor <2 x i64> %tmp1, <i64 1, i64 1>
diff --git a/llvm/test/CodeGen/AArch64/sext.ll b/llvm/test/CodeGen/AArch64/sext.ll
index 61f04fb..3e0d5dd8 100644
--- a/llvm/test/CodeGen/AArch64/sext.ll
+++ b/llvm/test/CodeGen/AArch64/sext.ll
@@ -280,13 +280,12 @@ define <3 x i64> @sext_v3i8_v3i64(<3 x i8> %a) {
;
; CHECK-GI-LABEL: sext_v3i8_v3i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
-; CHECK-GI-NEXT: fmov d0, x0
-; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-GI-NEXT: fmov s0, w0
; CHECK-GI-NEXT: // kill: def $w2 killed $w2 def $x2
; CHECK-GI-NEXT: sxtb x8, w2
; CHECK-GI-NEXT: fmov d2, x8
-; CHECK-GI-NEXT: mov v0.d[1], x1
+; CHECK-GI-NEXT: mov v0.s[1], w1
+; CHECK-GI-NEXT: ushll v0.2d, v0.2s, #0
; CHECK-GI-NEXT: shl v0.2d, v0.2d, #56
; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #56
; CHECK-GI-NEXT: mov d1, v0.d[1]
@@ -444,13 +443,12 @@ define <3 x i64> @sext_v3i10_v3i64(<3 x i10> %a) {
;
; CHECK-GI-LABEL: sext_v3i10_v3i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
-; CHECK-GI-NEXT: fmov d0, x0
-; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-GI-NEXT: fmov s0, w0
; CHECK-GI-NEXT: // kill: def $w2 killed $w2 def $x2
; CHECK-GI-NEXT: sbfx x8, x2, #0, #10
; CHECK-GI-NEXT: fmov d2, x8
-; CHECK-GI-NEXT: mov v0.d[1], x1
+; CHECK-GI-NEXT: mov v0.s[1], w1
+; CHECK-GI-NEXT: ushll v0.2d, v0.2s, #0
; CHECK-GI-NEXT: shl v0.2d, v0.2d, #54
; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #54
; CHECK-GI-NEXT: mov d1, v0.d[1]
diff --git a/llvm/test/CodeGen/AArch64/shift.ll b/llvm/test/CodeGen/AArch64/shift.ll
index 5287839..9c8d3e0 100644
--- a/llvm/test/CodeGen/AArch64/shift.ll
+++ b/llvm/test/CodeGen/AArch64/shift.ll
@@ -1,13 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
-
-; CHECK-GI: warning: Instruction selection used fallback path for shl_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for shl_v2i16
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for ashr_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for ashr_v2i16
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for lshr_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for lshr_v2i16
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define i1 @shl_i1(i1 %0, i1 %1){
; CHECK-SD-LABEL: shl_i1:
@@ -530,11 +523,38 @@ define <2 x i64> @lshr_v2i64(<2 x i64> %0, <2 x i64> %1){
; ===== Vector Larger/Smaller than Legal =====
define <4 x i8> @shl_v4i8(<4 x i8> %0, <4 x i8> %1){
-; CHECK-LABEL: shl_v4i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: bic v1.4h, #255, lsl #8
-; CHECK-NEXT: ushl v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: shl_v4i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: bic v1.4h, #255, lsl #8
+; CHECK-SD-NEXT: ushl v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shl_v4i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NEXT: mov h4, v0.h[2]
+; CHECK-GI-NEXT: mov h5, v0.h[3]
+; CHECK-GI-NEXT: mov h6, v1.h[3]
+; CHECK-GI-NEXT: mov v0.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov h2, v1.h[2]
+; CHECK-GI-NEXT: mov v1.b[1], v3.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v1.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v5.b[0]
+; CHECK-GI-NEXT: mov v1.b[3], v6.b[0]
+; CHECK-GI-NEXT: ushl v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v3.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = shl <4 x i8> %0, %1
ret <4 x i8> %3
}
@@ -556,12 +576,27 @@ define <32 x i8> @shl_v32i8(<32 x i8> %0, <32 x i8> %1){
}
define <2 x i16> @shl_v2i16(<2 x i16> %0, <2 x i16> %1){
-; CHECK-LABEL: shl_v2i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi d2, #0x00ffff0000ffff
-; CHECK-NEXT: and v1.8b, v1.8b, v2.8b
-; CHECK-NEXT: ushl v0.2s, v0.2s, v1.2s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: shl_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi d2, #0x00ffff0000ffff
+; CHECK-SD-NEXT: and v1.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT: ushl v0.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shl_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov s2, v0.s[1]
+; CHECK-GI-NEXT: mov s3, v1.s[1]
+; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v1.h[1], v3.h[0]
+; CHECK-GI-NEXT: ushl v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = shl <2 x i16> %0, %1
ret <2 x i16> %3
}
@@ -633,14 +668,42 @@ define <4 x i64> @shl_v4i64(<4 x i64> %0, <4 x i64> %1){
}
define <4 x i8> @ashr_v4i8(<4 x i8> %0, <4 x i8> %1){
-; CHECK-LABEL: ashr_v4i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: shl v0.4h, v0.4h, #8
-; CHECK-NEXT: bic v1.4h, #255, lsl #8
-; CHECK-NEXT: sshr v0.4h, v0.4h, #8
-; CHECK-NEXT: neg v1.4h, v1.4h
-; CHECK-NEXT: sshl v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: ashr_v4i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-SD-NEXT: bic v1.4h, #255, lsl #8
+; CHECK-SD-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-SD-NEXT: neg v1.4h, v1.4h
+; CHECK-SD-NEXT: sshl v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: ashr_v4i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov h2, v1.h[1]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov h3, v0.h[1]
+; CHECK-GI-NEXT: mov h4, v1.h[2]
+; CHECK-GI-NEXT: mov h5, v1.h[3]
+; CHECK-GI-NEXT: mov h6, v0.h[3]
+; CHECK-GI-NEXT: mov v1.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov h2, v0.h[2]
+; CHECK-GI-NEXT: mov v0.b[1], v3.b[0]
+; CHECK-GI-NEXT: mov v1.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v1.b[3], v5.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v6.b[0]
+; CHECK-GI-NEXT: neg v1.8b, v1.8b
+; CHECK-GI-NEXT: sshl v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v3.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = ashr <4 x i8> %0, %1
ret <4 x i8> %3
}
@@ -658,15 +721,31 @@ define <32 x i8> @ashr_v32i8(<32 x i8> %0, <32 x i8> %1){
}
define <2 x i16> @ashr_v2i16(<2 x i16> %0, <2 x i16> %1){
-; CHECK-LABEL: ashr_v2i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi d2, #0x00ffff0000ffff
-; CHECK-NEXT: shl v0.2s, v0.2s, #16
-; CHECK-NEXT: sshr v0.2s, v0.2s, #16
-; CHECK-NEXT: and v1.8b, v1.8b, v2.8b
-; CHECK-NEXT: neg v1.2s, v1.2s
-; CHECK-NEXT: sshl v0.2s, v0.2s, v1.2s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: ashr_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi d2, #0x00ffff0000ffff
+; CHECK-SD-NEXT: shl v0.2s, v0.2s, #16
+; CHECK-SD-NEXT: sshr v0.2s, v0.2s, #16
+; CHECK-SD-NEXT: and v1.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT: neg v1.2s, v1.2s
+; CHECK-SD-NEXT: sshl v0.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: ashr_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov s2, v1.s[1]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov s3, v0.s[1]
+; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v0.h[1], v3.h[0]
+; CHECK-GI-NEXT: neg v1.4h, v1.4h
+; CHECK-GI-NEXT: sshl v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = ashr <2 x i16> %0, %1
ret <2 x i16> %3
}
@@ -727,13 +806,41 @@ define <4 x i64> @ashr_v4i64(<4 x i64> %0, <4 x i64> %1){
}
define <4 x i8> @lshr_v4i8(<4 x i8> %0, <4 x i8> %1){
-; CHECK-LABEL: lshr_v4i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: bic v1.4h, #255, lsl #8
-; CHECK-NEXT: bic v0.4h, #255, lsl #8
-; CHECK-NEXT: neg v1.4h, v1.4h
-; CHECK-NEXT: ushl v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: lshr_v4i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: bic v1.4h, #255, lsl #8
+; CHECK-SD-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT: neg v1.4h, v1.4h
+; CHECK-SD-NEXT: ushl v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: lshr_v4i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov h2, v1.h[1]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov h3, v0.h[1]
+; CHECK-GI-NEXT: mov h4, v1.h[2]
+; CHECK-GI-NEXT: mov h5, v1.h[3]
+; CHECK-GI-NEXT: mov h6, v0.h[3]
+; CHECK-GI-NEXT: mov v1.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov h2, v0.h[2]
+; CHECK-GI-NEXT: mov v0.b[1], v3.b[0]
+; CHECK-GI-NEXT: mov v1.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v1.b[3], v5.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v6.b[0]
+; CHECK-GI-NEXT: neg v1.8b, v1.8b
+; CHECK-GI-NEXT: ushl v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v3.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = lshr <4 x i8> %0, %1
ret <4 x i8> %3
}
@@ -751,14 +858,30 @@ define <32 x i8> @lshr_v32i8(<32 x i8> %0, <32 x i8> %1){
}
define <2 x i16> @lshr_v2i16(<2 x i16> %0, <2 x i16> %1){
-; CHECK-LABEL: lshr_v2i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi d2, #0x00ffff0000ffff
-; CHECK-NEXT: and v1.8b, v1.8b, v2.8b
-; CHECK-NEXT: and v0.8b, v0.8b, v2.8b
-; CHECK-NEXT: neg v1.2s, v1.2s
-; CHECK-NEXT: ushl v0.2s, v0.2s, v1.2s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: lshr_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi d2, #0x00ffff0000ffff
+; CHECK-SD-NEXT: and v1.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT: and v0.8b, v0.8b, v2.8b
+; CHECK-SD-NEXT: neg v1.2s, v1.2s
+; CHECK-SD-NEXT: ushl v0.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: lshr_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov s2, v1.s[1]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov s3, v0.s[1]
+; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v0.h[1], v3.h[0]
+; CHECK-GI-NEXT: neg v1.4h, v1.4h
+; CHECK-GI-NEXT: ushl v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = lshr <2 x i16> %0, %1
ret <2 x i16> %3
}
diff --git a/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll b/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
index 0ef6478..fb571ef 100644
--- a/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
+++ b/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
@@ -353,13 +353,17 @@ define <8 x i8> @shuffle4_v8i8_v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x
define <8 x i16> @shuffle4_v4i8_zext(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
; CHECK-LABEL: shuffle4_v4i8_zext:
; CHECK: // %bb.0:
-; CHECK-NEXT: uzp1 v0.8b, v0.8b, v1.8b
-; CHECK-NEXT: uzp1 v1.8b, v2.8b, v3.8b
+; CHECK-NEXT: fmov d5, d2
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: // kill: def $d3 killed $d3 def $q3
; CHECK-NEXT: adrp x8, .LCPI8_0
-; CHECK-NEXT: ushll v2.8h, v0.8b, #0
+; CHECK-NEXT: fmov d4, d0
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI8_0]
-; CHECK-NEXT: ushll v3.8h, v1.8b, #0
-; CHECK-NEXT: tbl v0.16b, { v2.16b, v3.16b }, v0.16b
+; CHECK-NEXT: mov v4.d[1], v1.d[0]
+; CHECK-NEXT: mov v5.d[1], v3.d[0]
+; CHECK-NEXT: bic v4.8h, #255, lsl #8
+; CHECK-NEXT: bic v5.8h, #255, lsl #8
+; CHECK-NEXT: tbl v0.16b, { v4.16b, v5.16b }, v0.16b
; CHECK-NEXT: ret
%x = shufflevector <4 x i8> %a, <4 x i8> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%y = shufflevector <4 x i8> %c, <4 x i8> %d, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
diff --git a/llvm/test/CodeGen/AArch64/shufflevector.ll b/llvm/test/CodeGen/AArch64/shufflevector.ll
index d79f3ae..b1131f2 100644
--- a/llvm/test/CodeGen/AArch64/shufflevector.ll
+++ b/llvm/test/CodeGen/AArch64/shufflevector.ll
@@ -202,7 +202,7 @@ define i32 @shufflevector_v4i8(<4 x i8> %a, <4 x i8> %b){
; CHECK-SD-NEXT: ext v0.8b, v1.8b, v0.8b, #6
; CHECK-SD-NEXT: zip1 v1.4h, v1.4h, v0.4h
; CHECK-SD-NEXT: ext v0.8b, v0.8b, v1.8b, #4
-; CHECK-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: add sp, sp, #16
; CHECK-SD-NEXT: ret
@@ -390,7 +390,7 @@ define i32 @shufflevector_v4i8_zeroes(<4 x i8> %a, <4 x i8> %b){
; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-SD-NEXT: dup v0.4h, v0.h[0]
-; CHECK-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: add sp, sp, #16
; CHECK-SD-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sme-avoid-coalescing-locally-streaming.ll b/llvm/test/CodeGen/AArch64/sme-avoid-coalescing-locally-streaming.ll
new file mode 100644
index 0000000..cd5046a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme-avoid-coalescing-locally-streaming.ll
@@ -0,0 +1,120 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mattr=+sme -stop-after=finalize-isel < %s | FileCheck %s --check-prefix=CHECK-COALESCER-BARRIER
+; RUN: llc -mattr=+sme -stop-after=virtregrewriter < %s | FileCheck %s --check-prefix=CHECK-REGALLOC
+
+target triple = "aarch64"
+
+define void @dont_coalesce_args(<2 x i64> %a) "aarch64_pstate_sm_body" nounwind {
+ ; CHECK-COALESCER-BARRIER-LABEL: name: dont_coalesce_args
+ ; CHECK-COALESCER-BARRIER: bb.0 (%ir-block.0):
+ ; CHECK-COALESCER-BARRIER-NEXT: liveins: $q0
+ ; CHECK-COALESCER-BARRIER-NEXT: {{ $}}
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COALESCER_BARRIER_FPR128_:%[0-9]+]]:fpr128 = COALESCER_BARRIER_FPR128 [[COPY]]
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: [[DEF:%[0-9]+]]:zpr = IMPLICIT_DEF
+ ; CHECK-COALESCER-BARRIER-NEXT: [[INSERT_SUBREG:%[0-9]+]]:zpr = INSERT_SUBREG [[DEF]], [[COALESCER_BARRIER_FPR128_]], %subreg.zsub
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: $z0 = COPY [[INSERT_SUBREG]]
+ ; CHECK-COALESCER-BARRIER-NEXT: BL @scalable_args, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $z0, implicit-def $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: RET_ReallyLR
+ ;
+ ; CHECK-REGALLOC-LABEL: name: dont_coalesce_args
+ ; CHECK-REGALLOC: bb.0 (%ir-block.0):
+ ; CHECK-REGALLOC-NEXT: liveins: $q0
+ ; CHECK-REGALLOC-NEXT: {{ $}}
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = COALESCER_BARRIER_FPR128 killed renamable $q0
+ ; CHECK-REGALLOC-NEXT: STRQui killed renamable $q0, %stack.0, 0 :: (store (s128) into %stack.0)
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = KILL killed renamable $q0, implicit-def $z0
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: BL @scalable_args, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $z0, implicit-def $sp
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: RET_ReallyLR
+ %sa = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> %a, i64 0)
+ call void @scalable_args(<vscale x 2 x i64> %sa)
+ ret void
+}
+
+define <2 x i64> @dont_coalesce_res() "aarch64_pstate_sm_body" nounwind {
+ ; CHECK-COALESCER-BARRIER-LABEL: name: dont_coalesce_res
+ ; CHECK-COALESCER-BARRIER: bb.0 (%ir-block.0):
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: BL @scalable_res, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $z0
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COPY:%[0-9]+]]:zpr = COPY $z0
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY [[COPY]].zsub
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COALESCER_BARRIER_FPR128_:%[0-9]+]]:fpr128 = COALESCER_BARRIER_FPR128 [[COPY1]]
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit-def $q0, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: $q0 = COPY [[COALESCER_BARRIER_FPR128_]]
+ ; CHECK-COALESCER-BARRIER-NEXT: RET_ReallyLR implicit $q0
+ ;
+ ; CHECK-REGALLOC-LABEL: name: dont_coalesce_res
+ ; CHECK-REGALLOC: bb.0 (%ir-block.0):
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: BL @scalable_res, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $z0
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = KILL renamable $q0, implicit killed $z0
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = COALESCER_BARRIER_FPR128 killed renamable $q0
+ ; CHECK-REGALLOC-NEXT: STRQui killed renamable $q0, %stack.0, 0 :: (store (s128) into %stack.0)
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit-def dead $q0, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: $q0 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-REGALLOC-NEXT: RET_ReallyLR implicit $q0
+ %sa = call <vscale x 2 x i64> @scalable_res()
+ %res = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %sa, i64 0)
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @dont_coalesce_arg_that_is_also_res(<2 x i64> %a) "aarch64_pstate_sm_body" nounwind {
+ ; CHECK-COALESCER-BARRIER-LABEL: name: dont_coalesce_arg_that_is_also_res
+ ; CHECK-COALESCER-BARRIER: bb.0 (%ir-block.0):
+ ; CHECK-COALESCER-BARRIER-NEXT: liveins: $q0
+ ; CHECK-COALESCER-BARRIER-NEXT: {{ $}}
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COALESCER_BARRIER_FPR128_:%[0-9]+]]:fpr128 = COALESCER_BARRIER_FPR128 [[COPY]]
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: [[DEF:%[0-9]+]]:zpr = IMPLICIT_DEF
+ ; CHECK-COALESCER-BARRIER-NEXT: [[INSERT_SUBREG:%[0-9]+]]:zpr = INSERT_SUBREG [[DEF]], [[COALESCER_BARRIER_FPR128_]], %subreg.zsub
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: $z0 = COPY [[INSERT_SUBREG]]
+ ; CHECK-COALESCER-BARRIER-NEXT: BL @scalable_args, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $z0, implicit-def $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COALESCER_BARRIER_FPR128_1:%[0-9]+]]:fpr128 = COALESCER_BARRIER_FPR128 [[COALESCER_BARRIER_FPR128_]]
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit-def $q0, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: $q0 = COPY [[COALESCER_BARRIER_FPR128_1]]
+ ; CHECK-COALESCER-BARRIER-NEXT: RET_ReallyLR implicit $q0
+ ;
+ ; CHECK-REGALLOC-LABEL: name: dont_coalesce_arg_that_is_also_res
+ ; CHECK-REGALLOC: bb.0 (%ir-block.0):
+ ; CHECK-REGALLOC-NEXT: liveins: $q0
+ ; CHECK-REGALLOC-NEXT: {{ $}}
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = COALESCER_BARRIER_FPR128 killed renamable $q0
+ ; CHECK-REGALLOC-NEXT: STRQui killed renamable $q0, %stack.0, 0 :: (store (s128) into %stack.0)
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = KILL killed renamable $q0, implicit-def $z0
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: BL @scalable_args, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $z0, implicit-def $sp
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = COALESCER_BARRIER_FPR128 killed renamable $q0
+ ; CHECK-REGALLOC-NEXT: STRQui killed renamable $q0, %stack.0, 0 :: (store (s128) into %stack.0)
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit-def dead $q0, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: $q0 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-REGALLOC-NEXT: RET_ReallyLR implicit $q0
+ %sa = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> %a, i64 0)
+ call void @scalable_args(<vscale x 2 x i64> %sa)
+ ret <2 x i64> %a
+}
+
+declare void @scalable_args(<vscale x 2 x i64>) "aarch64_pstate_sm_enabled"
+declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
+
+declare <vscale x 2 x i64> @scalable_res() "aarch64_pstate_sm_enabled"
+declare <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64>, i64)
diff --git a/llvm/test/CodeGen/AArch64/sme-call-streaming-compatible-to-normal-fn-wihout-sme-attr.ll b/llvm/test/CodeGen/AArch64/sme-call-streaming-compatible-to-normal-fn-wihout-sme-attr.ll
index 3fa1ee5..dba3227 100644
--- a/llvm/test/CodeGen/AArch64/sme-call-streaming-compatible-to-normal-fn-wihout-sme-attr.ll
+++ b/llvm/test/CodeGen/AArch64/sme-call-streaming-compatible-to-normal-fn-wihout-sme-attr.ll
@@ -38,4 +38,43 @@ define void @streaming_compatible() #0 {
declare void @non_streaming()
+
+; Verify that COALESCER_BARRIER is also supported without +sme.
+
+define void @streaming_compatible_arg(float %f) #0 {
+; CHECK-LABEL: streaming_compatible_arg:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #96
+; CHECK-NEXT: stp d15, d14, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: stp d13, d12, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT: str s0, [sp, #12] // 4-byte Folded Spill
+; CHECK-NEXT: bl __arm_sme_state
+; CHECK-NEXT: ldr s0, [sp, #12] // 4-byte Folded Reload
+; CHECK-NEXT: and x19, x0, #0x1
+; CHECK-NEXT: str s0, [sp, #12] // 4-byte Folded Spill
+; CHECK-NEXT: tbz w19, #0, .LBB1_2
+; CHECK-NEXT: // %bb.1:
+; CHECK-NEXT: smstop sm
+; CHECK-NEXT: .LBB1_2:
+; CHECK-NEXT: ldr s0, [sp, #12] // 4-byte Folded Reload
+; CHECK-NEXT: bl non_streaming
+; CHECK-NEXT: tbz w19, #0, .LBB1_4
+; CHECK-NEXT: // %bb.3:
+; CHECK-NEXT: smstart sm
+; CHECK-NEXT: .LBB1_4:
+; CHECK-NEXT: ldp x30, x19, [sp, #80] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d13, d12, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d15, d14, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #96
+; CHECK-NEXT: ret
+ call void @non_streaming(float %f)
+ ret void
+}
+
+
attributes #0 = { nounwind "aarch64_pstate_sm_compatible" }
diff --git a/llvm/test/CodeGen/AArch64/sme-machine-licm-vg.mir b/llvm/test/CodeGen/AArch64/sme-machine-licm-vg.mir
new file mode 100644
index 0000000..e6cce9a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme-machine-licm-vg.mir
@@ -0,0 +1,64 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=aarch64--linux-gnu -run-pass=early-machinelicm %s -verify-machineinstrs -o - | FileCheck %s
+---
+name: test_should_hoist_pfalse
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: test_should_hoist_pfalse
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64all = COPY [[COPY1]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr64all = COPY [[COPY]]
+ ; CHECK-NEXT: [[PFALSE:%[0-9]+]]:ppr = PFALSE implicit $vg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:gpr64common = PHI [[COPY2]], %bb.0, %5, %bb.1
+ ; CHECK-NEXT: [[PHI1:%[0-9]+]]:gpr64sp = PHI [[COPY3]], %bb.0, %7, %bb.1
+ ; CHECK-NEXT: STR_PXI [[PFALSE]], [[PHI]], 0
+ ; CHECK-NEXT: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[PHI1]], 1, 0, implicit-def $nzcv
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr64all = COPY [[SUBSXri]]
+ ; CHECK-NEXT: [[INCD_XPiI:%[0-9]+]]:gpr64 = INCD_XPiI [[PHI]], 31, 1
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr64all = COPY [[INCD_XPiI]]
+ ; CHECK-NEXT: Bcc 1, %bb.1, implicit $nzcv
+ ; CHECK-NEXT: B %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-NEXT: RET_ReallyLR
+ bb.0:
+ successors: %bb.1
+ liveins: $x0, $x1
+
+ %5:gpr64 = COPY $x1
+ %4:gpr64 = COPY $x0
+ MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ %6:gpr64all = COPY %4
+ %7:gpr64all = COPY %5
+
+ bb.1:
+ successors: %bb.2, %bb.1
+
+ %0:gpr64common = PHI %6, %bb.0, %3, %bb.1
+ %1:gpr64sp = PHI %7, %bb.0, %2, %bb.1
+ %8:ppr = PFALSE implicit $vg
+ STR_PXI killed %8, %0, 0
+ %9:gpr64 = SUBSXri %1, 1, 0, implicit-def $nzcv
+ %2:gpr64all = COPY %9
+ %10:gpr64 = INCD_XPiI %0, 31, 1
+ %3:gpr64all = COPY %10
+
+
+ Bcc 1, %bb.1, implicit $nzcv
+ B %bb.2
+
+ bb.2:
+ MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ RET_ReallyLR
+...
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-body-streaming-compatible-interface.ll b/llvm/test/CodeGen/AArch64/sme-streaming-body-streaming-compatible-interface.ll
index d675733..6e262cc 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-body-streaming-compatible-interface.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-body-streaming-compatible-interface.ll
@@ -8,27 +8,31 @@ declare void @streaming_compatible_callee() "aarch64_pstate_sm_compatible";
define float @sm_body_sm_compatible_simple() "aarch64_pstate_sm_compatible" "aarch64_pstate_sm_body" nounwind {
; CHECK-LABEL: sm_body_sm_compatible_simple:
; CHECK: // %bb.0:
-; CHECK-NEXT: stp d15, d14, [sp, #-80]! // 16-byte Folded Spill
-; CHECK-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #96
+; CHECK-NEXT: stp d15, d14, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: stp d13, d12, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill
; CHECK-NEXT: bl __arm_sme_state
; CHECK-NEXT: and x8, x0, #0x1
; CHECK-NEXT: tbnz w8, #0, .LBB0_2
; CHECK-NEXT: // %bb.1:
; CHECK-NEXT: smstart sm
; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: fmov s0, wzr
+; CHECK-NEXT: str s0, [sp, #12] // 4-byte Folded Spill
; CHECK-NEXT: tbnz w8, #0, .LBB0_4
; CHECK-NEXT: // %bb.3:
; CHECK-NEXT: smstop sm
; CHECK-NEXT: .LBB0_4:
-; CHECK-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: fmov s0, wzr
-; CHECK-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload
-; CHECK-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldp d15, d14, [sp], #80 // 16-byte Folded Reload
+; CHECK-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: ldr s0, [sp, #12] // 4-byte Folded Reload
+; CHECK-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload
+; CHECK-NEXT: ldp d13, d12, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d15, d14, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #96
; CHECK-NEXT: ret
ret float zeroinitializer
}
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-body.ll b/llvm/test/CodeGen/AArch64/sme-streaming-body.ll
index 9387554..08dec22 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-body.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-body.ll
@@ -87,29 +87,27 @@ if.end:
define <2 x i64> @locally_streaming_caller_no_callee(<2 x i64> %a) "aarch64_pstate_sm_body" nounwind {
; CHECK-LABEL: locally_streaming_caller_no_callee:
; CHECK: // %bb.0:
-; CHECK-NEXT: stp d15, d14, [sp, #-80]! // 16-byte Folded Spill
-; CHECK-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: str x29, [sp, #64] // 8-byte Folded Spill
-; CHECK-NEXT: addsvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: stp d15, d14, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: stp d13, d12, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
; CHECK-NEXT: smstart sm
; CHECK-NEXT: index z0.d, #0, #1
-; CHECK-NEXT: ldr z1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT: add z0.d, z0.d, z1.d
; CHECK-NEXT: add z0.d, z0.d, #41 // =0x29
-; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: smstop sm
-; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
-; CHECK-NEXT: addsvl sp, sp, #1
-; CHECK-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: ldr x29, [sp, #64] // 8-byte Folded Reload
-; CHECK-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldp d15, d14, [sp], #80 // 16-byte Folded Reload
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: smstop sm
+; CHECK-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d13, d12, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d15, d14, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #80
; CHECK-NEXT: ret
%add = add <2 x i64> %a, <i64 41, i64 42>;
diff --git a/llvm/test/CodeGen/AArch64/sme-write-vg.ll b/llvm/test/CodeGen/AArch64/sme-write-vg.ll
new file mode 100644
index 0000000..577606d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme-write-vg.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mattr=+sme -stop-after=finalize-isel < %s | FileCheck %s
+
+target triple = "aarch64"
+
+; Check that we don't define VG for 'smstart za' and 'smstop za'
+define void @smstart_za() "aarch64_new_za" nounwind {
+ ; CHECK-LABEL: name: smstart_za
+ ; CHECK-NOT: implicit-def {{[^,]*}}$vg
+ ret void
+}
+
+; Check that we do define VG for 'smstart sm' and 'smstop sm'
+define void @smstart_sm() nounwind {
+ ; CHECK-LABEL: name: smstart_sm
+ ; CHECK: MSRpstatesvcrImm1 1, 1,
+ ; CHECK-SAME: implicit-def {{[^,]*}}$vg
+ ; CHECK: MSRpstatesvcrImm1 1, 0,
+ ; CHECK-SAME: implicit-def {{[^,]*}}$vg
+ call void @require_sm()
+ ret void
+}
+
+declare void @require_sm() "aarch64_pstate_sm_enabled"
+declare void @require_za() "aarch64_inout_za"
diff --git a/llvm/test/CodeGen/AArch64/soft-float-abi.ll b/llvm/test/CodeGen/AArch64/soft-float-abi.ll
new file mode 100644
index 0000000..291c387
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/soft-float-abi.ll
@@ -0,0 +1,161 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple aarch64-none-eabi < %s -mattr=-fp-armv8 | FileCheck %s
+
+; See also clang/test/CodeGen/aarch64-soft-float-abi.c, which tests the clang
+; parts of the soft-float ABI.
+
+; FP types up to 64 bits are passed in a general-purpose register.
+define half @test0(half %a, half %b) {
+; CHECK-LABEL: test0:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, w1
+; CHECK-NEXT: ret
+entry:
+ ret half %b
+}
+
+define bfloat @test1(i32 %a, bfloat %b) {
+; CHECK-LABEL: test1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, w1
+; CHECK-NEXT: ret
+entry:
+ ret bfloat %b
+}
+
+define float @test2(i64 %a, float %b) {
+; CHECK-LABEL: test2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, w1
+; CHECK-NEXT: ret
+entry:
+ ret float %b
+}
+
+define double @test3(half %a, double %b) {
+; CHECK-LABEL: test3:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov x0, x1
+; CHECK-NEXT: ret
+entry:
+ ret double %b
+}
+
+; fp128 is passed in a pair of GPRs.
+define fp128 @test4(fp128 %a, fp128 %b) {
+; CHECK-LABEL: test4:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov x1, x3
+; CHECK-NEXT: mov x0, x2
+; CHECK-NEXT: ret
+entry:
+ ret fp128 %b
+}
+
+; fp128 is passed in an aligned pair of GPRs, leaving one register unused if
+; necessary.
+define fp128 @test5(float %a, fp128 %b) {
+; CHECK-LABEL: test5:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov x1, x3
+; CHECK-NEXT: mov x0, x2
+; CHECK-NEXT: ret
+entry:
+ ret fp128 %b
+}
+
+; If the alignment of an fp128 leaves a register unused, it remains unused even
+; if a later argument could fit in it.
+define i64 @test6(i64 %a, fp128 %b, i64 %c) {
+; CHECK-LABEL: test6:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov x0, x4
+; CHECK-NEXT: ret
+entry:
+ ret i64 %c
+}
+
+; HFAs are all bit-casted to integer types in the frontend when using the
+; soft-float ABI, so they get passed in the same way as non-homogeneous
+; aggregates. The IR is identical to the equivalent integer types, so nothing
+; to test here.
+
+; The PCS for vector and HVA types is not defined by the soft-float ABI because
+; these types are only defined by the ACLE when vector hardware is available,
+; so nothing to test here.
+
+; The front-end generates IR for va_arg which always reads from the integer
+; register save area, and never the floating-point register save area. The
+; layout of the va_list type remains the same, but the floating-point-related
+; fields are unused. The only change needed in the backend is in va_start, to
+; not attempt to save the floating-point registers or set the FP fields in the
+; va_list.
+%struct.__va_list = type { ptr, ptr, ptr, i32, i32 }
+declare void @llvm.va_start(ptr)
+define double @test20(i32 %a, ...) {
+; CHECK-LABEL: test20:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #96
+; CHECK-NEXT: .cfi_def_cfa_offset 96
+; CHECK-NEXT: mov w8, #-56 // =0xffffffc8
+; CHECK-NEXT: add x10, sp, #8
+; CHECK-NEXT: add x9, sp, #96
+; CHECK-NEXT: str x8, [sp, #88]
+; CHECK-NEXT: add x10, x10, #56
+; CHECK-NEXT: ldrsw x8, [sp, #88]
+; CHECK-NEXT: stp x1, x2, [sp, #8]
+; CHECK-NEXT: stp x3, x4, [sp, #24]
+; CHECK-NEXT: stp x5, x6, [sp, #40]
+; CHECK-NEXT: stp x7, x9, [sp, #56]
+; CHECK-NEXT: str x10, [sp, #72]
+; CHECK-NEXT: tbz w8, #31, .LBB7_3
+; CHECK-NEXT: // %bb.1: // %vaarg.maybe_reg
+; CHECK-NEXT: add w9, w8, #8
+; CHECK-NEXT: cmn w8, #8
+; CHECK-NEXT: str w9, [sp, #88]
+; CHECK-NEXT: b.gt .LBB7_3
+; CHECK-NEXT: // %bb.2: // %vaarg.in_reg
+; CHECK-NEXT: ldr x9, [sp, #72]
+; CHECK-NEXT: add x8, x9, x8
+; CHECK-NEXT: b .LBB7_4
+; CHECK-NEXT: .LBB7_3: // %vaarg.on_stack
+; CHECK-NEXT: ldr x8, [sp, #64]
+; CHECK-NEXT: add x9, x8, #8
+; CHECK-NEXT: str x9, [sp, #64]
+; CHECK-NEXT: .LBB7_4: // %vaarg.end
+; CHECK-NEXT: ldr x0, [x8]
+; CHECK-NEXT: add sp, sp, #96
+; CHECK-NEXT: ret
+entry:
+ %vl = alloca %struct.__va_list, align 8
+ call void @llvm.va_start(ptr nonnull %vl)
+ %gr_offs_p = getelementptr inbounds %struct.__va_list, ptr %vl, i64 0, i32 3
+ %gr_offs = load i32, ptr %gr_offs_p, align 8
+ %0 = icmp sgt i32 %gr_offs, -1
+ br i1 %0, label %vaarg.on_stack, label %vaarg.maybe_reg
+
+vaarg.maybe_reg: ; preds = %entry
+ %new_reg_offs = add nsw i32 %gr_offs, 8
+ store i32 %new_reg_offs, ptr %gr_offs_p, align 8
+ %inreg = icmp slt i32 %gr_offs, -7
+ br i1 %inreg, label %vaarg.in_reg, label %vaarg.on_stack
+
+vaarg.in_reg: ; preds = %vaarg.maybe_reg
+ %reg_top_p = getelementptr inbounds %struct.__va_list, ptr %vl, i64 0, i32 1
+ %reg_top = load ptr, ptr %reg_top_p, align 8
+ %1 = sext i32 %gr_offs to i64
+ %2 = getelementptr inbounds i8, ptr %reg_top, i64 %1
+ br label %vaarg.end
+
+vaarg.on_stack: ; preds = %vaarg.maybe_reg, %entry
+ %stack = load ptr, ptr %vl, align 8
+ %new_stack = getelementptr inbounds i8, ptr %stack, i64 8
+ store ptr %new_stack, ptr %vl, align 8
+ br label %vaarg.end
+
+vaarg.end: ; preds = %vaarg.on_stack, %vaarg.in_reg
+ %vaargs.addr = phi ptr [ %2, %vaarg.in_reg ], [ %stack, %vaarg.on_stack ]
+ %3 = load double, ptr %vaargs.addr, align 8
+ ret double %3
+}
+
diff --git a/llvm/test/CodeGen/AArch64/srem-vec-crash.ll b/llvm/test/CodeGen/AArch64/srem-vec-crash.ll
new file mode 100644
index 0000000..0fce8de
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/srem-vec-crash.ll
@@ -0,0 +1,15 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64-unknown-unknown < %s | FileCheck %s
+
+define i32 @pr84830(i1 %arg) {
+; CHECK-LABEL: pr84830:
+; CHECK: // %bb.0: // %bb
+; CHECK-NEXT: mov w0, #1 // =0x1
+; CHECK-NEXT: ret
+bb:
+ %new0 = srem i1 %arg, true
+ %last = zext i1 %new0 to i32
+ %i = icmp ne i32 %last, 0
+ %i1 = select i1 %i, i32 0, i32 1
+ ret i32 %i1
+}
diff --git a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
index acec3e7..d1f843a 100644
--- a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
@@ -146,7 +146,7 @@ define void @v4i8(ptr %px, ptr %py, ptr %pz) nounwind {
; CHECK-NEXT: shl v0.4h, v0.4h, #8
; CHECK-NEXT: sqsub v0.4h, v0.4h, v1.4h
; CHECK-NEXT: sshr v0.4h, v0.4h, #8
-; CHECK-NEXT: xtn v0.8b, v0.8h
+; CHECK-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-NEXT: str s0, [x2]
; CHECK-NEXT: ret
%x = load <4 x i8>, ptr %px
diff --git a/llvm/test/CodeGen/AArch64/stack-probing-no-scratch-reg.mir b/llvm/test/CodeGen/AArch64/stack-probing-no-scratch-reg.mir
index f2d79bd..a9c9b5f 100644
--- a/llvm/test/CodeGen/AArch64/stack-probing-no-scratch-reg.mir
+++ b/llvm/test/CodeGen/AArch64/stack-probing-no-scratch-reg.mir
@@ -29,6 +29,7 @@ tracksRegLiveness: true
liveins:
- { reg: '$w0', virtual-reg: '' }
frameInfo:
+ adjustsStack: true
localFrameSize: 150000
stack:
- { id: 0, name: a, type: default, offset: 0, size: 150000, alignment: 8,
diff --git a/llvm/test/CodeGen/AArch64/stack-probing-shrink-wrap.mir b/llvm/test/CodeGen/AArch64/stack-probing-shrink-wrap.mir
index 83aa90d..985ec35 100644
--- a/llvm/test/CodeGen/AArch64/stack-probing-shrink-wrap.mir
+++ b/llvm/test/CodeGen/AArch64/stack-probing-shrink-wrap.mir
@@ -31,6 +31,7 @@ tracksRegLiveness: true
liveins:
- { reg: '$w0', virtual-reg: '' }
frameInfo:
+ adjustsStack: true
localFrameSize: 150000
stack:
- { id: 0, name: a, type: default, offset: 0, size: 150000, alignment: 8,
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll b/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll
index d8969fc..22d177c 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll
@@ -20,10 +20,10 @@ entry:
; CHECK-LABEL: define void @OneVarNoInit(
; CHECK-DAG: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16
; CHECK-DAG: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.{{.*}}(ptr [[X]], {{.*}}, i64 0)
-; CHECK-DAG: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[TX]])
+; CHECK-DAG: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]])
; CHECK-DAG: call void @llvm.aarch64.settag(ptr [[TX]], i64 16)
; CHECK-DAG: call void @use(ptr nonnull [[TX]])
-; CHECK-DAG: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[TX]])
+; CHECK-DAG: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]])
define void @OneVarInitConst() sanitize_memtag {
entry:
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll b/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll
index 6eb7201..5d1c91e 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll
@@ -1,20 +1,18 @@
; Test that storage for allocas with disjoint lifetimes is reused with stack
; tagging.
-; RUN: opt -S -aarch64-stack-tagging %s -o - | \
-; RUN: llc -no-stack-coloring=false -o - | \
+; RUN: llc --mattr=+mte -no-stack-coloring=false -stack-tagging-use-stack-safety=0 -o - %s | \
; RUN: FileCheck %s --check-prefix=COLOR
-; RUN: opt -S -aarch64-stack-tagging %s -o - | \
-; RUN: llc -no-stack-coloring=true -o - | \
+; RUN: llc --mattr=+mte -no-stack-coloring=true -stack-tagging-use-stack-safety=0 -o - %s | \
; RUN: FileCheck %s --check-prefix=NOCOLOR
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64-unknown-linux-android29"
+target triple = "aarch64"
; COLOR: sub sp, sp, #192
-; NOCOLOR: sub sp, sp, #320
+; NOCOLOR: sub sp, sp, #336
-define i32 @myCall_w2(i32 %in) sanitize_hwaddress {
+define i32 @myCall_w2(i32 %in) sanitize_memtag {
entry:
%a = alloca [17 x ptr], align 8
%a2 = alloca [16 x ptr], align 8
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
index 06f8cd5..aa9cccc 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
@@ -27,7 +27,7 @@ S1:
; CHECK: call void @llvm.aarch64.settag(ptr %w, i64 48)
; CHECK-NOT: settag{{.*}}%v
call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w) #1
-; CHECK: call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w.tag)
+; CHECK: call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w)
%b1 = icmp eq i32 %t1, 0
br i1 %b1, label %S2, label %S3
; CHECK-NOT: settag
diff --git a/llvm/test/CodeGen/AArch64/stackmap.ll b/llvm/test/CodeGen/AArch64/stackmap.ll
index ce7dcc4a..995d254 100644
--- a/llvm/test/CodeGen/AArch64/stackmap.ll
+++ b/llvm/test/CodeGen/AArch64/stackmap.ll
@@ -9,11 +9,11 @@
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .hword 0
; Num Functions
-; CHECK-NEXT: .word 14
+; CHECK-NEXT: .word 15
; Num LargeConstants
; CHECK-NEXT: .word 4
; Num Callsites
-; CHECK-NEXT: .word 18
+; CHECK-NEXT: .word 22
; Functions and stack size
; CHECK-NEXT: .xword constantargs
@@ -49,6 +49,9 @@
; CHECK-NEXT: .xword longid
; CHECK-NEXT: .xword 16
; CHECK-NEXT: .xword 4
+; CHECK-NEXT: .xword statepoint_longid
+; CHECK-NEXT: .xword 16
+; CHECK-NEXT: .xword 4
; CHECK-NEXT: .xword clobberLR
; CHECK-NEXT: .xword 112
; CHECK-NEXT: .xword 1
@@ -443,6 +446,26 @@ entry:
ret void
}
+; Test a 64-bit ID for statepoint.
+;
+; CHECK: .xword 4294967295
+; CHECK-LABEL: .word .L{{.*}}-statepoint_longid
+; CHECK: .xword 4294967296
+; CHECK-LABEL: .word .L{{.*}}-statepoint_longid
+; CHECK: .xword 9223372036854775807
+; CHECK-LABEL: .word .L{{.*}}-statepoint_longid
+; CHECK: .xword -1
+; CHECK-LABEL: .word .L{{.*}}-statepoint_longid
+define void @statepoint_longid() gc "statepoint-example" {
+entry:
+ %safepoint_token1 = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 4294967295, i32 0, ptr elementtype(void ()) @return_void, i32 0, i32 0, i32 0, i32 0)
+ %safepoint_token2 = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 4294967296, i32 0, ptr elementtype(void ()) @return_void, i32 0, i32 0, i32 0, i32 0)
+ %safepoint_token3 = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 9223372036854775807, i32 0, ptr elementtype(void ()) @return_void, i32 0, i32 0, i32 0, i32 0)
+ %safepoint_token4 = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 -1, i32 0, ptr elementtype(void ()) @return_void, i32 0, i32 0, i32 0, i32 0)
+ ret void
+}
+declare void @return_void()
+
; Map a value when R11 is the only free register.
; The scratch register should not be used for a live stackmap value.
;
@@ -463,8 +486,8 @@ define void @clobberLR(i32 %a) {
ret void
}
-; A stack frame which needs to be realigned at runtime (to meet alignment
-; criteria for values on the stack) does not have a fixed frame size.
+; A stack frame which needs to be realigned at runtime (to meet alignment
+; criteria for values on the stack) does not have a fixed frame size.
; CHECK-LABEL: .word .L{{.*}}-needsStackRealignment
; CHECK-NEXT: .hword 0
; 0 locations
@@ -537,3 +560,4 @@ define void @floats(float %f, double %g) {
declare void @llvm.experimental.stackmap(i64, i32, ...)
declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
+declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
diff --git a/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll b/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll
index 9fa5208..3db802a 100644
--- a/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll
+++ b/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll
@@ -273,7 +273,7 @@ define void @outgoing_v4f16_return(ptr %ptr) #0 {
; NOFP16-NEXT: strh w0, [x19]
; NOFP16-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
; NOFP16-NEXT: ret
- %val = call <4 x half> @v4f16_result()
+ %val = call <4 x half> @v4f16_result() #0
store <4 x half> %val, ptr %ptr
ret void
}
@@ -297,7 +297,7 @@ define void @outgoing_v8f16_return(ptr %ptr) #0 {
; NOFP16-NEXT: strh w0, [x19]
; NOFP16-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
; NOFP16-NEXT: ret
- %val = call <8 x half> @v8f16_result()
+ %val = call <8 x half> @v8f16_result() #0
store <8 x half> %val, ptr %ptr
ret void
}
@@ -312,7 +312,7 @@ define half @call_split_type_used_outside_block_v8f16() #0 {
; NOFP16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; NOFP16-NEXT: ret
bb0:
- %split.ret.type = call <8 x half> @v8f16_result()
+ %split.ret.type = call <8 x half> @v8f16_result() #0
br label %bb1
bb1:
diff --git a/llvm/test/CodeGen/AArch64/tbl-loops.ll b/llvm/test/CodeGen/AArch64/tbl-loops.ll
index 4f8a4f7..0ad9900 100644
--- a/llvm/test/CodeGen/AArch64/tbl-loops.ll
+++ b/llvm/test/CodeGen/AArch64/tbl-loops.ll
@@ -41,8 +41,8 @@ define void @loop1(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: fcvtzs v2.4s, v2.4s
; CHECK-NEXT: xtn v1.4h, v1.4s
; CHECK-NEXT: xtn v2.4h, v2.4s
-; CHECK-NEXT: xtn v1.8b, v1.8h
-; CHECK-NEXT: xtn v2.8b, v2.8h
+; CHECK-NEXT: uzp1 v1.8b, v1.8b, v0.8b
+; CHECK-NEXT: uzp1 v2.8b, v2.8b, v0.8b
; CHECK-NEXT: mov v1.s[1], v2.s[0]
; CHECK-NEXT: stur d1, [x12, #-4]
; CHECK-NEXT: add x12, x12, #8
diff --git a/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll b/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll
index ba367b0..18cd4cc 100644
--- a/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll
@@ -710,23 +710,23 @@ define void @trunc_v11i64_to_v11i8_in_loop(ptr %A, ptr %dst) {
; CHECK-NEXT: LBB6_1: ; %loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldp q4, q1, [x0, #48]
-; CHECK-NEXT: add x9, x1, #8
-; CHECK-NEXT: ldp q3, q2, [x0]
-; CHECK-NEXT: subs x8, x8, #1
+; CHECK-NEXT: add x9, x1, #10
; CHECK-NEXT: ldr d0, [x0, #80]
+; CHECK-NEXT: ldp q3, q2, [x0]
; CHECK-NEXT: ldr q5, [x0, #32]
+; CHECK-NEXT: subs x8, x8, #1
; CHECK-NEXT: add x0, x0, #128
-; CHECK-NEXT: uzp1.4s v4, v5, v4
-; CHECK-NEXT: uzp1.4s v2, v3, v2
; CHECK-NEXT: uzp1.4s v0, v1, v0
-; CHECK-NEXT: uzp1.8h v1, v2, v4
+; CHECK-NEXT: uzp1.4s v1, v5, v4
+; CHECK-NEXT: uzp1.4s v2, v3, v2
; CHECK-NEXT: xtn.4h v0, v0
-; CHECK-NEXT: uzp1.16b v1, v1, v0
-; CHECK-NEXT: xtn.8b v0, v0
-; CHECK-NEXT: st1.h { v1 }[4], [x9]
-; CHECK-NEXT: add x9, x1, #10
-; CHECK-NEXT: st1.b { v0 }[2], [x9]
-; CHECK-NEXT: str d1, [x1], #16
+; CHECK-NEXT: uzp1.8h v1, v2, v1
+; CHECK-NEXT: uzp1.8b v2, v0, v0
+; CHECK-NEXT: uzp1.16b v0, v1, v0
+; CHECK-NEXT: st1.b { v2 }[2], [x9]
+; CHECK-NEXT: add x9, x1, #8
+; CHECK-NEXT: st1.h { v0 }[4], [x9]
+; CHECK-NEXT: str d0, [x1], #16
; CHECK-NEXT: b.eq LBB6_1
; CHECK-NEXT: ; %bb.2: ; %exit
; CHECK-NEXT: ret
@@ -755,7 +755,7 @@ define void @trunc_v11i64_to_v11i8_in_loop(ptr %A, ptr %dst) {
; CHECK-BE-NEXT: xtn v0.4h, v0.4s
; CHECK-BE-NEXT: uzp1 v1.8h, v1.8h, v2.8h
; CHECK-BE-NEXT: uzp1 v1.16b, v1.16b, v0.16b
-; CHECK-BE-NEXT: xtn v0.8b, v0.8h
+; CHECK-BE-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-BE-NEXT: rev16 v2.16b, v1.16b
; CHECK-BE-NEXT: rev64 v1.16b, v1.16b
; CHECK-BE-NEXT: st1 { v0.b }[2], [x9]
@@ -790,7 +790,7 @@ define void @trunc_v11i64_to_v11i8_in_loop(ptr %A, ptr %dst) {
; CHECK-DISABLE-NEXT: xtn v0.4h, v0.4s
; CHECK-DISABLE-NEXT: uzp1 v1.8h, v1.8h, v2.8h
; CHECK-DISABLE-NEXT: uzp1 v1.16b, v1.16b, v0.16b
-; CHECK-DISABLE-NEXT: xtn v0.8b, v0.8h
+; CHECK-DISABLE-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-DISABLE-NEXT: rev16 v2.16b, v1.16b
; CHECK-DISABLE-NEXT: rev64 v1.16b, v1.16b
; CHECK-DISABLE-NEXT: st1 { v0.b }[2], [x9]
diff --git a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
index e05c65d..f0bbed5 100644
--- a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
@@ -142,7 +142,7 @@ define void @v4i8(ptr %px, ptr %py, ptr %pz) nounwind {
; CHECK-NEXT: movi d0, #0xff00ff00ff00ff
; CHECK-NEXT: uaddl v1.8h, v1.8b, v2.8b
; CHECK-NEXT: umin v0.4h, v1.4h, v0.4h
-; CHECK-NEXT: xtn v0.8b, v0.8h
+; CHECK-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-NEXT: str s0, [x2]
; CHECK-NEXT: ret
%x = load <4 x i8>, ptr %px
diff --git a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
index 05f43e7..82c0327 100644
--- a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
@@ -143,7 +143,7 @@ define void @v4i8(ptr %px, ptr %py, ptr %pz) nounwind {
; CHECK-NEXT: ushll v0.8h, v0.8b, #0
; CHECK-NEXT: ushll v1.8h, v1.8b, #0
; CHECK-NEXT: uqsub v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: xtn v0.8b, v0.8h
+; CHECK-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-NEXT: str s0, [x2]
; CHECK-NEXT: ret
%x = load <4 x i8>, ptr %px
diff --git a/llvm/test/CodeGen/AArch64/vcvt-oversize.ll b/llvm/test/CodeGen/AArch64/vcvt-oversize.ll
index 380bdbc..6119405 100644
--- a/llvm/test/CodeGen/AArch64/vcvt-oversize.ll
+++ b/llvm/test/CodeGen/AArch64/vcvt-oversize.ll
@@ -9,9 +9,8 @@ define <8 x i8> @float_to_i8(ptr %in) {
; CHECK-NEXT: fadd v0.4s, v0.4s, v0.4s
; CHECK-NEXT: fcvtzs v0.4s, v0.4s
; CHECK-NEXT: fcvtzs v1.4s, v1.4s
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: xtn v1.4h, v1.4s
-; CHECK-NEXT: uzp1 v0.8b, v1.8b, v0.8b
+; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: xtn v0.8b, v0.8h
; CHECK-NEXT: ret
%l = load <8 x float>, ptr %in
%scale = fmul <8 x float> %l, <float 2.0, float 2.0, float 2.0, float 2.0, float 2.0, float 2.0, float 2.0, float 2.0>
diff --git a/llvm/test/CodeGen/AArch64/vec-combine-compare-truncate-store.ll b/llvm/test/CodeGen/AArch64/vec-combine-compare-truncate-store.ll
index 9c6ab8d..dd7a9c6 100644
--- a/llvm/test/CodeGen/AArch64/vec-combine-compare-truncate-store.ll
+++ b/llvm/test/CodeGen/AArch64/vec-combine-compare-truncate-store.ll
@@ -210,7 +210,7 @@ define void @no_combine_for_non_bool_truncate(<4 x i32> %vec, ptr %out) {
; CHECK-LABEL: no_combine_for_non_bool_truncate:
; CHECK: ; %bb.0:
; CHECK-NEXT: xtn.4h v0, v0
-; CHECK-NEXT: xtn.8b v0, v0
+; CHECK-NEXT: uzp1.8b v0, v0, v0
; CHECK-NEXT: str s0, [x0]
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll b/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
index 90328f7..71d55df 100644
--- a/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
+++ b/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
@@ -410,7 +410,7 @@ define void @store_trunc_from_64bits(ptr %src, ptr %dst) {
; BE-NEXT: ldrh w8, [x0, #4]
; BE-NEXT: rev32 v0.4h, v0.4h
; BE-NEXT: mov v0.h[2], w8
-; BE-NEXT: xtn v0.8b, v0.8h
+; BE-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; BE-NEXT: rev32 v0.16b, v0.16b
; BE-NEXT: str s0, [sp, #12]
; BE-NEXT: ldrh w9, [sp, #12]
@@ -456,7 +456,7 @@ define void @store_trunc_add_from_64bits(ptr %src, ptr %dst) {
; BE-NEXT: add x8, x8, :lo12:.LCPI11_0
; BE-NEXT: ld1 { v1.4h }, [x8]
; BE-NEXT: add v0.4h, v0.4h, v1.4h
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
@@ -638,7 +638,7 @@ define void @shift_trunc_store(ptr %src, ptr %dst) {
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ld1 { v0.4s }, [x0]
; BE-NEXT: shrn v0.4h, v0.4s, #16
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
@@ -672,7 +672,7 @@ define void @shift_trunc_store_default_align(ptr %src, ptr %dst) {
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ld1 { v0.4s }, [x0]
; BE-NEXT: shrn v0.4h, v0.4s, #16
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
@@ -706,7 +706,7 @@ define void @shift_trunc_store_align_4(ptr %src, ptr %dst) {
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ld1 { v0.4s }, [x0]
; BE-NEXT: shrn v0.4h, v0.4s, #16
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
@@ -741,7 +741,7 @@ define void @shift_trunc_store_const_offset_1(ptr %src, ptr %dst) {
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ld1 { v0.4s }, [x0]
; BE-NEXT: shrn v0.4h, v0.4s, #16
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
@@ -777,7 +777,7 @@ define void @shift_trunc_store_const_offset_3(ptr %src, ptr %dst) {
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ld1 { v0.4s }, [x0]
; BE-NEXT: shrn v0.4h, v0.4s, #16
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
@@ -801,7 +801,7 @@ define void @shift_trunc_volatile_store(ptr %src, ptr %dst) {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: shrn.4h v0, v0, #16
-; CHECK-NEXT: xtn.8b v1, v0
+; CHECK-NEXT: uzp1.8b v1, v0, v0
; CHECK-NEXT: umov.h w8, v0[2]
; CHECK-NEXT: str s1, [sp, #12]
; CHECK-NEXT: ldrh w9, [sp, #12]
@@ -816,7 +816,7 @@ define void @shift_trunc_volatile_store(ptr %src, ptr %dst) {
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ld1 { v0.4s }, [x0]
; BE-NEXT: shrn v0.4h, v0.4s, #16
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
@@ -868,7 +868,7 @@ define void @load_v3i8_zext_to_3xi32_add_trunc_store(ptr %src) {
; BE-NEXT: ushll v0.8h, v0.8b, #0
; BE-NEXT: ld1 { v0.b }[4], [x9]
; BE-NEXT: add v0.4h, v0.4h, v1.4h
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #8]
@@ -921,7 +921,7 @@ define void @load_v3i8_sext_to_3xi32_add_trunc_store(ptr %src) {
; BE-NEXT: ushll v0.8h, v0.8b, #0
; BE-NEXT: ld1 { v0.b }[4], [x9]
; BE-NEXT: add v0.4h, v0.4h, v1.4h
-; BE-NEXT: xtn v1.8b, v0.8h
+; BE-NEXT: uzp1 v1.8b, v0.8b, v0.8b
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #8]
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-add.ll b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
index 66b4946..66ef436 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-add.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
@@ -4,11 +4,6 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-BASE
; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel -global-isel-abort=2 %s -o - -mattr=+dotprod 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-DOT
-; CHECK-GI-BASE: warning: Instruction selection used fallback path for test_udot_v24i8
-; CHECK-GI-BASE-NEXT: warning: Instruction selection used fallback path for test_udot_v48i8
-; CHECK-GI-BASE-NEXT: warning: Instruction selection used fallback path for test_sdot_v24i8
-; CHECK-GI-BASE-NEXT: warning: Instruction selection used fallback path for test_sdot_v48i8
-
define i32 @addv_v2i32(<2 x i32> %a) {
; CHECK-LABEL: addv_v2i32:
; CHECK: // %bb.0: // %entry
@@ -2070,126 +2065,50 @@ define i32 @test_udot_v24i8(ptr %p1, ptr %p2) {
; CHECK-GI-BASE: // %bb.0: // %entry
; CHECK-GI-BASE-NEXT: ldr q0, [x0]
; CHECK-GI-BASE-NEXT: ldr q1, [x1]
-; CHECK-GI-BASE-NEXT: ldr d4, [x0, #16]
-; CHECK-GI-BASE-NEXT: ldr d5, [x1, #16]
-; CHECK-GI-BASE-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v3.8h, v1.8b, #0
+; CHECK-GI-BASE-NEXT: ldr d2, [x0, #16]
+; CHECK-GI-BASE-NEXT: ldr d3, [x1, #16]
+; CHECK-GI-BASE-NEXT: ushll v4.8h, v0.8b, #0
; CHECK-GI-BASE-NEXT: ushll2 v0.8h, v0.16b, #0
+; CHECK-GI-BASE-NEXT: ushll v5.8h, v1.8b, #0
+; CHECK-GI-BASE-NEXT: ushll v2.8h, v2.8b, #0
; CHECK-GI-BASE-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: umull v6.4s, v3.4h, v2.4h
-; CHECK-GI-BASE-NEXT: umull2 v2.4s, v3.8h, v2.8h
-; CHECK-GI-BASE-NEXT: ushll v3.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v4.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: umlal2 v2.4s, v4.8h, v3.8h
-; CHECK-GI-BASE-NEXT: umlal v6.4s, v4.4h, v3.4h
-; CHECK-GI-BASE-NEXT: umlal2 v2.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: umlal v6.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: add v0.4s, v6.4s, v2.4s
+; CHECK-GI-BASE-NEXT: ushll v3.8h, v3.8b, #0
+; CHECK-GI-BASE-NEXT: umull v6.4s, v5.4h, v4.4h
+; CHECK-GI-BASE-NEXT: umull2 v4.4s, v5.8h, v4.8h
+; CHECK-GI-BASE-NEXT: umull2 v5.4s, v1.8h, v0.8h
+; CHECK-GI-BASE-NEXT: umull v7.4s, v3.4h, v2.4h
+; CHECK-GI-BASE-NEXT: umull v0.4s, v1.4h, v0.4h
+; CHECK-GI-BASE-NEXT: umull2 v1.4s, v3.8h, v2.8h
+; CHECK-GI-BASE-NEXT: addv s2, v6.4s
+; CHECK-GI-BASE-NEXT: addv s3, v4.4s
+; CHECK-GI-BASE-NEXT: addv s4, v5.4s
+; CHECK-GI-BASE-NEXT: addv s5, v7.4s
; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: addv s1, v1.4s
+; CHECK-GI-BASE-NEXT: fmov w8, s2
+; CHECK-GI-BASE-NEXT: fmov w9, s3
+; CHECK-GI-BASE-NEXT: fmov w10, s4
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s1
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v24i8:
; CHECK-GI-DOT: // %bb.0: // %entry
-; CHECK-GI-DOT-NEXT: ldr b1, [x0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #1]
; CHECK-GI-DOT-NEXT: movi v0.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: ldr b2, [x1]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #1]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #8]
-; CHECK-GI-DOT-NEXT: mov v1.b[1], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #2]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #8]
-; CHECK-GI-DOT-NEXT: mov v2.b[1], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #2]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #17]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #17]
-; CHECK-GI-DOT-NEXT: mov v1.b[2], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #3]
-; CHECK-GI-DOT-NEXT: mov v2.b[2], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #3]
-; CHECK-GI-DOT-NEXT: mov v1.b[3], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #4]
-; CHECK-GI-DOT-NEXT: mov v2.b[3], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #4]
-; CHECK-GI-DOT-NEXT: mov v1.b[4], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #5]
-; CHECK-GI-DOT-NEXT: mov v2.b[4], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #5]
-; CHECK-GI-DOT-NEXT: mov v1.b[5], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #6]
-; CHECK-GI-DOT-NEXT: mov v2.b[5], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #6]
-; CHECK-GI-DOT-NEXT: mov v1.b[6], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #7]
-; CHECK-GI-DOT-NEXT: mov v2.b[6], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #7]
-; CHECK-GI-DOT-NEXT: mov v1.b[7], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #16]
-; CHECK-GI-DOT-NEXT: mov v2.b[7], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #16]
-; CHECK-GI-DOT-NEXT: mov v3.b[1], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #18]
-; CHECK-GI-DOT-NEXT: mov v4.b[1], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #18]
-; CHECK-GI-DOT-NEXT: mov v1.b[8], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #9]
-; CHECK-GI-DOT-NEXT: mov v2.b[8], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #9]
-; CHECK-GI-DOT-NEXT: mov v3.b[2], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #19]
-; CHECK-GI-DOT-NEXT: mov v4.b[2], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #19]
-; CHECK-GI-DOT-NEXT: mov v1.b[9], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #10]
-; CHECK-GI-DOT-NEXT: mov v2.b[9], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #10]
-; CHECK-GI-DOT-NEXT: mov v3.b[3], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #20]
-; CHECK-GI-DOT-NEXT: mov v4.b[3], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #20]
-; CHECK-GI-DOT-NEXT: mov v1.b[10], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #11]
-; CHECK-GI-DOT-NEXT: mov v2.b[10], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #11]
-; CHECK-GI-DOT-NEXT: mov v3.b[4], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #21]
-; CHECK-GI-DOT-NEXT: mov v4.b[4], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #21]
-; CHECK-GI-DOT-NEXT: mov v1.b[11], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #12]
-; CHECK-GI-DOT-NEXT: mov v2.b[11], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #12]
-; CHECK-GI-DOT-NEXT: mov v3.b[5], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #22]
-; CHECK-GI-DOT-NEXT: mov v4.b[5], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #22]
-; CHECK-GI-DOT-NEXT: mov v1.b[12], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #13]
-; CHECK-GI-DOT-NEXT: mov v2.b[12], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #13]
-; CHECK-GI-DOT-NEXT: mov v3.b[6], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #23]
-; CHECK-GI-DOT-NEXT: mov v4.b[6], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #23]
-; CHECK-GI-DOT-NEXT: mov v1.b[13], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #14]
-; CHECK-GI-DOT-NEXT: mov v2.b[13], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #14]
-; CHECK-GI-DOT-NEXT: mov v3.b[7], v7.b[0]
-; CHECK-GI-DOT-NEXT: mov v4.b[7], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[14], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #15]
-; CHECK-GI-DOT-NEXT: mov v2.b[14], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #15]
-; CHECK-GI-DOT-NEXT: fmov d3, d3
-; CHECK-GI-DOT-NEXT: fmov d4, d4
-; CHECK-GI-DOT-NEXT: mov v1.b[15], v5.b[0]
-; CHECK-GI-DOT-NEXT: movi v5.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: mov v2.b[15], v6.b[0]
-; CHECK-GI-DOT-NEXT: udot v0.4s, v4.16b, v3.16b
-; CHECK-GI-DOT-NEXT: udot v5.4s, v2.16b, v1.16b
-; CHECK-GI-DOT-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-GI-DOT-NEXT: movi v1.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: ldr q2, [x0]
+; CHECK-GI-DOT-NEXT: ldr d3, [x0, #16]
+; CHECK-GI-DOT-NEXT: ldr q4, [x1]
+; CHECK-GI-DOT-NEXT: ldr d5, [x1, #16]
+; CHECK-GI-DOT-NEXT: udot v1.4s, v4.16b, v2.16b
+; CHECK-GI-DOT-NEXT: udot v0.4s, v5.16b, v3.16b
+; CHECK-GI-DOT-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-DOT-NEXT: addv s0, v0.4s
; CHECK-GI-DOT-NEXT: fmov w0, s0
; CHECK-GI-DOT-NEXT: ret
@@ -2257,243 +2176,91 @@ define i32 @test_udot_v48i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_udot_v48i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: ldp q0, q4, [x1]
-; CHECK-GI-BASE-NEXT: ldr q2, [x0, #32]
-; CHECK-GI-BASE-NEXT: ldp q1, q3, [x0]
-; CHECK-GI-BASE-NEXT: ldr q7, [x1, #32]
-; CHECK-GI-BASE-NEXT: ushll2 v16.8h, v2.16b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v6.8h, v0.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v17.8h, v7.16b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v5.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: umull2 v18.4s, v6.8h, v5.8h
-; CHECK-GI-BASE-NEXT: umull v19.4s, v0.4h, v1.4h
-; CHECK-GI-BASE-NEXT: umull v5.4s, v6.4h, v5.4h
-; CHECK-GI-BASE-NEXT: umull2 v0.4s, v0.8h, v1.8h
-; CHECK-GI-BASE-NEXT: ushll v1.8h, v2.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v2.8h, v7.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v6.8h, v3.16b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v7.8h, v4.16b, #0
-; CHECK-GI-BASE-NEXT: umlal2 v18.4s, v17.8h, v16.8h
-; CHECK-GI-BASE-NEXT: umlal v5.4s, v17.4h, v16.4h
-; CHECK-GI-BASE-NEXT: umlal v19.4s, v2.4h, v1.4h
-; CHECK-GI-BASE-NEXT: umlal2 v0.4s, v2.8h, v1.8h
-; CHECK-GI-BASE-NEXT: ushll v1.8h, v3.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v2.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: umlal2 v18.4s, v7.8h, v6.8h
-; CHECK-GI-BASE-NEXT: umlal v5.4s, v7.4h, v6.4h
-; CHECK-GI-BASE-NEXT: umlal v19.4s, v2.4h, v1.4h
-; CHECK-GI-BASE-NEXT: umlal2 v0.4s, v2.8h, v1.8h
-; CHECK-GI-BASE-NEXT: add v1.4s, v19.4s, v5.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v18.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-GI-BASE-NEXT: ldp q0, q3, [x1]
+; CHECK-GI-BASE-NEXT: ldr q6, [x1, #32]
+; CHECK-GI-BASE-NEXT: ldp q1, q2, [x0]
+; CHECK-GI-BASE-NEXT: ldr q17, [x0, #32]
+; CHECK-GI-BASE-NEXT: ushll v4.8h, v0.8b, #0
+; CHECK-GI-BASE-NEXT: ushll2 v0.8h, v0.16b, #0
+; CHECK-GI-BASE-NEXT: ushll v7.8h, v3.8b, #0
+; CHECK-GI-BASE-NEXT: ushll v5.8h, v1.8b, #0
+; CHECK-GI-BASE-NEXT: ushll2 v1.8h, v1.16b, #0
+; CHECK-GI-BASE-NEXT: ushll v16.8h, v2.8b, #0
+; CHECK-GI-BASE-NEXT: ushll2 v3.8h, v3.16b, #0
+; CHECK-GI-BASE-NEXT: ushll2 v2.8h, v2.16b, #0
+; CHECK-GI-BASE-NEXT: umull v18.4s, v4.4h, v5.4h
+; CHECK-GI-BASE-NEXT: umull2 v4.4s, v4.8h, v5.8h
+; CHECK-GI-BASE-NEXT: umull2 v19.4s, v0.8h, v1.8h
+; CHECK-GI-BASE-NEXT: umull v20.4s, v7.4h, v16.4h
+; CHECK-GI-BASE-NEXT: umull v0.4s, v0.4h, v1.4h
+; CHECK-GI-BASE-NEXT: ushll v5.8h, v6.8b, #0
+; CHECK-GI-BASE-NEXT: ushll v1.8h, v17.8b, #0
+; CHECK-GI-BASE-NEXT: umull2 v7.4s, v7.8h, v16.8h
+; CHECK-GI-BASE-NEXT: ushll2 v6.8h, v6.16b, #0
+; CHECK-GI-BASE-NEXT: ushll2 v17.8h, v17.16b, #0
+; CHECK-GI-BASE-NEXT: addv s16, v18.4s
+; CHECK-GI-BASE-NEXT: addv s4, v4.4s
+; CHECK-GI-BASE-NEXT: umull v18.4s, v3.4h, v2.4h
+; CHECK-GI-BASE-NEXT: umull2 v2.4s, v3.8h, v2.8h
+; CHECK-GI-BASE-NEXT: addv s3, v19.4s
+; CHECK-GI-BASE-NEXT: umull v19.4s, v5.4h, v1.4h
+; CHECK-GI-BASE-NEXT: umull2 v1.4s, v5.8h, v1.8h
+; CHECK-GI-BASE-NEXT: addv s5, v20.4s
; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: addv s7, v7.4s
+; CHECK-GI-BASE-NEXT: umull v20.4s, v6.4h, v17.4h
+; CHECK-GI-BASE-NEXT: umull2 v6.4s, v6.8h, v17.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s16
+; CHECK-GI-BASE-NEXT: fmov w9, s4
+; CHECK-GI-BASE-NEXT: fmov w10, s3
+; CHECK-GI-BASE-NEXT: addv s3, v18.4s
+; CHECK-GI-BASE-NEXT: addv s2, v2.4s
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: addv s4, v19.4s
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: addv s0, v1.4s
+; CHECK-GI-BASE-NEXT: addv s1, v20.4s
+; CHECK-GI-BASE-NEXT: addv s5, v6.4s
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s3
+; CHECK-GI-BASE-NEXT: fmov w12, s2
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s7
+; CHECK-GI-BASE-NEXT: add w9, w10, w9
+; CHECK-GI-BASE-NEXT: add w10, w11, w12
+; CHECK-GI-BASE-NEXT: fmov w11, s4
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w10, s0
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: add w9, w9, w10
+; CHECK-GI-BASE-NEXT: fmov w10, s1
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v48i8:
; CHECK-GI-DOT: // %bb.0: // %entry
-; CHECK-GI-DOT-NEXT: ldr b1, [x0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #1]
; CHECK-GI-DOT-NEXT: movi v0.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: ldr b2, [x0, #16]
-; CHECK-GI-DOT-NEXT: ldr b6, [x0, #17]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #1]
-; CHECK-GI-DOT-NEXT: mov v1.b[1], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x1, #16]
-; CHECK-GI-DOT-NEXT: ldr b18, [x1, #17]
-; CHECK-GI-DOT-NEXT: mov v2.b[1], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #32]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #33]
-; CHECK-GI-DOT-NEXT: mov v4.b[1], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #32]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #33]
-; CHECK-GI-DOT-NEXT: mov v5.b[1], v18.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #2]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #18]
-; CHECK-GI-DOT-NEXT: mov v3.b[1], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #2]
-; CHECK-GI-DOT-NEXT: mov v6.b[1], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[2], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #18]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #34]
-; CHECK-GI-DOT-NEXT: mov v2.b[2], v18.b[0]
-; CHECK-GI-DOT-NEXT: ldr b18, [x1, #34]
-; CHECK-GI-DOT-NEXT: mov v4.b[2], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #3]
-; CHECK-GI-DOT-NEXT: mov v5.b[2], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #19]
-; CHECK-GI-DOT-NEXT: mov v3.b[2], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #19]
-; CHECK-GI-DOT-NEXT: mov v6.b[2], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[3], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #3]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #35]
-; CHECK-GI-DOT-NEXT: mov v2.b[3], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #35]
-; CHECK-GI-DOT-NEXT: mov v4.b[3], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #4]
-; CHECK-GI-DOT-NEXT: mov v5.b[3], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #20]
-; CHECK-GI-DOT-NEXT: mov v3.b[3], v18.b[0]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #36]
-; CHECK-GI-DOT-NEXT: mov v6.b[3], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[4], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #4]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #20]
-; CHECK-GI-DOT-NEXT: mov v2.b[4], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #36]
-; CHECK-GI-DOT-NEXT: mov v4.b[4], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #5]
-; CHECK-GI-DOT-NEXT: mov v5.b[4], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #21]
-; CHECK-GI-DOT-NEXT: mov v3.b[4], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[4], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[5], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #5]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #21]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #37]
-; CHECK-GI-DOT-NEXT: mov v2.b[5], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #37]
-; CHECK-GI-DOT-NEXT: mov v4.b[5], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #6]
-; CHECK-GI-DOT-NEXT: mov v5.b[5], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #22]
-; CHECK-GI-DOT-NEXT: mov v3.b[5], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[5], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[6], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #6]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #22]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #38]
-; CHECK-GI-DOT-NEXT: mov v2.b[6], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #38]
-; CHECK-GI-DOT-NEXT: mov v4.b[6], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #7]
-; CHECK-GI-DOT-NEXT: mov v5.b[6], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #23]
-; CHECK-GI-DOT-NEXT: mov v3.b[6], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[6], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[7], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #7]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #23]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #39]
-; CHECK-GI-DOT-NEXT: mov v2.b[7], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #39]
-; CHECK-GI-DOT-NEXT: mov v4.b[7], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #8]
-; CHECK-GI-DOT-NEXT: mov v5.b[7], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #24]
-; CHECK-GI-DOT-NEXT: mov v3.b[7], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[7], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[8], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #8]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #24]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #40]
-; CHECK-GI-DOT-NEXT: mov v2.b[8], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #40]
-; CHECK-GI-DOT-NEXT: mov v4.b[8], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #9]
-; CHECK-GI-DOT-NEXT: mov v5.b[8], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #25]
-; CHECK-GI-DOT-NEXT: mov v3.b[8], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[8], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[9], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #9]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #25]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #41]
-; CHECK-GI-DOT-NEXT: mov v2.b[9], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #41]
-; CHECK-GI-DOT-NEXT: mov v4.b[9], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #10]
-; CHECK-GI-DOT-NEXT: mov v5.b[9], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #26]
-; CHECK-GI-DOT-NEXT: mov v3.b[9], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[9], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[10], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #10]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #26]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #42]
-; CHECK-GI-DOT-NEXT: mov v2.b[10], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #42]
-; CHECK-GI-DOT-NEXT: mov v4.b[10], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #11]
-; CHECK-GI-DOT-NEXT: mov v5.b[10], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #27]
-; CHECK-GI-DOT-NEXT: mov v3.b[10], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[10], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[11], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #11]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #27]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #43]
-; CHECK-GI-DOT-NEXT: mov v2.b[11], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #43]
-; CHECK-GI-DOT-NEXT: mov v4.b[11], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #12]
-; CHECK-GI-DOT-NEXT: mov v5.b[11], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #28]
-; CHECK-GI-DOT-NEXT: mov v3.b[11], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[11], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[12], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #12]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #28]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #44]
-; CHECK-GI-DOT-NEXT: mov v2.b[12], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #44]
-; CHECK-GI-DOT-NEXT: mov v4.b[12], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #13]
-; CHECK-GI-DOT-NEXT: mov v5.b[12], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #29]
-; CHECK-GI-DOT-NEXT: mov v3.b[12], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[12], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[13], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #13]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #29]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #45]
-; CHECK-GI-DOT-NEXT: mov v2.b[13], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #45]
-; CHECK-GI-DOT-NEXT: mov v4.b[13], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #14]
-; CHECK-GI-DOT-NEXT: mov v5.b[13], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #30]
-; CHECK-GI-DOT-NEXT: mov v3.b[13], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[13], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[14], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #14]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #30]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #46]
-; CHECK-GI-DOT-NEXT: mov v2.b[14], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #46]
-; CHECK-GI-DOT-NEXT: mov v4.b[14], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #15]
-; CHECK-GI-DOT-NEXT: mov v5.b[14], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #31]
-; CHECK-GI-DOT-NEXT: mov v3.b[14], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[14], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[15], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #15]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #31]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #47]
-; CHECK-GI-DOT-NEXT: mov v2.b[15], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #47]
-; CHECK-GI-DOT-NEXT: mov v4.b[15], v7.b[0]
-; CHECK-GI-DOT-NEXT: movi v7.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: mov v5.b[15], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v3.b[15], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[15], v16.b[0]
-; CHECK-GI-DOT-NEXT: movi v16.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: udot v0.4s, v4.16b, v1.16b
-; CHECK-GI-DOT-NEXT: udot v7.4s, v5.16b, v2.16b
-; CHECK-GI-DOT-NEXT: udot v16.4s, v6.16b, v3.16b
+; CHECK-GI-DOT-NEXT: movi v1.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: ldr q7, [x0, #32]
+; CHECK-GI-DOT-NEXT: ldp q3, q4, [x0]
+; CHECK-GI-DOT-NEXT: movi v2.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: ldp q5, q6, [x1]
+; CHECK-GI-DOT-NEXT: ldr q16, [x1, #32]
+; CHECK-GI-DOT-NEXT: udot v0.4s, v5.16b, v3.16b
+; CHECK-GI-DOT-NEXT: udot v1.4s, v6.16b, v4.16b
+; CHECK-GI-DOT-NEXT: udot v2.4s, v16.16b, v7.16b
; CHECK-GI-DOT-NEXT: addv s0, v0.4s
-; CHECK-GI-DOT-NEXT: addv s1, v7.4s
-; CHECK-GI-DOT-NEXT: addv s2, v16.4s
+; CHECK-GI-DOT-NEXT: addv s1, v1.4s
+; CHECK-GI-DOT-NEXT: addv s2, v2.4s
; CHECK-GI-DOT-NEXT: fmov w8, s0
; CHECK-GI-DOT-NEXT: fmov w9, s1
-; CHECK-GI-DOT-NEXT: fmov w10, s2
; CHECK-GI-DOT-NEXT: add w8, w8, w9
-; CHECK-GI-DOT-NEXT: add w0, w8, w10
+; CHECK-GI-DOT-NEXT: fmov w9, s2
+; CHECK-GI-DOT-NEXT: add w0, w8, w9
; CHECK-GI-DOT-NEXT: ret
entry:
%a = load <48 x i8>, ptr %p1
@@ -2648,126 +2415,50 @@ define i32 @test_sdot_v24i8(ptr %p1, ptr %p2) {
; CHECK-GI-BASE: // %bb.0: // %entry
; CHECK-GI-BASE-NEXT: ldr q0, [x0]
; CHECK-GI-BASE-NEXT: ldr q1, [x1]
-; CHECK-GI-BASE-NEXT: ldr d4, [x0, #16]
-; CHECK-GI-BASE-NEXT: ldr d5, [x1, #16]
-; CHECK-GI-BASE-NEXT: sshll v2.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v3.8h, v1.8b, #0
+; CHECK-GI-BASE-NEXT: ldr d2, [x0, #16]
+; CHECK-GI-BASE-NEXT: ldr d3, [x1, #16]
+; CHECK-GI-BASE-NEXT: sshll v4.8h, v0.8b, #0
; CHECK-GI-BASE-NEXT: sshll2 v0.8h, v0.16b, #0
+; CHECK-GI-BASE-NEXT: sshll v5.8h, v1.8b, #0
+; CHECK-GI-BASE-NEXT: sshll v2.8h, v2.8b, #0
; CHECK-GI-BASE-NEXT: sshll2 v1.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: smull v6.4s, v3.4h, v2.4h
-; CHECK-GI-BASE-NEXT: smull2 v2.4s, v3.8h, v2.8h
-; CHECK-GI-BASE-NEXT: sshll v3.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v4.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: smlal2 v2.4s, v4.8h, v3.8h
-; CHECK-GI-BASE-NEXT: smlal v6.4s, v4.4h, v3.4h
-; CHECK-GI-BASE-NEXT: smlal2 v2.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: smlal v6.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: add v0.4s, v6.4s, v2.4s
+; CHECK-GI-BASE-NEXT: sshll v3.8h, v3.8b, #0
+; CHECK-GI-BASE-NEXT: smull v6.4s, v5.4h, v4.4h
+; CHECK-GI-BASE-NEXT: smull2 v4.4s, v5.8h, v4.8h
+; CHECK-GI-BASE-NEXT: smull2 v5.4s, v1.8h, v0.8h
+; CHECK-GI-BASE-NEXT: smull v7.4s, v3.4h, v2.4h
+; CHECK-GI-BASE-NEXT: smull v0.4s, v1.4h, v0.4h
+; CHECK-GI-BASE-NEXT: smull2 v1.4s, v3.8h, v2.8h
+; CHECK-GI-BASE-NEXT: addv s2, v6.4s
+; CHECK-GI-BASE-NEXT: addv s3, v4.4s
+; CHECK-GI-BASE-NEXT: addv s4, v5.4s
+; CHECK-GI-BASE-NEXT: addv s5, v7.4s
; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: addv s1, v1.4s
+; CHECK-GI-BASE-NEXT: fmov w8, s2
+; CHECK-GI-BASE-NEXT: fmov w9, s3
+; CHECK-GI-BASE-NEXT: fmov w10, s4
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s1
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v24i8:
; CHECK-GI-DOT: // %bb.0: // %entry
-; CHECK-GI-DOT-NEXT: ldr b1, [x0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #1]
; CHECK-GI-DOT-NEXT: movi v0.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: ldr b2, [x1]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #1]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #8]
-; CHECK-GI-DOT-NEXT: mov v1.b[1], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #2]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #8]
-; CHECK-GI-DOT-NEXT: mov v2.b[1], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #2]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #17]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #17]
-; CHECK-GI-DOT-NEXT: mov v1.b[2], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #3]
-; CHECK-GI-DOT-NEXT: mov v2.b[2], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #3]
-; CHECK-GI-DOT-NEXT: mov v1.b[3], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #4]
-; CHECK-GI-DOT-NEXT: mov v2.b[3], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #4]
-; CHECK-GI-DOT-NEXT: mov v1.b[4], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #5]
-; CHECK-GI-DOT-NEXT: mov v2.b[4], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #5]
-; CHECK-GI-DOT-NEXT: mov v1.b[5], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #6]
-; CHECK-GI-DOT-NEXT: mov v2.b[5], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #6]
-; CHECK-GI-DOT-NEXT: mov v1.b[6], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #7]
-; CHECK-GI-DOT-NEXT: mov v2.b[6], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #7]
-; CHECK-GI-DOT-NEXT: mov v1.b[7], v3.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #16]
-; CHECK-GI-DOT-NEXT: mov v2.b[7], v4.b[0]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1, #16]
-; CHECK-GI-DOT-NEXT: mov v3.b[1], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #18]
-; CHECK-GI-DOT-NEXT: mov v4.b[1], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #18]
-; CHECK-GI-DOT-NEXT: mov v1.b[8], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #9]
-; CHECK-GI-DOT-NEXT: mov v2.b[8], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #9]
-; CHECK-GI-DOT-NEXT: mov v3.b[2], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #19]
-; CHECK-GI-DOT-NEXT: mov v4.b[2], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #19]
-; CHECK-GI-DOT-NEXT: mov v1.b[9], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #10]
-; CHECK-GI-DOT-NEXT: mov v2.b[9], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #10]
-; CHECK-GI-DOT-NEXT: mov v3.b[3], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #20]
-; CHECK-GI-DOT-NEXT: mov v4.b[3], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #20]
-; CHECK-GI-DOT-NEXT: mov v1.b[10], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #11]
-; CHECK-GI-DOT-NEXT: mov v2.b[10], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #11]
-; CHECK-GI-DOT-NEXT: mov v3.b[4], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #21]
-; CHECK-GI-DOT-NEXT: mov v4.b[4], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #21]
-; CHECK-GI-DOT-NEXT: mov v1.b[11], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #12]
-; CHECK-GI-DOT-NEXT: mov v2.b[11], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #12]
-; CHECK-GI-DOT-NEXT: mov v3.b[5], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #22]
-; CHECK-GI-DOT-NEXT: mov v4.b[5], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #22]
-; CHECK-GI-DOT-NEXT: mov v1.b[12], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #13]
-; CHECK-GI-DOT-NEXT: mov v2.b[12], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #13]
-; CHECK-GI-DOT-NEXT: mov v3.b[6], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #23]
-; CHECK-GI-DOT-NEXT: mov v4.b[6], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #23]
-; CHECK-GI-DOT-NEXT: mov v1.b[13], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #14]
-; CHECK-GI-DOT-NEXT: mov v2.b[13], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #14]
-; CHECK-GI-DOT-NEXT: mov v3.b[7], v7.b[0]
-; CHECK-GI-DOT-NEXT: mov v4.b[7], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[14], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #15]
-; CHECK-GI-DOT-NEXT: mov v2.b[14], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #15]
-; CHECK-GI-DOT-NEXT: fmov d3, d3
-; CHECK-GI-DOT-NEXT: fmov d4, d4
-; CHECK-GI-DOT-NEXT: mov v1.b[15], v5.b[0]
-; CHECK-GI-DOT-NEXT: movi v5.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: mov v2.b[15], v6.b[0]
-; CHECK-GI-DOT-NEXT: sdot v0.4s, v4.16b, v3.16b
-; CHECK-GI-DOT-NEXT: sdot v5.4s, v2.16b, v1.16b
-; CHECK-GI-DOT-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-GI-DOT-NEXT: movi v1.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: ldr q2, [x0]
+; CHECK-GI-DOT-NEXT: ldr d3, [x0, #16]
+; CHECK-GI-DOT-NEXT: ldr q4, [x1]
+; CHECK-GI-DOT-NEXT: ldr d5, [x1, #16]
+; CHECK-GI-DOT-NEXT: sdot v1.4s, v4.16b, v2.16b
+; CHECK-GI-DOT-NEXT: sdot v0.4s, v5.16b, v3.16b
+; CHECK-GI-DOT-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-DOT-NEXT: addv s0, v0.4s
; CHECK-GI-DOT-NEXT: fmov w0, s0
; CHECK-GI-DOT-NEXT: ret
@@ -2835,243 +2526,91 @@ define i32 @test_sdot_v48i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v48i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: ldp q0, q4, [x1]
-; CHECK-GI-BASE-NEXT: ldr q2, [x0, #32]
-; CHECK-GI-BASE-NEXT: ldp q1, q3, [x0]
-; CHECK-GI-BASE-NEXT: ldr q7, [x1, #32]
-; CHECK-GI-BASE-NEXT: sshll2 v16.8h, v2.16b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v6.8h, v0.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v17.8h, v7.16b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v5.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: smull2 v18.4s, v6.8h, v5.8h
-; CHECK-GI-BASE-NEXT: smull v19.4s, v0.4h, v1.4h
-; CHECK-GI-BASE-NEXT: smull v5.4s, v6.4h, v5.4h
-; CHECK-GI-BASE-NEXT: smull2 v0.4s, v0.8h, v1.8h
-; CHECK-GI-BASE-NEXT: sshll v1.8h, v2.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v2.8h, v7.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v6.8h, v3.16b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v7.8h, v4.16b, #0
-; CHECK-GI-BASE-NEXT: smlal2 v18.4s, v17.8h, v16.8h
-; CHECK-GI-BASE-NEXT: smlal v5.4s, v17.4h, v16.4h
-; CHECK-GI-BASE-NEXT: smlal v19.4s, v2.4h, v1.4h
-; CHECK-GI-BASE-NEXT: smlal2 v0.4s, v2.8h, v1.8h
-; CHECK-GI-BASE-NEXT: sshll v1.8h, v3.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v2.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: smlal2 v18.4s, v7.8h, v6.8h
-; CHECK-GI-BASE-NEXT: smlal v5.4s, v7.4h, v6.4h
-; CHECK-GI-BASE-NEXT: smlal v19.4s, v2.4h, v1.4h
-; CHECK-GI-BASE-NEXT: smlal2 v0.4s, v2.8h, v1.8h
-; CHECK-GI-BASE-NEXT: add v1.4s, v19.4s, v5.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v18.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-GI-BASE-NEXT: ldp q0, q3, [x1]
+; CHECK-GI-BASE-NEXT: ldr q6, [x1, #32]
+; CHECK-GI-BASE-NEXT: ldp q1, q2, [x0]
+; CHECK-GI-BASE-NEXT: ldr q17, [x0, #32]
+; CHECK-GI-BASE-NEXT: sshll v4.8h, v0.8b, #0
+; CHECK-GI-BASE-NEXT: sshll2 v0.8h, v0.16b, #0
+; CHECK-GI-BASE-NEXT: sshll v7.8h, v3.8b, #0
+; CHECK-GI-BASE-NEXT: sshll v5.8h, v1.8b, #0
+; CHECK-GI-BASE-NEXT: sshll2 v1.8h, v1.16b, #0
+; CHECK-GI-BASE-NEXT: sshll v16.8h, v2.8b, #0
+; CHECK-GI-BASE-NEXT: sshll2 v3.8h, v3.16b, #0
+; CHECK-GI-BASE-NEXT: sshll2 v2.8h, v2.16b, #0
+; CHECK-GI-BASE-NEXT: smull v18.4s, v4.4h, v5.4h
+; CHECK-GI-BASE-NEXT: smull2 v4.4s, v4.8h, v5.8h
+; CHECK-GI-BASE-NEXT: smull2 v19.4s, v0.8h, v1.8h
+; CHECK-GI-BASE-NEXT: smull v20.4s, v7.4h, v16.4h
+; CHECK-GI-BASE-NEXT: smull v0.4s, v0.4h, v1.4h
+; CHECK-GI-BASE-NEXT: sshll v5.8h, v6.8b, #0
+; CHECK-GI-BASE-NEXT: sshll v1.8h, v17.8b, #0
+; CHECK-GI-BASE-NEXT: smull2 v7.4s, v7.8h, v16.8h
+; CHECK-GI-BASE-NEXT: sshll2 v6.8h, v6.16b, #0
+; CHECK-GI-BASE-NEXT: sshll2 v17.8h, v17.16b, #0
+; CHECK-GI-BASE-NEXT: addv s16, v18.4s
+; CHECK-GI-BASE-NEXT: addv s4, v4.4s
+; CHECK-GI-BASE-NEXT: smull v18.4s, v3.4h, v2.4h
+; CHECK-GI-BASE-NEXT: smull2 v2.4s, v3.8h, v2.8h
+; CHECK-GI-BASE-NEXT: addv s3, v19.4s
+; CHECK-GI-BASE-NEXT: smull v19.4s, v5.4h, v1.4h
+; CHECK-GI-BASE-NEXT: smull2 v1.4s, v5.8h, v1.8h
+; CHECK-GI-BASE-NEXT: addv s5, v20.4s
; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: addv s7, v7.4s
+; CHECK-GI-BASE-NEXT: smull v20.4s, v6.4h, v17.4h
+; CHECK-GI-BASE-NEXT: smull2 v6.4s, v6.8h, v17.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s16
+; CHECK-GI-BASE-NEXT: fmov w9, s4
+; CHECK-GI-BASE-NEXT: fmov w10, s3
+; CHECK-GI-BASE-NEXT: addv s3, v18.4s
+; CHECK-GI-BASE-NEXT: addv s2, v2.4s
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: addv s4, v19.4s
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: addv s0, v1.4s
+; CHECK-GI-BASE-NEXT: addv s1, v20.4s
+; CHECK-GI-BASE-NEXT: addv s5, v6.4s
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s3
+; CHECK-GI-BASE-NEXT: fmov w12, s2
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s7
+; CHECK-GI-BASE-NEXT: add w9, w10, w9
+; CHECK-GI-BASE-NEXT: add w10, w11, w12
+; CHECK-GI-BASE-NEXT: fmov w11, s4
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w10, s0
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: add w9, w9, w10
+; CHECK-GI-BASE-NEXT: fmov w10, s1
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v48i8:
; CHECK-GI-DOT: // %bb.0: // %entry
-; CHECK-GI-DOT-NEXT: ldr b1, [x0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x0, #1]
; CHECK-GI-DOT-NEXT: movi v0.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: ldr b2, [x0, #16]
-; CHECK-GI-DOT-NEXT: ldr b6, [x0, #17]
-; CHECK-GI-DOT-NEXT: ldr b4, [x1]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #1]
-; CHECK-GI-DOT-NEXT: mov v1.b[1], v5.b[0]
-; CHECK-GI-DOT-NEXT: ldr b5, [x1, #16]
-; CHECK-GI-DOT-NEXT: ldr b18, [x1, #17]
-; CHECK-GI-DOT-NEXT: mov v2.b[1], v6.b[0]
-; CHECK-GI-DOT-NEXT: ldr b3, [x0, #32]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #33]
-; CHECK-GI-DOT-NEXT: mov v4.b[1], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b6, [x1, #32]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #33]
-; CHECK-GI-DOT-NEXT: mov v5.b[1], v18.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #2]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #18]
-; CHECK-GI-DOT-NEXT: mov v3.b[1], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #2]
-; CHECK-GI-DOT-NEXT: mov v6.b[1], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[2], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #18]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #34]
-; CHECK-GI-DOT-NEXT: mov v2.b[2], v18.b[0]
-; CHECK-GI-DOT-NEXT: ldr b18, [x1, #34]
-; CHECK-GI-DOT-NEXT: mov v4.b[2], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #3]
-; CHECK-GI-DOT-NEXT: mov v5.b[2], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #19]
-; CHECK-GI-DOT-NEXT: mov v3.b[2], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #19]
-; CHECK-GI-DOT-NEXT: mov v6.b[2], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[3], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #3]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #35]
-; CHECK-GI-DOT-NEXT: mov v2.b[3], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #35]
-; CHECK-GI-DOT-NEXT: mov v4.b[3], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #4]
-; CHECK-GI-DOT-NEXT: mov v5.b[3], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #20]
-; CHECK-GI-DOT-NEXT: mov v3.b[3], v18.b[0]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #36]
-; CHECK-GI-DOT-NEXT: mov v6.b[3], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[4], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #4]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #20]
-; CHECK-GI-DOT-NEXT: mov v2.b[4], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #36]
-; CHECK-GI-DOT-NEXT: mov v4.b[4], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #5]
-; CHECK-GI-DOT-NEXT: mov v5.b[4], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #21]
-; CHECK-GI-DOT-NEXT: mov v3.b[4], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[4], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[5], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #5]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #21]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #37]
-; CHECK-GI-DOT-NEXT: mov v2.b[5], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #37]
-; CHECK-GI-DOT-NEXT: mov v4.b[5], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #6]
-; CHECK-GI-DOT-NEXT: mov v5.b[5], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #22]
-; CHECK-GI-DOT-NEXT: mov v3.b[5], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[5], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[6], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #6]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #22]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #38]
-; CHECK-GI-DOT-NEXT: mov v2.b[6], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #38]
-; CHECK-GI-DOT-NEXT: mov v4.b[6], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #7]
-; CHECK-GI-DOT-NEXT: mov v5.b[6], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #23]
-; CHECK-GI-DOT-NEXT: mov v3.b[6], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[6], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[7], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #7]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #23]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #39]
-; CHECK-GI-DOT-NEXT: mov v2.b[7], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #39]
-; CHECK-GI-DOT-NEXT: mov v4.b[7], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #8]
-; CHECK-GI-DOT-NEXT: mov v5.b[7], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #24]
-; CHECK-GI-DOT-NEXT: mov v3.b[7], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[7], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[8], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #8]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #24]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #40]
-; CHECK-GI-DOT-NEXT: mov v2.b[8], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #40]
-; CHECK-GI-DOT-NEXT: mov v4.b[8], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #9]
-; CHECK-GI-DOT-NEXT: mov v5.b[8], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #25]
-; CHECK-GI-DOT-NEXT: mov v3.b[8], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[8], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[9], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #9]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #25]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #41]
-; CHECK-GI-DOT-NEXT: mov v2.b[9], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #41]
-; CHECK-GI-DOT-NEXT: mov v4.b[9], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #10]
-; CHECK-GI-DOT-NEXT: mov v5.b[9], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #26]
-; CHECK-GI-DOT-NEXT: mov v3.b[9], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[9], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[10], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #10]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #26]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #42]
-; CHECK-GI-DOT-NEXT: mov v2.b[10], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #42]
-; CHECK-GI-DOT-NEXT: mov v4.b[10], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #11]
-; CHECK-GI-DOT-NEXT: mov v5.b[10], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #27]
-; CHECK-GI-DOT-NEXT: mov v3.b[10], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[10], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[11], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #11]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #27]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #43]
-; CHECK-GI-DOT-NEXT: mov v2.b[11], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #43]
-; CHECK-GI-DOT-NEXT: mov v4.b[11], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #12]
-; CHECK-GI-DOT-NEXT: mov v5.b[11], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #28]
-; CHECK-GI-DOT-NEXT: mov v3.b[11], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[11], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[12], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #12]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #28]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #44]
-; CHECK-GI-DOT-NEXT: mov v2.b[12], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #44]
-; CHECK-GI-DOT-NEXT: mov v4.b[12], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #13]
-; CHECK-GI-DOT-NEXT: mov v5.b[12], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #29]
-; CHECK-GI-DOT-NEXT: mov v3.b[12], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[12], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[13], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #13]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #29]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #45]
-; CHECK-GI-DOT-NEXT: mov v2.b[13], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #45]
-; CHECK-GI-DOT-NEXT: mov v4.b[13], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #14]
-; CHECK-GI-DOT-NEXT: mov v5.b[13], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x0, #30]
-; CHECK-GI-DOT-NEXT: mov v3.b[13], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[13], v16.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[14], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #14]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #30]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #46]
-; CHECK-GI-DOT-NEXT: mov v2.b[14], v17.b[0]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #46]
-; CHECK-GI-DOT-NEXT: mov v4.b[14], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x0, #15]
-; CHECK-GI-DOT-NEXT: mov v5.b[14], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x0, #31]
-; CHECK-GI-DOT-NEXT: mov v3.b[14], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[14], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v1.b[15], v7.b[0]
-; CHECK-GI-DOT-NEXT: ldr b7, [x1, #15]
-; CHECK-GI-DOT-NEXT: ldr b17, [x1, #31]
-; CHECK-GI-DOT-NEXT: ldr b18, [x0, #47]
-; CHECK-GI-DOT-NEXT: mov v2.b[15], v16.b[0]
-; CHECK-GI-DOT-NEXT: ldr b16, [x1, #47]
-; CHECK-GI-DOT-NEXT: mov v4.b[15], v7.b[0]
-; CHECK-GI-DOT-NEXT: movi v7.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: mov v5.b[15], v17.b[0]
-; CHECK-GI-DOT-NEXT: mov v3.b[15], v18.b[0]
-; CHECK-GI-DOT-NEXT: mov v6.b[15], v16.b[0]
-; CHECK-GI-DOT-NEXT: movi v16.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT: sdot v0.4s, v4.16b, v1.16b
-; CHECK-GI-DOT-NEXT: sdot v7.4s, v5.16b, v2.16b
-; CHECK-GI-DOT-NEXT: sdot v16.4s, v6.16b, v3.16b
+; CHECK-GI-DOT-NEXT: movi v1.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: ldr q7, [x0, #32]
+; CHECK-GI-DOT-NEXT: ldp q3, q4, [x0]
+; CHECK-GI-DOT-NEXT: movi v2.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: ldp q5, q6, [x1]
+; CHECK-GI-DOT-NEXT: ldr q16, [x1, #32]
+; CHECK-GI-DOT-NEXT: sdot v0.4s, v5.16b, v3.16b
+; CHECK-GI-DOT-NEXT: sdot v1.4s, v6.16b, v4.16b
+; CHECK-GI-DOT-NEXT: sdot v2.4s, v16.16b, v7.16b
; CHECK-GI-DOT-NEXT: addv s0, v0.4s
-; CHECK-GI-DOT-NEXT: addv s1, v7.4s
-; CHECK-GI-DOT-NEXT: addv s2, v16.4s
+; CHECK-GI-DOT-NEXT: addv s1, v1.4s
+; CHECK-GI-DOT-NEXT: addv s2, v2.4s
; CHECK-GI-DOT-NEXT: fmov w8, s0
; CHECK-GI-DOT-NEXT: fmov w9, s1
-; CHECK-GI-DOT-NEXT: fmov w10, s2
; CHECK-GI-DOT-NEXT: add w8, w8, w9
-; CHECK-GI-DOT-NEXT: add w0, w8, w10
+; CHECK-GI-DOT-NEXT: fmov w9, s2
+; CHECK-GI-DOT-NEXT: add w0, w8, w9
; CHECK-GI-DOT-NEXT: ret
entry:
%a = load <48 x i8>, ptr %p1
diff --git a/llvm/test/CodeGen/AArch64/wrong-callee-save-size-after-livedebugvariables.mir b/llvm/test/CodeGen/AArch64/wrong-callee-save-size-after-livedebugvariables.mir
index 53a8612..8e11424 100644
--- a/llvm/test/CodeGen/AArch64/wrong-callee-save-size-after-livedebugvariables.mir
+++ b/llvm/test/CodeGen/AArch64/wrong-callee-save-size-after-livedebugvariables.mir
@@ -64,6 +64,7 @@
name: foo
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
fixedStack: []
stack:
diff --git a/llvm/test/CodeGen/AArch64/xor.ll b/llvm/test/CodeGen/AArch64/xor.ll
index d92402c..7d7f7bf 100644
--- a/llvm/test/CodeGen/AArch64/xor.ll
+++ b/llvm/test/CodeGen/AArch64/xor.ll
@@ -51,7 +51,7 @@ define <4 x i32> @vec_add_of_not_decrement(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: vec_add_of_not_decrement:
; CHECK: // %bb.0:
; CHECK-NEXT: mvn v1.16b, v1.16b
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
%t0 = sub <4 x i32> %x, %y
%r = sub <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1>
diff --git a/llvm/test/CodeGen/AArch64/zext.ll b/llvm/test/CodeGen/AArch64/zext.ll
index 54b29be..716d239 100644
--- a/llvm/test/CodeGen/AArch64/zext.ll
+++ b/llvm/test/CodeGen/AArch64/zext.ll
@@ -305,15 +305,14 @@ define <3 x i64> @zext_v3i8_v3i64(<3 x i8> %a) {
;
; CHECK-GI-LABEL: zext_v3i8_v3i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
-; CHECK-GI-NEXT: fmov d1, x0
-; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
-; CHECK-GI-NEXT: movi v0.2d, #0x000000000000ff
+; CHECK-GI-NEXT: fmov s0, w0
+; CHECK-GI-NEXT: movi v1.2d, #0x000000000000ff
; CHECK-GI-NEXT: // kill: def $w2 killed $w2 def $x2
; CHECK-GI-NEXT: and x8, x2, #0xff
; CHECK-GI-NEXT: fmov d2, x8
-; CHECK-GI-NEXT: mov v1.d[1], x1
-; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: mov v0.s[1], w1
+; CHECK-GI-NEXT: ushll v0.2d, v0.2s, #0
+; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-GI-NEXT: mov d1, v0.d[1]
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-NEXT: ret
@@ -470,15 +469,14 @@ define <3 x i64> @zext_v3i10_v3i64(<3 x i10> %a) {
;
; CHECK-GI-LABEL: zext_v3i10_v3i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
-; CHECK-GI-NEXT: fmov d0, x0
-; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-GI-NEXT: fmov s0, w0
; CHECK-GI-NEXT: adrp x8, .LCPI27_0
; CHECK-GI-NEXT: // kill: def $w2 killed $w2 def $x2
; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI27_0]
; CHECK-GI-NEXT: and x8, x2, #0x3ff
; CHECK-GI-NEXT: fmov d2, x8
-; CHECK-GI-NEXT: mov v0.d[1], x1
+; CHECK-GI-NEXT: mov v0.s[1], w1
+; CHECK-GI-NEXT: ushll v0.2d, v0.2s, #0
; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-GI-NEXT: mov d1, v0.d[1]
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
index 255c6de..1a76f8c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
@@ -1090,18 +1090,29 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB39_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB39_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1_vol
@@ -1109,20 +1120,31 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(ptr addrspace(1) %pt
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB39_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB39_2
+; GFX90A-NEXT: .LBB39_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB39_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc0 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] sc1
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc0 sc1
+; GFX940-NEXT: .LBB39_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 seq_cst
@@ -1132,26 +1154,47 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_agent:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB40_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
+; GFX90A-NEXT: .LBB40_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_agent:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB40_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB40_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -1161,18 +1204,29 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_system:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB41_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB41_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1_vol
@@ -1180,20 +1234,31 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(ptr addrspace
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB41_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB41_2
+; GFX90A-NEXT: .LBB41_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_system:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB41_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc0 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] sc1
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc0 sc1
+; GFX940-NEXT: .LBB41_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") seq_cst
@@ -1203,26 +1268,47 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_flush(ptr addrspace(1) %ptr) #0 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_flush:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB42_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
+; GFX90A-NEXT: .LBB42_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_flush:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB42_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB42_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -1394,37 +1480,59 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent_safe(ptr addrspace(1) %ptr) {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_agent_safe:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB49_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB49_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB49_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB49_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB49_2
+; GFX90A-NEXT: .LBB49_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_agent_safe:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB49_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB49_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -1866,23 +1974,44 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr) #1 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB65_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: .LBB65_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB65_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: .LBB65_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
@@ -1892,23 +2021,44 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3) %ptr) #0 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB66_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: .LBB66_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB66_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: .LBB66_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
@@ -1918,44 +2068,66 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrspace(3) %ptr) #4 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB67_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_mov_b32_e32 v2, s0
-; GFX90A-NEXT: ds_read_b64 v[0:1], v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, s0
+; GFX90A-NEXT: ds_read_b64 v[2:3], v4
; GFX90A-NEXT: s_mov_b64 s[0:1], 0
-; GFX90A-NEXT: .LBB67_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB67_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f64 v[4:5], v[0:1], 4.0
-; GFX90A-NEXT: ds_cmpst_rtn_b64 v[4:5], v2, v[0:1], v[4:5]
+; GFX90A-NEXT: v_add_f64 v[6:7], v[2:3], v[0:1]
+; GFX90A-NEXT: ds_cmpst_rtn_b64 v[6:7], v4, v[2:3], v[6:7]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[0:1]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; GFX90A-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[6:7], v[6:7] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX90A-NEXT: s_cbranch_execnz .LBB67_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB67_2
+; GFX90A-NEXT: .LBB67_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB67_3
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s0, s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_mov_b32_e32 v2, s0
-; GFX940-NEXT: ds_read_b64 v[0:1], v2
+; GFX940-NEXT: v_mov_b32_e32 v4, s0
+; GFX940-NEXT: ds_read_b64 v[2:3], v4
; GFX940-NEXT: s_mov_b64 s[0:1], 0
-; GFX940-NEXT: .LBB67_1: ; %atomicrmw.start
+; GFX940-NEXT: .LBB67_2: ; %atomicrmw.start
; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_add_f64 v[4:5], v[0:1], 4.0
-; GFX940-NEXT: ds_cmpst_rtn_b64 v[4:5], v2, v[0:1], v[4:5]
+; GFX940-NEXT: v_add_f64 v[6:7], v[2:3], v[0:1]
+; GFX940-NEXT: ds_cmpst_rtn_b64 v[6:7], v4, v[2:3], v[6:7]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[0:1]
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
+; GFX940-NEXT: v_mov_b64_e32 v[2:3], v[6:7]
; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX940-NEXT: s_cbranch_execnz .LBB67_1
-; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_cbranch_execnz .LBB67_2
+; GFX940-NEXT: .LBB67_3:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir
index e288d9d..eafd1e1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir
@@ -16,7 +16,8 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY1]](s32)
; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p1), [[BUILD_VECTOR]] :: (load store syncscope("agent-one-as") monotonic monotonic (s32), addrspace 1)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AMDGPU_ATOMIC_CMPXCHG]](s32), [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[AMDGPU_ATOMIC_CMPXCHG]](s32), implicit [[ICMP]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[AMDGPU_ATOMIC_CMPXCHG]](s32)
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s32), implicit [[ICMP]](s1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s32) = COPY $vgpr3
@@ -40,7 +41,8 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY1]](s32)
; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p0), [[BUILD_VECTOR]] :: (load store syncscope("agent-one-as") monotonic monotonic (s32))
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AMDGPU_ATOMIC_CMPXCHG]](s32), [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[AMDGPU_ATOMIC_CMPXCHG]](s32), implicit [[ICMP]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[AMDGPU_ATOMIC_CMPXCHG]](s32)
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s32), implicit [[ICMP]](s1)
%0:_(p0) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s32) = COPY $vgpr3
@@ -63,7 +65,8 @@ body: |
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p3), [[COPY1]], [[COPY2]] :: (load store syncscope("agent-one-as") monotonic monotonic (s32), addrspace 3)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s32), [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[ATOMIC_CMPXCHG]](s32), implicit [[ICMP]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ATOMIC_CMPXCHG]](s32)
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s32), implicit [[ICMP]](s1)
%0:_(p3) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = COPY $vgpr2
@@ -87,7 +90,8 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[COPY2]](s64), [[COPY1]](s64)
; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p1), [[BUILD_VECTOR]] :: (load store syncscope("agent-one-as") monotonic monotonic (s64), addrspace 1)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AMDGPU_ATOMIC_CMPXCHG]](s64), [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[AMDGPU_ATOMIC_CMPXCHG]](s64), implicit [[ICMP]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[AMDGPU_ATOMIC_CMPXCHG]](s64)
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s64), implicit [[ICMP]](s1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = COPY $vgpr4_vgpr5
@@ -110,7 +114,8 @@ body: |
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $vgpr3_vgpr4
; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p3), [[COPY1]], [[COPY2]] :: (load store syncscope("agent-one-as") monotonic monotonic (s64), addrspace 3)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s64), [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[ATOMIC_CMPXCHG]](s64), implicit [[ICMP]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[ATOMIC_CMPXCHG]](s64)
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s64), implicit [[ICMP]](s1)
%0:_(p3) = COPY $vgpr0
%1:_(s64) = COPY $vgpr1_vgpr2
%2:_(s64) = COPY $vgpr3_vgpr4
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir
index e9f8180..fed277d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir
@@ -64,9 +64,7 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY]](s32)
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[CTLZ_ZERO_UNDEF]], [[C]]
- ; CHECK-NEXT: $vgpr0 = COPY [[AND]](s32)
+ ; CHECK-NEXT: $vgpr0 = COPY [[CTLZ_ZERO_UNDEF]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s16) = G_CTLZ_ZERO_UNDEF %0
%2:_(s32) = G_ZEXT %1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir
index dba20e1..eb86a98 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir
@@ -86,8 +86,9 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[ADD]](s32), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s32), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
- ; CHECK-NEXT: $vgpr0 = COPY [[ADD]](s32)
+ ; CHECK-NEXT: $vgpr0 = COPY [[COPY2]](s32)
; CHECK-NEXT: $vgpr1 = COPY [[ZEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
@@ -117,8 +118,9 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](s64)
; CHECK-NEXT: $vgpr2 = COPY [[ZEXT]](s32)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
@@ -172,11 +174,12 @@ body: |
; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<2 x s16>) = COPY [[BITCAST2]](<2 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND2]](s32), [[AND3]](s32)
- ; CHECK-NEXT: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
+ ; CHECK-NEXT: $vgpr0 = COPY [[COPY3]](<2 x s16>)
; CHECK-NEXT: $vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
@@ -360,13 +363,14 @@ body: |
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR2]](s1)
; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR3]](s1)
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(<4 x s16>) = COPY [[CONCAT_VECTORS]](<4 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ANYEXT2]], [[C3]]
; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ANYEXT3]], [[C3]]
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[AND4]](s32), [[AND5]](s32), [[AND6]](s32), [[AND7]](s32)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY5]](<4 x s16>)
; CHECK-NEXT: $vgpr2_vgpr3_vgpr4_vgpr5 = COPY [[BUILD_VECTOR]](<4 x s32>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr1_vgpr2
@@ -403,11 +407,12 @@ body: |
; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY [[BUILD_VECTOR]](<2 x s32>)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C1]]
; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND]](s32), [[AND1]](s32)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](<2 x s32>)
; CHECK-NEXT: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR1]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir
index 93d0071..80b3166 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir
@@ -955,15 +955,16 @@ body: |
; GFX6-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX6-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX6-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX6-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX6-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX6-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO3]]
; GFX6-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
;
; GFX8-LABEL: name: saddsat_s64
@@ -980,15 +981,16 @@ body: |
; GFX8-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX8-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX8-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX8-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX8-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO3]]
; GFX8-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
;
; GFX9-LABEL: name: saddsat_s64
@@ -1005,15 +1007,16 @@ body: |
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
; GFX9-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX9-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX9-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX9-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO3]]
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
@@ -1043,15 +1046,16 @@ body: |
; GFX6-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX6-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV2]](s64), [[C]]
; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX6-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX6-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX6-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX6-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO3]]
; GFX6-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX6-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX6-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX6-NEXT: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[UV12]], [[UV14]]
@@ -1060,13 +1064,14 @@ body: |
; GFX6-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX6-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV3]](s64), [[C]]
; GFX6-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX6-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX6-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX6-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX6-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX6-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX6-NEXT: [[UADDE6:%[0-9]+]]:_(s32), [[UADDE7:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO7]]
; GFX6-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO6]](s32), [[UADDE6]](s32)
- ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX6-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX6-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
@@ -1086,15 +1091,16 @@ body: |
; GFX8-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX8-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV2]](s64), [[C]]
; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX8-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX8-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX8-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO3]]
; GFX8-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX8-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX8-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX8-NEXT: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[UV12]], [[UV14]]
@@ -1103,13 +1109,14 @@ body: |
; GFX8-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX8-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV3]](s64), [[C]]
; GFX8-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX8-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX8-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX8-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX8-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX8-NEXT: [[UADDE6:%[0-9]+]]:_(s32), [[UADDE7:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO7]]
; GFX8-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO6]](s32), [[UADDE6]](s32)
- ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX8-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX8-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
@@ -1129,15 +1136,16 @@ body: |
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV2]](s64), [[C]]
; GFX9-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX9-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX9-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX9-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO3]]
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX9-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX9-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX9-NEXT: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[UV12]], [[UV14]]
@@ -1146,13 +1154,14 @@ body: |
; GFX9-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX9-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV3]](s64), [[C]]
; GFX9-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX9-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX9-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX9-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX9-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX9-NEXT: [[UADDE6:%[0-9]+]]:_(s32), [[UADDE7:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO7]]
; GFX9-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO6]](s32), [[UADDE6]](s32)
- ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir
index 57b1ab9..220450c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir
@@ -86,8 +86,9 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s32), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
- ; CHECK-NEXT: $vgpr0 = COPY [[SUB]](s32)
+ ; CHECK-NEXT: $vgpr0 = COPY [[COPY2]](s32)
; CHECK-NEXT: $vgpr1 = COPY [[ZEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
@@ -117,8 +118,9 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](s64)
; CHECK-NEXT: $vgpr2 = COPY [[ZEXT]](s32)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
@@ -172,11 +174,12 @@ body: |
; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<2 x s16>) = COPY [[BITCAST2]](<2 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND2]](s32), [[AND3]](s32)
- ; CHECK-NEXT: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
+ ; CHECK-NEXT: $vgpr0 = COPY [[COPY3]](<2 x s16>)
; CHECK-NEXT: $vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
@@ -360,13 +363,14 @@ body: |
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR2]](s1)
; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR3]](s1)
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(<4 x s16>) = COPY [[CONCAT_VECTORS]](<4 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ANYEXT2]], [[C3]]
; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ANYEXT3]], [[C3]]
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[AND4]](s32), [[AND5]](s32), [[AND6]](s32), [[AND7]](s32)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY5]](<4 x s16>)
; CHECK-NEXT: $vgpr2_vgpr3_vgpr4_vgpr5 = COPY [[BUILD_VECTOR]](<4 x s32>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr1_vgpr2
@@ -403,11 +407,12 @@ body: |
; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY [[BUILD_VECTOR]](<2 x s32>)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C1]]
; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND]](s32), [[AND1]](s32)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](<2 x s32>)
; CHECK-NEXT: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR1]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir
index 33a8cda..49fb6e9 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir
@@ -955,15 +955,16 @@ body: |
; GFX6-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX6-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX6-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX6-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX6-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX6-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
; GFX6-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
;
; GFX8-LABEL: name: ssubsat_s64
@@ -980,15 +981,16 @@ body: |
; GFX8-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX8-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX8-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX8-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX8-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
; GFX8-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
;
; GFX9-LABEL: name: ssubsat_s64
@@ -1005,15 +1007,16 @@ body: |
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
; GFX9-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX9-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX9-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX9-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
@@ -1043,15 +1046,16 @@ body: |
; GFX6-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX6-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV2]](s64), [[C]]
; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX6-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX6-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX6-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX6-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO1]]
; GFX6-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX6-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX6-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX6-NEXT: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV12]], [[UV14]]
@@ -1060,13 +1064,14 @@ body: |
; GFX6-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX6-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV3]](s64), [[C]]
; GFX6-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX6-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX6-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX6-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX6-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX6-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX6-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO3]]
; GFX6-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX6-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX6-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
@@ -1086,15 +1091,16 @@ body: |
; GFX8-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX8-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV2]](s64), [[C]]
; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX8-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX8-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX8-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO1]]
; GFX8-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX8-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX8-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX8-NEXT: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV12]], [[UV14]]
@@ -1103,13 +1109,14 @@ body: |
; GFX8-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX8-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV3]](s64), [[C]]
; GFX8-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX8-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX8-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX8-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX8-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX8-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO3]]
; GFX8-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX8-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX8-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
@@ -1129,15 +1136,16 @@ body: |
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV2]](s64), [[C]]
; GFX9-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX9-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX9-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX9-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO1]]
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX9-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX9-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX9-NEXT: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV12]], [[UV14]]
@@ -1146,13 +1154,14 @@ body: |
; GFX9-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX9-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV3]](s64), [[C]]
; GFX9-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX9-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX9-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX9-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX9-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX9-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO3]]
; GFX9-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap.mir
index b4bc648..305eca7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap.mir
@@ -24,7 +24,7 @@ body: |
bb.0:
%0:_(s8) = G_CONSTANT i8 0
%1:_(p1) = G_CONSTANT i64 0
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.1:
G_STORE %0, %1 :: (store 1, addrspace 1)
@@ -55,7 +55,7 @@ body: |
; GCN-NEXT: S_ENDPGM 0
bb.0:
%0:_(s8) = G_CONSTANT i8 0
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
%1:_(p1) = G_CONSTANT i64 0
bb.1:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.interp.inreg.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.interp.inreg.ll
index 623360f..de46037 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.interp.inreg.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.interp.inreg.ll
@@ -147,6 +147,34 @@ main_body:
ret half %res
}
+define amdgpu_ps half @v_interp_rtz_f16(float inreg %i, float inreg %j, i32 inreg %m0) #0 {
+; GCN-LABEL: v_interp_rtz_f16:
+; GCN: ; %bb.0: ; %main_body
+; GCN-NEXT: s_mov_b32 s3, exec_lo
+; GCN-NEXT: s_wqm_b32 exec_lo, exec_lo
+; GCN-NEXT: s_mov_b32 m0, s2
+; GCN-NEXT: lds_param_load v1, attr0.x wait_vdst:15
+; GCN-NEXT: s_mov_b32 exec_lo, s3
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v2, s1
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GCN-NEXT: v_interp_p10_rtz_f16_f32 v3, v1, v0, v1 wait_exp:0
+; GCN-NEXT: v_interp_p10_rtz_f16_f32 v0, v1, v0, v1 op_sel:[1,0,1,0] wait_exp:7
+; GCN-NEXT: v_interp_p2_rtz_f16_f32 v3, v1, v2, v3 wait_exp:7
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GCN-NEXT: v_interp_p2_rtz_f16_f32 v0, v1, v2, v0 op_sel:[1,0,0,0] wait_exp:7
+; GCN-NEXT: v_add_f16_e32 v0, v3, v0
+; GCN-NEXT: ; return to shader part epilog
+main_body:
+ %p0 = call float @llvm.amdgcn.lds.param.load(i32 0, i32 0, i32 %m0)
+ %l_p0 = call float @llvm.amdgcn.interp.p10.rtz.f16(float %p0, float %i, float %p0, i1 0)
+ %l_p1 = call half @llvm.amdgcn.interp.p2.rtz.f16(float %p0, float %j, float %l_p0, i1 0)
+ %h_p0 = call float @llvm.amdgcn.interp.p10.rtz.f16(float %p0, float %i, float %p0, i1 1)
+ %h_p1 = call half @llvm.amdgcn.interp.p2.rtz.f16(float %p0, float %j, float %h_p0, i1 1)
+ %res = fadd half %l_p1, %h_p1
+ ret half %res
+}
+
define amdgpu_ps half @v_interp_f16_imm_params(float inreg %i, float inreg %j) #0 {
; GCN-LABEL: v_interp_f16_imm_params:
; GCN: ; %bb.0: ; %main_body
@@ -172,6 +200,8 @@ declare float @llvm.amdgcn.interp.inreg.p10(float, float, float) #0
declare float @llvm.amdgcn.interp.inreg.p2(float, float, float) #0
declare float @llvm.amdgcn.interp.inreg.p10.f16(float, float, float, i1) #0
declare half @llvm.amdgcn.interp.inreg.p2.f16(float, float, float, i1) #0
+declare float @llvm.amdgcn.interp.p10.rtz.f16(float, float, float, i1) #0
+declare half @llvm.amdgcn.interp.p2.rtz.f16(float, float, float, i1) #0
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare void @llvm.amdgcn.exp.f16(i32, i32, float, float, float, float, i1, i1) #0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
index 6eed92b..6d4aa3b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
@@ -670,36 +670,19 @@ define amdgpu_kernel void @bfe_sext_in_reg_i24(ptr addrspace(1) %out, ptr addrsp
define amdgpu_kernel void @simplify_demanded_bfe_sdiv(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; GFX6-LABEL: simplify_demanded_bfe_sdiv:
; GFX6: ; %bb.0:
-; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, 2.0
-; GFX6-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x0
-; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
-; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GFX6-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_load_dword s0, s[6:7], 0x0
-; GFX6-NEXT: s_mov_b32 s6, -1
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: v_mul_lo_u32 v1, v0, -2
-; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_bfe_i32 s0, s0, 0x100001
-; GFX6-NEXT: s_ashr_i32 s2, s0, 31
-; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
-; GFX6-NEXT: s_add_i32 s0, s0, s2
-; GFX6-NEXT: s_xor_b32 s0, s0, s2
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s0, v0
-; GFX6-NEXT: v_lshlrev_b32_e32 v1, 1, v0
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, 1, v0
-; GFX6-NEXT: v_sub_i32_e32 v1, vcc, s0, v1
-; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 2, v1
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX6-NEXT: v_subrev_i32_e64 v2, s[0:1], 2, v1
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, 1, v0
-; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 2, v1
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX6-NEXT: v_xor_b32_e32 v0, s2, v0
-; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s2, v0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_load_dword s3, s[2:3], 0x0
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_bfe_i32 s3, s3, 0x100001
+; GFX6-NEXT: s_ashr_i32 s4, s3, 31
+; GFX6-NEXT: s_lshr_b32 s4, s4, 31
+; GFX6-NEXT: s_add_i32 s3, s3, s4
+; GFX6-NEXT: s_ashr_i32 s3, s3, 1
+; GFX6-NEXT: v_mov_b32_e32 v0, s3
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
%src = load i32, ptr addrspace(1) %in, align 4
%bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %src, i32 1, i32 16)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.format.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.format.ll
index 686b849..06bd45a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.format.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.format.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck -check-prefix=GFX8 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck -check-prefix=GFX12 %s
+; Note that TFE instructions do not have their result initialized to zero because the test stops before finalize-isel, which is the pass where that initialization is inserted.
define amdgpu_ps float @struct_buffer_load_format_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
; GFX8-LABEL: name: struct_buffer_load_format_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.buffer.load.format.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.buffer.load.format.ll
index 9edc2455..1e3f94a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.buffer.load.format.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.buffer.load.format.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck %s
+; Note that TFE instructions do not have their result initialized to zero because the test stops before finalize-isel, which is the pass where that initialization is inserted.
define amdgpu_ps float @struct_ptr_buffer_load_format_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
; CHECK-LABEL: name: struct_ptr_buffer_load_format_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
index d36f5c0..a6f9bb7e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
@@ -4142,11 +4142,11 @@ define i48 @v_saddsat_i48(i48 %lhs, i48 %rhs) {
; GFX9-NEXT: v_lshlrev_b64 v[2:3], 16, v[2:3]
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[4:5], v[0:1]
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[6:7], 0, v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v5
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
; GFX9-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4162,7 +4162,7 @@ define i48 @v_saddsat_i48(i48 %lhs, i48 %rhs) {
; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, 0, v[2:3]
; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[4:5], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s5, 0x80000000, v6
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX10-NEXT: s_xor_b32 vcc_lo, vcc_lo, s4
; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
@@ -4179,7 +4179,7 @@ define i48 @v_saddsat_i48(i48 %lhs, i48 %rhs) {
; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, 0, v[2:3]
; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, v[4:5], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v6
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX11-NEXT: s_xor_b32 vcc_lo, vcc_lo, s0
; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_cndmask_b32 v1, v5, v1
; GFX11-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4202,7 +4202,7 @@ define amdgpu_ps i48 @s_saddsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX6-NEXT: v_cmp_lt_i64_e64 s[0:1], s[0:1], 0
; GFX6-NEXT: s_ashr_i32 s2, s7, 31
; GFX6-NEXT: s_ashr_i32 s5, s7, 15
-; GFX6-NEXT: s_add_u32 s2, s2, 0xffff8000
+; GFX6-NEXT: s_addk_i32 s2, 0x8000
; GFX6-NEXT: v_mov_b32_e32 v0, s5
; GFX6-NEXT: v_mov_b32_e32 v1, s2
; GFX6-NEXT: v_mov_b32_e32 v2, s4
@@ -4227,7 +4227,7 @@ define amdgpu_ps i48 @s_saddsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX8-NEXT: v_cmp_lt_i64_e64 s[0:1], s[0:1], 0
; GFX8-NEXT: s_ashr_i32 s2, s7, 31
; GFX8-NEXT: s_ashr_i32 s5, s7, 15
-; GFX8-NEXT: s_add_u32 s2, s2, 0xffff8000
+; GFX8-NEXT: s_addk_i32 s2, 0x8000
; GFX8-NEXT: v_mov_b32_e32 v0, s5
; GFX8-NEXT: v_mov_b32_e32 v1, s2
; GFX8-NEXT: v_mov_b32_e32 v2, s4
@@ -4250,7 +4250,7 @@ define amdgpu_ps i48 @s_saddsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
; GFX9-NEXT: s_ashr_i32 s2, s5, 31
-; GFX9-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX9-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: v_mov_b32_e32 v2, s4
@@ -4274,7 +4274,7 @@ define amdgpu_ps i48 @s_saddsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e64 s1, s[2:3], 0
; GFX10-NEXT: v_mov_b32_e32 v1, s5
; GFX10-NEXT: s_ashr_i32 s2, s5, 31
-; GFX10-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX10-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX10-NEXT: s_xor_b32 s0, s1, s0
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4293,7 +4293,7 @@ define amdgpu_ps i48 @s_saddsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1]
; GFX11-NEXT: v_cmp_lt_i64_e64 s1, s[2:3], 0
; GFX11-NEXT: s_ashr_i32 s2, s5, 31
-; GFX11-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX11-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX11-NEXT: s_xor_b32 s0, s1, s0
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4351,11 +4351,11 @@ define amdgpu_ps <2 x float> @saddsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v1, vcc
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], v[2:3]
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[2:3], 0, v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], 0, v[0:1]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[2:3], s[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4371,7 +4371,7 @@ define amdgpu_ps <2 x float> @saddsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX10-NEXT: v_cmp_gt_i64_e64 s0, 0, v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4388,7 +4388,7 @@ define amdgpu_ps <2 x float> @saddsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX11-NEXT: v_cmp_gt_i64_e64 s0, 0, v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4442,15 +4442,15 @@ define amdgpu_ps <2 x float> @saddsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
; GFX9-LABEL: saddsat_i48_vs:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 16, v[0:1]
-; GFX9-NEXT: s_lshl_b64 s[2:3], s[0:1], 16
-; GFX9-NEXT: v_mov_b32_e32 v3, s3
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 16
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], v[2:3], v[0:1]
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], s[2:3], 0
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], s[0:1], 0
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[2:3], s[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4466,7 +4466,7 @@ define amdgpu_ps <2 x float> @saddsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[0:1], 0
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4483,7 +4483,7 @@ define amdgpu_ps <2 x float> @saddsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[0:1], 0
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4529,11 +4529,11 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[4:5], v[0:1]
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[6:7], 0, v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v5
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -4546,7 +4546,7 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX10-NEXT: v_cmp_gt_i64_e64 s4, 0, v[2:3]
; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s5, 0x80000000, v6
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
@@ -4560,7 +4560,7 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX11-NEXT: v_cmp_gt_i64_e64 s0, 0, v[2:3]
; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v6
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_cndmask_b32 v1, v5, v1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -4578,7 +4578,7 @@ define amdgpu_ps i64 @s_saddsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX6-NEXT: v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
; GFX6-NEXT: s_ashr_i32 s2, s5, 31
-; GFX6-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX6-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v0, s2
; GFX6-NEXT: v_mov_b32_e32 v1, s3
; GFX6-NEXT: v_mov_b32_e32 v2, s4
@@ -4599,7 +4599,7 @@ define amdgpu_ps i64 @s_saddsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX8-NEXT: v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
; GFX8-NEXT: s_ashr_i32 s2, s5, 31
-; GFX8-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX8-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: v_mov_b32_e32 v2, s4
@@ -4620,7 +4620,7 @@ define amdgpu_ps i64 @s_saddsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
; GFX9-NEXT: s_ashr_i32 s2, s5, 31
-; GFX9-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX9-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: v_mov_b32_e32 v2, s4
@@ -4641,7 +4641,7 @@ define amdgpu_ps i64 @s_saddsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e64 s1, s[2:3], 0
; GFX10-NEXT: v_mov_b32_e32 v1, s5
; GFX10-NEXT: s_ashr_i32 s2, s5, 31
-; GFX10-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX10-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX10-NEXT: s_xor_b32 s0, s1, s0
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4657,7 +4657,7 @@ define amdgpu_ps i64 @s_saddsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1]
; GFX11-NEXT: v_cmp_lt_i64_e64 s1, s[2:3], 0
; GFX11-NEXT: s_ashr_i32 s2, s5, 31
-; GFX11-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX11-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX11-NEXT: s_xor_b32 s0, s1, s0
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4702,11 +4702,11 @@ define amdgpu_ps <2 x float> @saddsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v1, vcc
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], v[2:3]
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[2:3], 0, v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], 0, v[0:1]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[2:3], s[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: ; return to shader part epilog
@@ -4718,7 +4718,7 @@ define amdgpu_ps <2 x float> @saddsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX10-NEXT: v_cmp_gt_i64_e64 s0, 0, v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4731,7 +4731,7 @@ define amdgpu_ps <2 x float> @saddsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX11-NEXT: v_cmp_gt_i64_e64 s0, 0, v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: ; return to shader part epilog
@@ -4774,11 +4774,11 @@ define amdgpu_ps <2 x float> @saddsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], v[2:3], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[0:1]
; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], s[0:1], 0
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[0:1], s[2:3]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: ; return to shader part epilog
@@ -4790,7 +4790,7 @@ define amdgpu_ps <2 x float> @saddsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[0:1], 0
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4803,7 +4803,7 @@ define amdgpu_ps <2 x float> @saddsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[0:1], 0
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: ; return to shader part epilog
@@ -4866,21 +4866,20 @@ define <2 x i64> @v_saddsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v0, v4
; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v1, v5, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[8:9], v[0:1]
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[6:7], 0, v[4:5]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[8:9], v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[4:5]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v9
-; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, v0, v1
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v2, v6
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v3, v7, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[4:5], v[2:3]
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[6:7], 0, v[6:7]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[2:3]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[6:7]
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v5
-; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, 0x80000000, v2
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v3, 0x80000000, v2
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -4896,10 +4895,10 @@ define <2 x i64> @v_saddsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[8:9], v[0:1]
; GFX10-NEXT: v_cmp_gt_i64_e64 s4, 0, v[4:5]
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v11
-; GFX10-NEXT: v_cmp_gt_i64_e64 s6, 0, v[6:7]
-; GFX10-NEXT: v_add_co_u32 v1, s5, 0x80000000, v12
; GFX10-NEXT: v_cmp_lt_i64_e64 s5, v[10:11], v[2:3]
-; GFX10-NEXT: v_add_co_u32 v3, s7, 0x80000000, v4
+; GFX10-NEXT: v_cmp_gt_i64_e64 s6, 0, v[6:7]
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v12
+; GFX10-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v12, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc_lo
@@ -4921,8 +4920,8 @@ define <2 x i64> @v_saddsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v11
; GFX11-NEXT: v_cmp_lt_i64_e64 s1, v[10:11], v[2:3]
; GFX11-NEXT: v_cmp_gt_i64_e64 s2, 0, v[6:7]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v12
-; GFX11-NEXT: v_add_co_u32 v3, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v12
+; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v8, v12 :: v_dual_cndmask_b32 v1, v9, v1
; GFX11-NEXT: s_xor_b32 vcc_lo, s2, s1
@@ -4942,7 +4941,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
; GFX6-NEXT: v_cmp_lt_i64_e64 s[0:1], s[4:5], 0
; GFX6-NEXT: s_ashr_i32 s4, s9, 31
-; GFX6-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX6-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: v_mov_b32_e32 v1, s5
; GFX6-NEXT: v_mov_b32_e32 v2, s8
@@ -4957,7 +4956,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
; GFX6-NEXT: v_cmp_lt_i64_e64 s[2:3], s[6:7], 0
; GFX6-NEXT: s_ashr_i32 s4, s1, 31
-; GFX6-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX6-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: v_mov_b32_e32 v1, s5
; GFX6-NEXT: v_mov_b32_e32 v4, s0
@@ -4980,7 +4979,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
; GFX8-NEXT: v_cmp_lt_i64_e64 s[0:1], s[4:5], 0
; GFX8-NEXT: s_ashr_i32 s4, s9, 31
-; GFX8-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX8-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: v_mov_b32_e32 v2, s8
@@ -4995,7 +4994,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
; GFX8-NEXT: v_cmp_lt_i64_e64 s[2:3], s[6:7], 0
; GFX8-NEXT: s_ashr_i32 s4, s1, 31
-; GFX8-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX8-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: v_mov_b32_e32 v4, s0
@@ -5018,7 +5017,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], s[4:5], 0
; GFX9-NEXT: s_ashr_i32 s4, s9, 31
-; GFX9-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX9-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s8
@@ -5033,7 +5032,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], s[6:7], 0
; GFX9-NEXT: s_ashr_i32 s4, s1, 31
-; GFX9-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX9-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v4, s0
@@ -5056,7 +5055,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX10-NEXT: v_cmp_lt_i64_e64 s1, s[4:5], 0
; GFX10-NEXT: s_ashr_i32 s4, s9, 31
; GFX10-NEXT: v_mov_b32_e32 v1, s9
-; GFX10-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX10-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX10-NEXT: s_xor_b32 s8, s1, s0
; GFX10-NEXT: s_add_u32 s0, s2, s6
; GFX10-NEXT: s_addc_u32 s1, s3, s7
@@ -5067,7 +5066,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s4, s8
; GFX10-NEXT: s_ashr_i32 s4, s1, 31
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s5, s8
-; GFX10-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX10-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX10-NEXT: s_xor_b32 s1, s3, s2
; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s4, s1
; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s0, s1
@@ -5085,7 +5084,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[8:9], s[0:1]
; GFX11-NEXT: v_cmp_lt_i64_e64 s1, s[4:5], 0
; GFX11-NEXT: s_ashr_i32 s4, s9, 31
-; GFX11-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX11-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX11-NEXT: s_xor_b32 s8, s1, s0
; GFX11-NEXT: s_add_u32 s0, s2, s6
; GFX11-NEXT: s_addc_u32 s1, s3, s7
@@ -5095,7 +5094,7 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s4, s8
; GFX11-NEXT: s_ashr_i32 s4, s1, 31
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s5, s8
-; GFX11-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX11-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX11-NEXT: s_xor_b32 s1, s3, s2
; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s4, s1
; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s0, s1
@@ -5132,7 +5131,7 @@ define amdgpu_ps i128 @s_saddsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX6-NEXT: s_ashr_i32 s0, s9, 31
; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX6-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX6-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v1, s0
; GFX6-NEXT: v_mov_b32_e32 v2, s4
; GFX6-NEXT: v_mov_b32_e32 v3, s5
@@ -5179,7 +5178,7 @@ define amdgpu_ps i128 @s_saddsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX8-NEXT: s_ashr_i32 s0, s9, 31
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX8-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX8-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v1, s0
; GFX8-NEXT: v_mov_b32_e32 v2, s4
; GFX8-NEXT: v_mov_b32_e32 v3, s5
@@ -5226,7 +5225,7 @@ define amdgpu_ps i128 @s_saddsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: s_ashr_i32 s0, s9, 31
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX9-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: v_mov_b32_e32 v3, s5
@@ -5269,7 +5268,7 @@ define amdgpu_ps i128 @s_saddsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, 0, s0
; GFX10-NEXT: v_mov_b32_e32 v2, s5
; GFX10-NEXT: s_ashr_i32 s0, s9, 31
-; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX10-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX10-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX10-NEXT: v_mov_b32_e32 v1, s4
; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
@@ -5310,7 +5309,7 @@ define amdgpu_ps i128 @s_saddsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, 0, s0
; GFX11-NEXT: v_mov_b32_e32 v2, s5
; GFX11-NEXT: s_ashr_i32 s0, s9, 31
-; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX11-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX11-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX11-NEXT: v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 1, v0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
@@ -5412,9 +5411,8 @@ define amdgpu_ps <4 x float> @saddsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
; GFX9-NEXT: v_ashrrev_i32_e32 v3, 31, v5
; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, 0, vcc
; GFX9-NEXT: v_xor_b32_e32 v2, v2, v6
-; GFX9-NEXT: v_bfrev_b32_e32 v6, 1
-; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v3, v6
; GFX9-NEXT: v_and_b32_e32 v2, 1, v2
+; GFX9-NEXT: v_add_u32_e32 v6, 0x80000000, v3
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
@@ -5440,7 +5438,7 @@ define amdgpu_ps <4 x float> @saddsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v3, 31, v5
; GFX10-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
; GFX10-NEXT: v_xor_b32_e32 v2, v2, v6
-; GFX10-NEXT: v_add_co_u32 v6, s0, 0x80000000, v3
+; GFX10-NEXT: v_add_nc_u32_e32 v6, 0x80000000, v3
; GFX10-NEXT: v_and_b32_e32 v2, 1, v2
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
@@ -5467,7 +5465,7 @@ define amdgpu_ps <4 x float> @saddsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v3, 31, v5
; GFX11-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
; GFX11-NEXT: v_xor_b32_e32 v2, v2, v6
-; GFX11-NEXT: v_add_co_u32 v6, null, 0x80000000, v3
+; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x80000000, v3
; GFX11-NEXT: v_and_b32_e32 v2, 1, v2
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
@@ -5569,9 +5567,8 @@ define amdgpu_ps <4 x float> @saddsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, 0, s[0:1]
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v7
-; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
-; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v2, v1
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_add_u32_e32 v3, 0x80000000, v2
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
@@ -5597,9 +5594,9 @@ define amdgpu_ps <4 x float> @saddsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3]
; GFX10-NEXT: v_ashrrev_i32_e32 v2, 31, v7
+; GFX10-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v2
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v1, v8, 0, s0
-; GFX10-NEXT: v_add_co_u32 v3, s0, 0x80000000, v2
; GFX10-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
@@ -5627,15 +5624,14 @@ define amdgpu_ps <4 x float> @saddsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3]
; GFX11-NEXT: v_ashrrev_i32_e32 v2, 31, v7
-; GFX11-NEXT: v_add_co_u32 v3, null, 0x80000000, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v1, v0 :: v_dual_add_nc_u32 v3, 0x80000000, v2
; GFX11-NEXT: v_cndmask_b32_e64 v1, v8, 0, s0
; GFX11-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc_lo
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v2 :: v_dual_cndmask_b32 v3, v7, v3
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v2 :: v_dual_cndmask_b32 v3, v7, v3
+; GFX11-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo
; GFX11-NEXT: ; return to shader part epilog
%result = call i128 @llvm.sadd.sat.i128(i128 %lhs, i128 %rhs)
%cast = bitcast i128 %result to <4 x float>
@@ -5762,12 +5758,11 @@ define <2 x i128> @v_saddsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v17
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[10:11]
+; GFX9-NEXT: v_add_u32_e32 v3, 0x80000000, v2
; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
-; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
-; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v2, v1
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v2, vcc
@@ -5786,11 +5781,11 @@ define <2 x i128> @v_saddsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX9-NEXT: v_ashrrev_i32_e32 v6, 31, v11
; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[14:15]
+; GFX9-NEXT: v_add_u32_e32 v7, 0x80000000, v6
; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
; GFX9-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
; GFX9-NEXT: v_xor_b32_e32 v4, v5, v4
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, 0x80000000, v6
; GFX9-NEXT: v_and_b32_e32 v4, 1, v4
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
; GFX9-NEXT: v_cndmask_b32_e32 v4, v8, v6, vcc
@@ -5832,18 +5827,18 @@ define <2 x i128> @v_saddsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v19
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc_lo
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[14:15]
-; GFX10-NEXT: v_ashrrev_i32_e32 v3, 31, v17
-; GFX10-NEXT: v_add_co_u32 v7, s5, 0x80000000, v6
+; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x80000000, v6
; GFX10-NEXT: v_cndmask_b32_e64 v2, v4, 0, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v4, s4, 0x80000000, v3
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX10-NEXT: v_xor_b32_e32 v1, v2, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v3, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v16, v3, vcc_lo
-; GFX10-NEXT: v_and_b32_e32 v5, 1, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v3, vcc_lo
+; GFX10-NEXT: v_ashrrev_i32_e32 v2, 31, v17
+; GFX10-NEXT: v_and_b32_e32 v3, 1, v1
+; GFX10-NEXT: v_add_nc_u32_e32 v4, 0x80000000, v2
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v2, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v2, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v16, v2, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, v3
; GFX10-NEXT: v_cndmask_b32_e32 v3, v17, v4, vcc_lo
-; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, v5
; GFX10-NEXT: v_cndmask_b32_e64 v4, v12, v6, s4
; GFX10-NEXT: v_cndmask_b32_e64 v5, v13, v6, s4
; GFX10-NEXT: v_cndmask_b32_e64 v6, v18, v6, s4
@@ -5882,18 +5877,17 @@ define <2 x i128> @v_saddsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v19
; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc_lo
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[14:15]
-; GFX11-NEXT: v_ashrrev_i32_e32 v3, 31, v17
-; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX11-NEXT: v_add_co_u32 v7, null, 0x80000000, v6
+; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x80000000, v6
; GFX11-NEXT: v_cndmask_b32_e64 v2, v4, 0, vcc_lo
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: v_add_co_u32 v4, null, 0x80000000, v3
; GFX11-NEXT: v_xor_b32_e32 v1, v2, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v8, v3, vcc_lo
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v16, v3 :: v_dual_and_b32 v5, 1, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v9, v3, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v17, v4, vcc_lo
-; GFX11-NEXT: v_cmp_ne_u32_e64 s0, 0, v5
+; GFX11-NEXT: v_ashrrev_i32_e32 v2, 31, v17
+; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x80000000, v2
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v8, v2 :: v_dual_and_b32 v3, 1, v1
+; GFX11-NEXT: v_cmp_ne_u32_e64 s0, 0, v3
+; GFX11-NEXT: v_cndmask_b32_e32 v1, v9, v2, vcc_lo
+; GFX11-NEXT: v_dual_cndmask_b32 v2, v16, v2 :: v_dual_cndmask_b32 v3, v17, v4
; GFX11-NEXT: v_cndmask_b32_e64 v4, v12, v6, s0
; GFX11-NEXT: v_cndmask_b32_e64 v5, v13, v6, s0
; GFX11-NEXT: v_cndmask_b32_e64 v6, v18, v6, s0
@@ -5927,7 +5921,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX6-NEXT: s_ashr_i32 s0, s17, 31
; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX6-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX6-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v1, s0
; GFX6-NEXT: v_mov_b32_e32 v2, s8
; GFX6-NEXT: v_mov_b32_e32 v3, s9
@@ -5960,7 +5954,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX6-NEXT: s_ashr_i32 s4, s3, 31
; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX6-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX6-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v1, s4
; GFX6-NEXT: v_mov_b32_e32 v2, s0
; GFX6-NEXT: v_mov_b32_e32 v3, s1
@@ -6011,7 +6005,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX8-NEXT: s_ashr_i32 s0, s17, 31
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX8-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX8-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v1, s0
; GFX8-NEXT: v_mov_b32_e32 v2, s8
; GFX8-NEXT: v_mov_b32_e32 v3, s9
@@ -6050,7 +6044,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX8-NEXT: s_ashr_i32 s4, s3, 31
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX8-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX8-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v1, s4
; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: v_mov_b32_e32 v3, s1
@@ -6101,7 +6095,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: s_ashr_i32 s0, s17, 31
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX9-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: v_mov_b32_e32 v2, s8
; GFX9-NEXT: v_mov_b32_e32 v3, s9
@@ -6140,7 +6134,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: s_ashr_i32 s4, s3, 31
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX9-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: v_mov_b32_e32 v2, s0
; GFX9-NEXT: v_mov_b32_e32 v3, s1
@@ -6184,7 +6178,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX10-NEXT: s_and_b32 s1, 1, s1
; GFX10-NEXT: s_ashr_i32 s10, s17, 31
; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, s1
-; GFX10-NEXT: s_add_u32 s11, s10, 0x80000000
+; GFX10-NEXT: s_add_i32 s11, s10, 0x80000000
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, 0, s0
; GFX10-NEXT: s_add_u32 s0, s4, s12
@@ -6221,7 +6215,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX10-NEXT: v_xor_b32_e32 v1, v2, v1
; GFX10-NEXT: v_mov_b32_e32 v2, s17
; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s10, vcc_lo
-; GFX10-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX10-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX10-NEXT: v_readfirstlane_b32 s1, v4
; GFX10-NEXT: v_and_b32_e32 v1, 1, v1
; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s11, vcc_lo
@@ -6261,7 +6255,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX11-NEXT: s_and_b32 s1, 1, s1
; GFX11-NEXT: s_ashr_i32 s10, s17, 31
; GFX11-NEXT: v_cmp_ne_u32_e64 s0, 0, s1
-; GFX11-NEXT: s_add_u32 s11, s10, 0x80000000
+; GFX11-NEXT: s_add_i32 s11, s10, 0x80000000
; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, 0, s0
; GFX11-NEXT: s_add_u32 s0, s4, s12
@@ -6299,7 +6293,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX11-NEXT: v_and_b32_e32 v1, 1, v1
; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, s10, vcc_lo
; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s11, vcc_lo
-; GFX11-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX11-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1
; GFX11-NEXT: v_mov_b32_e32 v1, s2
; GFX11-NEXT: v_readfirstlane_b32 s1, v4
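
Every saddsat hunk above makes the same substitution: the saturation bound's high word is now formed with a plain 32-bit add (s_add_i32 / v_add_nc_u32_e32) instead of a carry-writing one (s_add_u32 / v_add_co_u32). A hand-written IR sketch of that computation, illustrative only and not taken from the tests, shows why the carry can be dropped:

define i32 @saddsat_i64_bound_hi_sketch(i32 %sum_hi) {
  %sign = ashr i32 %sum_hi, 31        ; -1 if the wrapped sum went negative, else 0
  %hi = add i32 %sign, -2147483648    ; wraps to 0x7fffffff (INT64_MAX hi) or 0x80000000 (INT64_MIN hi)
  ret i32 %hi
}

The add wraps modulo 2^32 and nothing downstream reads a carry-out, which is presumably why the carry-less forms suffice in the new checks.
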
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll
index 1061f00..2c2f8e9 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll
@@ -279,125 +279,27 @@ define i32 @v_sdiv_i32_pow2k_denom(i32 %num) {
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; CHECK-NEXT: v_rcp_iflag_f32_e32 v2, 0x45800000
-; CHECK-NEXT: v_mov_b32_e32 v3, 0xfffff000
-; CHECK-NEXT: v_mov_b32_e32 v4, 0x1000
+; CHECK-NEXT: v_lshrrev_b32_e32 v1, 20, v1
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; CHECK-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
-; CHECK-NEXT: v_xor_b32_e32 v0, v0, v1
-; CHECK-NEXT: v_cvt_u32_f32_e32 v2, v2
-; CHECK-NEXT: v_mul_lo_u32 v3, v2, v3
-; CHECK-NEXT: v_mul_hi_u32 v3, v2, v3
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; CHECK-NEXT: v_mul_hi_u32 v2, v0, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, 12, v2
-; CHECK-NEXT: v_add_i32_e32 v5, vcc, 1, v2
-; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v3
-; CHECK-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v4
-; CHECK-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[4:5]
-; CHECK-NEXT: v_subrev_i32_e32 v3, vcc, 0x1000, v0
-; CHECK-NEXT: v_cndmask_b32_e64 v0, v0, v3, s[4:5]
-; CHECK-NEXT: v_add_i32_e32 v3, vcc, 1, v2
-; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v4
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
-; CHECK-NEXT: v_xor_b32_e32 v0, v0, v1
-; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, 12, v0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%result = sdiv i32 %num, 4096
ret i32 %result
}
define <2 x i32> @v_sdiv_v2i32_pow2k_denom(<2 x i32> %num) {
-; GISEL-LABEL: v_sdiv_v2i32_pow2k_denom:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT: v_ashrrev_i32_e32 v2, 31, v0
-; GISEL-NEXT: v_mov_b32_e32 v3, 0x1000
-; GISEL-NEXT: v_cvt_f32_u32_e32 v4, 0x1000
-; GISEL-NEXT: v_mov_b32_e32 v5, 0xfffff000
-; GISEL-NEXT: v_ashrrev_i32_e32 v6, 31, v1
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; GISEL-NEXT: v_rcp_iflag_f32_e32 v4, v4
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v6
-; GISEL-NEXT: v_xor_b32_e32 v0, v0, v2
-; GISEL-NEXT: v_mul_f32_e32 v4, 0x4f7ffffe, v4
-; GISEL-NEXT: v_xor_b32_e32 v1, v1, v6
-; GISEL-NEXT: v_cvt_u32_f32_e32 v4, v4
-; GISEL-NEXT: v_mul_lo_u32 v5, v4, v5
-; GISEL-NEXT: v_mul_hi_u32 v5, v4, v5
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v5
-; GISEL-NEXT: v_mul_hi_u32 v5, v0, v4
-; GISEL-NEXT: v_mul_hi_u32 v4, v1, v4
-; GISEL-NEXT: v_lshlrev_b32_e32 v7, 12, v5
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, 1, v5
-; GISEL-NEXT: v_lshlrev_b32_e32 v9, 12, v4
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, 1, v4
-; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v7
-; GISEL-NEXT: v_sub_i32_e32 v1, vcc, v1, v9
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v3
-; GISEL-NEXT: v_cndmask_b32_e64 v5, v5, v8, s[4:5]
-; GISEL-NEXT: v_sub_i32_e32 v7, vcc, v0, v3
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[6:7], v1, v3
-; GISEL-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[6:7]
-; GISEL-NEXT: v_subrev_i32_e32 v8, vcc, 0x1000, v1
-; GISEL-NEXT: v_cndmask_b32_e64 v0, v0, v7, s[4:5]
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, 1, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v1, v1, v8, s[6:7]
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, 1, v4
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v0, v3
-; GISEL-NEXT: v_cndmask_b32_e32 v0, v5, v7, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3
-; GISEL-NEXT: v_cndmask_b32_e32 v1, v4, v8, vcc
-; GISEL-NEXT: v_xor_b32_e32 v0, v0, v2
-; GISEL-NEXT: v_xor_b32_e32 v1, v1, v6
-; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
-; GISEL-NEXT: v_sub_i32_e32 v1, vcc, v1, v6
-; GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; CGP-LABEL: v_sdiv_v2i32_pow2k_denom:
-; CGP: ; %bb.0:
-; CGP-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CGP-NEXT: v_ashrrev_i32_e32 v2, 31, v0
-; CGP-NEXT: v_rcp_iflag_f32_e32 v3, 0x45800000
-; CGP-NEXT: v_mov_b32_e32 v4, 0xfffff000
-; CGP-NEXT: v_mov_b32_e32 v5, 0x1000
-; CGP-NEXT: v_ashrrev_i32_e32 v6, 31, v1
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; CGP-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v6
-; CGP-NEXT: v_xor_b32_e32 v0, v0, v2
-; CGP-NEXT: v_cvt_u32_f32_e32 v3, v3
-; CGP-NEXT: v_xor_b32_e32 v1, v1, v6
-; CGP-NEXT: v_mul_lo_u32 v4, v3, v4
-; CGP-NEXT: v_mul_hi_u32 v4, v3, v4
-; CGP-NEXT: v_add_i32_e32 v3, vcc, v3, v4
-; CGP-NEXT: v_mul_hi_u32 v4, v0, v3
-; CGP-NEXT: v_mul_hi_u32 v3, v1, v3
-; CGP-NEXT: v_lshlrev_b32_e32 v7, 12, v4
-; CGP-NEXT: v_add_i32_e32 v8, vcc, 1, v4
-; CGP-NEXT: v_lshlrev_b32_e32 v9, 12, v3
-; CGP-NEXT: v_add_i32_e32 v10, vcc, 1, v3
-; CGP-NEXT: v_sub_i32_e32 v0, vcc, v0, v7
-; CGP-NEXT: v_sub_i32_e32 v1, vcc, v1, v9
-; CGP-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v5
-; CGP-NEXT: v_cndmask_b32_e64 v4, v4, v8, s[4:5]
-; CGP-NEXT: v_sub_i32_e32 v7, vcc, v0, v5
-; CGP-NEXT: v_cmp_ge_u32_e64 s[6:7], v1, v5
-; CGP-NEXT: v_cndmask_b32_e64 v3, v3, v10, s[6:7]
-; CGP-NEXT: v_subrev_i32_e32 v8, vcc, 0x1000, v1
-; CGP-NEXT: v_cndmask_b32_e64 v0, v0, v7, s[4:5]
-; CGP-NEXT: v_add_i32_e32 v7, vcc, 1, v4
-; CGP-NEXT: v_cndmask_b32_e64 v1, v1, v8, s[6:7]
-; CGP-NEXT: v_add_i32_e32 v8, vcc, 1, v3
-; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v5
-; CGP-NEXT: v_cndmask_b32_e32 v0, v4, v7, vcc
-; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v1, v5
-; CGP-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
-; CGP-NEXT: v_xor_b32_e32 v0, v0, v2
-; CGP-NEXT: v_xor_b32_e32 v1, v1, v6
-; CGP-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
-; CGP-NEXT: v_sub_i32_e32 v1, vcc, v1, v6
-; CGP-NEXT: s_setpc_b64 s[30:31]
+; CHECK-LABEL: v_sdiv_v2i32_pow2k_denom:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i32_e32 v2, 31, v0
+; CHECK-NEXT: v_ashrrev_i32_e32 v3, 31, v1
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 20, v2
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 20, v3
+; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; CHECK-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, 12, v0
+; CHECK-NEXT: v_ashrrev_i32_e32 v1, 12, v1
+; CHECK-NEXT: s_setpc_b64 s[30:31]
%result = sdiv <2 x i32> %num, <i32 4096, i32 4096>
ret <2 x i32> %result
}
@@ -884,3 +786,24 @@ define <2 x i32> @v_sdiv_v2i32_24bit(<2 x i32> %num, <2 x i32> %den) {
%result = sdiv <2 x i32> %num.mask, %den.mask
ret <2 x i32> %result
}
+
+define i32 @v_sdiv_i32_exact(i32 %num) {
+; CHECK-LABEL: v_sdiv_i32_exact:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, 12, v0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact i32 %num, 4096
+ ret i32 %result
+}
+
+define <2 x i32> @v_sdiv_v2i32_exact(<2 x i32> %num) {
+; CHECK-LABEL: v_sdiv_v2i32_exact:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, 12, v0
+; CHECK-NEXT: v_ashrrev_i32_e32 v1, 10, v1
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact <2 x i32> %num, <i32 4096, i32 1024>
+ ret <2 x i32> %result
+}
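
The rewritten checks above encode the usual round-toward-zero lowering of a signed division by a power of two: a negative numerator is biased by 2^k - 1 before the arithmetic shift, and an exact division needs no bias at all. A hand-written sketch of the i32-by-4096 case, mirroring the check lines rather than copied from the test:

define i32 @sdiv_by_4096_sketch(i32 %x) {
  %sign = ashr i32 %x, 31      ; 0 for non-negative %x, -1 for negative %x
  %bias = lshr i32 %sign, 20   ; 0 or 4095 = 2^12 - 1
  %biased = add i32 %x, %bias  ; rounds a negative quotient toward zero
  %q = ashr i32 %biased, 12    ; %x sdiv 4096
  ret i32 %q
}

With the exact flag the remainder is known to be zero, so the bias is dead and the whole division collapses to the single shift seen in the new v_sdiv_i32_exact checks.
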
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
index 0a6b7af..377fa24 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
@@ -999,126 +999,11 @@ define i64 @v_sdiv_i64_pow2k_denom(i64 %num) {
; CHECK-LABEL: v_sdiv_i64_pow2k_denom:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cvt_f32_u32_e32 v2, 0x1000
-; CHECK-NEXT: v_cvt_f32_ubyte0_e32 v3, 0
-; CHECK-NEXT: v_mov_b32_e32 v6, 0xfffff000
-; CHECK-NEXT: v_mac_f32_e32 v2, 0x4f800000, v3
-; CHECK-NEXT: v_rcp_iflag_f32_e32 v2, v2
-; CHECK-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v2
-; CHECK-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2
-; CHECK-NEXT: v_trunc_f32_e32 v4, v3
-; CHECK-NEXT: v_mac_f32_e32 v2, 0xcf800000, v4
-; CHECK-NEXT: v_cvt_u32_f32_e32 v5, v2
-; CHECK-NEXT: v_cvt_u32_f32_e32 v7, v4
-; CHECK-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, v5, 0
-; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v7, v[3:4]
-; CHECK-NEXT: v_mul_hi_u32 v8, v5, v2
-; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], -1, v5, v[3:4]
-; CHECK-NEXT: v_mul_lo_u32 v4, v7, v2
-; CHECK-NEXT: v_mul_hi_u32 v2, v7, v2
-; CHECK-NEXT: v_mul_lo_u32 v9, v5, v3
-; CHECK-NEXT: v_mul_lo_u32 v10, v7, v3
-; CHECK-NEXT: v_mul_hi_u32 v11, v5, v3
-; CHECK-NEXT: v_mul_hi_u32 v3, v7, v3
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, v4, v9
-; CHECK-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v10, v2
-; CHECK-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, v4, v8
-; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, v9, v4
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v11
-; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v8, vcc, v10, v8
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v4
-; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, v8, v4
-; CHECK-NEXT: v_add_i32_e32 v3, vcc, v3, v4
-; CHECK-NEXT: v_add_i32_e32 v5, vcc, v5, v2
-; CHECK-NEXT: v_addc_u32_e32 v7, vcc, v7, v3, vcc
-; CHECK-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, v5, 0
-; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v7, v[3:4]
-; CHECK-NEXT: v_ashrrev_i32_e32 v6, 31, v1
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v6
-; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], -1, v5, v[3:4]
-; CHECK-NEXT: v_addc_u32_e32 v1, vcc, v1, v6, vcc
-; CHECK-NEXT: v_xor_b32_e32 v4, v0, v6
-; CHECK-NEXT: v_mul_lo_u32 v0, v7, v2
-; CHECK-NEXT: v_mul_lo_u32 v8, v5, v3
-; CHECK-NEXT: v_xor_b32_e32 v9, v1, v6
-; CHECK-NEXT: v_mul_hi_u32 v1, v5, v2
-; CHECK-NEXT: v_mul_hi_u32 v2, v7, v2
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v8
-; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; CHECK-NEXT: v_mul_lo_u32 v1, v7, v3
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v8, v0
-; CHECK-NEXT: v_mul_hi_u32 v8, v5, v3
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, v1, v2
-; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, v1, v8
-; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v8
-; CHECK-NEXT: v_mul_hi_u32 v3, v7, v3
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v1, v0
-; CHECK-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, v2, v1
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, v3, v1
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v5, v0
-; CHECK-NEXT: v_addc_u32_e32 v1, vcc, v7, v1, vcc
-; CHECK-NEXT: v_mul_lo_u32 v2, v9, v0
-; CHECK-NEXT: v_mul_lo_u32 v3, v4, v1
-; CHECK-NEXT: v_mul_hi_u32 v7, v4, v0
-; CHECK-NEXT: v_mul_hi_u32 v0, v9, v0
-; CHECK-NEXT: v_mov_b32_e32 v5, 0x1000
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; CHECK-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v7
-; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT: v_mul_lo_u32 v7, v9, v1
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; CHECK-NEXT: v_mul_hi_u32 v3, v4, v1
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v7, v0
-; CHECK-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v3
-; CHECK-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v3, vcc, v7, v3
-; CHECK-NEXT: v_add_i32_e32 v7, vcc, v0, v2
-; CHECK-NEXT: v_mul_hi_u32 v8, v9, v1
-; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v5, v7, 0
-; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; CHECK-NEXT: v_add_i32_e32 v3, vcc, v8, v2
-; CHECK-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v5, v3, v[1:2]
-; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v4, v0
-; CHECK-NEXT: v_subb_u32_e64 v2, s[4:5], v9, v1, vcc
-; CHECK-NEXT: v_sub_i32_e64 v1, s[4:5], v9, v1
-; CHECK-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v5
-; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v5
-; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[4:5]
-; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v2
-; CHECK-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT: v_cndmask_b32_e64 v2, -1, v4, s[4:5]
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, 1, v7
-; CHECK-NEXT: v_addc_u32_e32 v8, vcc, 0, v3, vcc
-; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v5
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; CHECK-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, 1, v4
-; CHECK-NEXT: v_addc_u32_e32 v5, vcc, 0, v8, vcc
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v1, v8, v5, vcc
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v7, v0, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; CHECK-NEXT: v_xor_b32_e32 v0, v0, v6
-; CHECK-NEXT: v_xor_b32_e32 v1, v1, v6
-; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v6
-; CHECK-NEXT: v_subb_u32_e32 v1, vcc, v1, v6, vcc
+; CHECK-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 20, v2
+; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; CHECK-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
; CHECK-NEXT: s_setpc_b64 s[30:31]
%result = sdiv i64 %num, 4096
ret i64 %result
@@ -1128,473 +1013,31 @@ define <2 x i64> @v_sdiv_v2i64_pow2k_denom(<2 x i64> %num) {
; GISEL-LABEL: v_sdiv_v2i64_pow2k_denom:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT: v_cvt_f32_u32_e32 v4, 0x1000
-; GISEL-NEXT: v_cvt_f32_ubyte0_e32 v5, 0
-; GISEL-NEXT: s_sub_u32 s6, 0, 0x1000
-; GISEL-NEXT: s_subb_u32 s7, 0, 0
-; GISEL-NEXT: v_mac_f32_e32 v4, 0x4f800000, v5
-; GISEL-NEXT: v_rcp_iflag_f32_e32 v4, v4
-; GISEL-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
-; GISEL-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
-; GISEL-NEXT: v_trunc_f32_e32 v7, v5
-; GISEL-NEXT: v_mac_f32_e32 v4, 0xcf800000, v7
-; GISEL-NEXT: v_cvt_u32_f32_e32 v6, v4
-; GISEL-NEXT: v_cvt_u32_f32_e32 v7, v7
-; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], s6, v6, 0
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s6, v7, v[5:6]
-; GISEL-NEXT: v_mul_lo_u32 v5, v7, v4
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s7, v6, v[8:9]
-; GISEL-NEXT: v_mul_hi_u32 v9, v6, v4
-; GISEL-NEXT: v_mul_hi_u32 v4, v7, v4
-; GISEL-NEXT: v_mul_lo_u32 v10, v6, v8
-; GISEL-NEXT: v_mul_lo_u32 v11, v7, v8
-; GISEL-NEXT: v_mul_hi_u32 v12, v6, v8
-; GISEL-NEXT: v_mul_hi_u32 v8, v7, v8
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v10
-; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v10, v5
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v11, v4
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v12
-; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v9, v10
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v9, v5
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
-; GISEL-NEXT: v_add_i32_e32 v11, vcc, v6, v4
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s6, v11, 0
-; GISEL-NEXT: v_addc_u32_e32 v5, vcc, v7, v5, vcc
-; GISEL-NEXT: v_mov_b32_e32 v4, v9
-; GISEL-NEXT: v_mad_u64_u32 v[9:10], s[4:5], s6, v5, v[4:5]
; GISEL-NEXT: v_ashrrev_i32_e32 v4, 31, v1
+; GISEL-NEXT: v_lshrrev_b32_e32 v4, 20, v4
+; GISEL-NEXT: v_ashrrev_i32_e32 v5, 31, v3
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v4
-; GISEL-NEXT: v_mad_u64_u32 v[9:10], s[4:5], s7, v11, v[9:10]
-; GISEL-NEXT: v_addc_u32_e32 v1, vcc, v1, v4, vcc
-; GISEL-NEXT: v_xor_b32_e32 v10, v0, v4
-; GISEL-NEXT: v_mul_lo_u32 v0, v5, v8
-; GISEL-NEXT: v_mul_lo_u32 v12, v11, v9
-; GISEL-NEXT: v_xor_b32_e32 v13, v1, v4
-; GISEL-NEXT: v_mul_hi_u32 v1, v11, v8
-; GISEL-NEXT: v_mul_hi_u32 v8, v5, v8
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v12
-; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v1, v5, v9
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v12, v0
-; GISEL-NEXT: v_mul_hi_u32 v12, v11, v9
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v12
-; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v12
-; GISEL-NEXT: v_mul_hi_u32 v9, v5, v9
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v1, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v8, v1
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v9, v1
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v11, v0
-; GISEL-NEXT: v_addc_u32_e32 v1, vcc, v5, v1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v8, v13, v0
-; GISEL-NEXT: v_mul_lo_u32 v9, v10, v1
-; GISEL-NEXT: v_mul_hi_u32 v11, v10, v0
-; GISEL-NEXT: v_mul_hi_u32 v0, v13, v0
-; GISEL-NEXT: v_mov_b32_e32 v5, 0x1000
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v9
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v11
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v11, v13, v1
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v9, v8
-; GISEL-NEXT: v_mul_hi_u32 v9, v10, v1
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v11, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v9
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v11, v9
-; GISEL-NEXT: v_add_i32_e32 v11, vcc, v0, v8
-; GISEL-NEXT: v_mul_hi_u32 v12, v13, v1
-; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v5, v11, 0
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v9, v8
-; GISEL-NEXT: v_add_i32_e32 v12, vcc, v12, v8
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v5, v12, v[1:2]
-; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v10, v0
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], 0, v11, v[8:9]
-; GISEL-NEXT: s_sub_u32 s6, 0, 0x1000
-; GISEL-NEXT: s_subb_u32 s7, 0, 0
-; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], v13, v8, vcc
-; GISEL-NEXT: v_sub_i32_e64 v8, s[4:5], v13, v8
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1
-; GISEL-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v8, vcc
-; GISEL-NEXT: v_sub_i32_e32 v8, vcc, v0, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v10, -1, v9, s[4:5]
-; GISEL-NEXT: v_subbrev_u32_e32 v9, vcc, 0, v1, vcc
-; GISEL-NEXT: v_add_i32_e32 v13, vcc, 1, v11
-; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], s6, v6, 0
-; GISEL-NEXT: v_addc_u32_e32 v14, vcc, 0, v12, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v8, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc
-; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9
-; GISEL-NEXT: v_cndmask_b32_e32 v15, -1, v8, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s6, v7, v[1:2]
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, 1, v13
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s7, v6, v[8:9]
-; GISEL-NEXT: v_addc_u32_e32 v16, vcc, 0, v14, vcc
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15
-; GISEL-NEXT: v_cndmask_b32_e32 v9, v13, v1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v1, v7, v0
-; GISEL-NEXT: v_mul_lo_u32 v13, v6, v8
-; GISEL-NEXT: v_mul_hi_u32 v15, v6, v0
-; GISEL-NEXT: v_cndmask_b32_e32 v14, v14, v16, vcc
-; GISEL-NEXT: v_mul_hi_u32 v0, v7, v0
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v13
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v15
-; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v15, v7, v8
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v13, v1
-; GISEL-NEXT: v_mul_hi_u32 v13, v6, v8
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v15, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v13
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v13, vcc, v15, v13
-; GISEL-NEXT: v_mul_hi_u32 v8, v7, v8
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v13, v1
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v8, v1
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v6, v0
-; GISEL-NEXT: v_addc_u32_e32 v13, vcc, v7, v1, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], s6, v8, 0
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
-; GISEL-NEXT: v_cndmask_b32_e32 v9, v11, v9, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], s6, v13, v[1:2]
-; GISEL-NEXT: v_xor_b32_e32 v1, v9, v4
-; GISEL-NEXT: v_ashrrev_i32_e32 v9, 31, v3
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], s7, v8, v[6:7]
-; GISEL-NEXT: v_cndmask_b32_e32 v10, v12, v14, vcc
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v9
-; GISEL-NEXT: v_addc_u32_e32 v3, vcc, v3, v9, vcc
-; GISEL-NEXT: v_xor_b32_e32 v11, v2, v9
-; GISEL-NEXT: v_mul_lo_u32 v2, v13, v0
-; GISEL-NEXT: v_mul_lo_u32 v7, v8, v6
-; GISEL-NEXT: v_xor_b32_e32 v12, v3, v9
-; GISEL-NEXT: v_mul_hi_u32 v3, v8, v0
-; GISEL-NEXT: v_mul_hi_u32 v0, v13, v0
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v7
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v3, v13, v6
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v7, v2
-; GISEL-NEXT: v_mul_hi_u32 v7, v8, v6
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v3, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v7
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, v3, v7
-; GISEL-NEXT: v_mul_hi_u32 v6, v13, v6
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v6, v2
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
-; GISEL-NEXT: v_addc_u32_e32 v2, vcc, v13, v2, vcc
-; GISEL-NEXT: v_mul_lo_u32 v3, v12, v0
-; GISEL-NEXT: v_mul_lo_u32 v6, v11, v2
-; GISEL-NEXT: v_mul_hi_u32 v7, v11, v0
-; GISEL-NEXT: v_mul_hi_u32 v0, v12, v0
-; GISEL-NEXT: v_xor_b32_e32 v8, v10, v4
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, v3, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, v3, v7
-; GISEL-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v7, v12, v2
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, v6, v3
-; GISEL-NEXT: v_mul_hi_u32 v6, v11, v2
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v7, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v6, vcc, v7, v6
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, v0, v3
-; GISEL-NEXT: v_mul_hi_u32 v7, v12, v2
-; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v5, v10, 0
-; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v6, v0
-; GISEL-NEXT: v_add_i32_e32 v13, vcc, v7, v0
-; GISEL-NEXT: v_mov_b32_e32 v0, v3
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v5, v13, v[0:1]
-; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v1, v4
-; GISEL-NEXT: v_subb_u32_e32 v1, vcc, v8, v4, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[4:5], 0, v10, v[6:7]
-; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v11, v2
-; GISEL-NEXT: v_subb_u32_e64 v4, s[4:5], v12, v3, vcc
-; GISEL-NEXT: v_sub_i32_e64 v3, s[4:5], v12, v3
-; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v2, v5
-; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v2, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v4
-; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; GISEL-NEXT: v_cndmask_b32_e64 v4, -1, v6, s[4:5]
-; GISEL-NEXT: v_add_i32_e32 v6, vcc, 1, v10
-; GISEL-NEXT: v_addc_u32_e32 v7, vcc, 0, v13, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v2, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
-; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
-; GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, 1, v6
-; GISEL-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v6, v3, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v7, v5, vcc
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v13, v3, vcc
-; GISEL-NEXT: v_xor_b32_e32 v2, v2, v9
-; GISEL-NEXT: v_xor_b32_e32 v3, v3, v9
-; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v2, v9
-; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v3, v9, vcc
+; GISEL-NEXT: v_lshrrev_b32_e32 v5, 20, v5
+; GISEL-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v5
+; GISEL-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; GISEL-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
+; GISEL-NEXT: v_ashr_i64 v[2:3], v[2:3], 12
; GISEL-NEXT: s_setpc_b64 s[30:31]
;
; CGP-LABEL: v_sdiv_v2i64_pow2k_denom:
; CGP: ; %bb.0:
; CGP-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CGP-NEXT: v_cvt_f32_u32_e32 v4, 0x1000
-; CGP-NEXT: v_cvt_f32_ubyte0_e32 v5, 0
-; CGP-NEXT: v_mov_b32_e32 v6, 0xfffff000
-; CGP-NEXT: v_mac_f32_e32 v4, 0x4f800000, v5
-; CGP-NEXT: v_rcp_iflag_f32_e32 v4, v4
-; CGP-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
-; CGP-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
-; CGP-NEXT: v_trunc_f32_e32 v7, v5
-; CGP-NEXT: v_mac_f32_e32 v4, 0xcf800000, v7
-; CGP-NEXT: v_cvt_u32_f32_e32 v8, v4
-; CGP-NEXT: v_cvt_u32_f32_e32 v9, v7
-; CGP-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v6, v8, 0
-; CGP-NEXT: v_mov_b32_e32 v7, v5
-; CGP-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v6, v9, v[7:8]
-; CGP-NEXT: v_mul_hi_u32 v12, v9, v4
-; CGP-NEXT: v_mad_u64_u32 v[13:14], s[4:5], -1, v8, v[10:11]
-; CGP-NEXT: v_mul_lo_u32 v10, v9, v4
-; CGP-NEXT: v_mul_hi_u32 v11, v8, v4
-; CGP-NEXT: v_mul_lo_u32 v4, v8, v13
-; CGP-NEXT: v_mul_lo_u32 v7, v9, v13
-; CGP-NEXT: v_mul_hi_u32 v14, v8, v13
-; CGP-NEXT: v_mul_hi_u32 v13, v9, v13
-; CGP-NEXT: v_add_i32_e32 v4, vcc, v10, v4
-; CGP-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v4, vcc, v4, v11
-; CGP-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v4, vcc, v15, v4
-; CGP-NEXT: v_add_i32_e32 v7, vcc, v7, v12
-; CGP-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v7, vcc, v7, v14
-; CGP-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v14, vcc, v15, v14
-; CGP-NEXT: v_add_i32_e32 v4, vcc, v7, v4
-; CGP-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v7, vcc, v14, v7
-; CGP-NEXT: v_add_i32_e32 v7, vcc, v13, v7
-; CGP-NEXT: v_add_i32_e32 v16, vcc, v8, v4
-; CGP-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v6, v16, 0
-; CGP-NEXT: v_addc_u32_e32 v17, vcc, v9, v7, vcc
-; CGP-NEXT: v_mov_b32_e32 v4, v14
-; CGP-NEXT: v_mad_u64_u32 v[14:15], s[4:5], v6, v17, v[4:5]
-; CGP-NEXT: v_ashrrev_i32_e32 v7, 31, v1
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v7
-; CGP-NEXT: v_mad_u64_u32 v[14:15], s[4:5], -1, v16, v[14:15]
-; CGP-NEXT: v_addc_u32_e32 v1, vcc, v1, v7, vcc
-; CGP-NEXT: v_xor_b32_e32 v15, v0, v7
-; CGP-NEXT: v_mul_lo_u32 v0, v17, v13
-; CGP-NEXT: v_mul_lo_u32 v4, v16, v14
-; CGP-NEXT: v_xor_b32_e32 v18, v1, v7
-; CGP-NEXT: v_mul_hi_u32 v1, v16, v13
-; CGP-NEXT: v_mul_hi_u32 v13, v17, v13
+; CGP-NEXT: v_ashrrev_i32_e32 v4, 31, v1
+; CGP-NEXT: v_lshrrev_b32_e32 v4, 20, v4
; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v4
-; CGP-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; CGP-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; CGP-NEXT: v_mul_lo_u32 v1, v17, v14
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v4, v0
-; CGP-NEXT: v_mul_hi_u32 v4, v16, v14
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v13
-; CGP-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v4
-; CGP-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v4, vcc, v13, v4
-; CGP-NEXT: v_mul_hi_u32 v13, v17, v14
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v1, v0
-; CGP-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v4, v1
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v13, v1
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v16, v0
-; CGP-NEXT: v_addc_u32_e32 v1, vcc, v17, v1, vcc
-; CGP-NEXT: v_mul_lo_u32 v13, v18, v0
-; CGP-NEXT: v_mul_lo_u32 v14, v15, v1
-; CGP-NEXT: v_mul_hi_u32 v16, v15, v0
-; CGP-NEXT: v_mul_hi_u32 v0, v18, v0
-; CGP-NEXT: v_mov_b32_e32 v4, 0x1000
-; CGP-NEXT: v_add_i32_e32 v13, vcc, v13, v14
-; CGP-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v13, vcc, v13, v16
-; CGP-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; CGP-NEXT: v_mul_lo_u32 v16, v18, v1
-; CGP-NEXT: v_add_i32_e32 v13, vcc, v14, v13
-; CGP-NEXT: v_mul_hi_u32 v14, v15, v1
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v16, v0
-; CGP-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v14
-; CGP-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v14, vcc, v16, v14
-; CGP-NEXT: v_add_i32_e32 v16, vcc, v0, v13
-; CGP-NEXT: v_mul_hi_u32 v17, v18, v1
-; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v4, v16, 0
-; CGP-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v13, vcc, v14, v13
-; CGP-NEXT: v_add_i32_e32 v17, vcc, v17, v13
-; CGP-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v4, v17, v[1:2]
-; CGP-NEXT: v_sub_i32_e32 v0, vcc, v15, v0
-; CGP-NEXT: v_subb_u32_e64 v1, s[4:5], v18, v13, vcc
-; CGP-NEXT: v_sub_i32_e64 v13, s[4:5], v18, v13
-; CGP-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v4
-; CGP-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[4:5]
-; CGP-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1
-; CGP-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v13, vcc
-; CGP-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
-; CGP-NEXT: v_subbrev_u32_e32 v13, vcc, 0, v1, vcc
-; CGP-NEXT: v_add_i32_e32 v15, vcc, 1, v16
-; CGP-NEXT: v_addc_u32_e32 v18, vcc, 0, v17, vcc
-; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v4
-; CGP-NEXT: v_mov_b32_e32 v0, v5
-; CGP-NEXT: v_cndmask_b32_e64 v14, -1, v14, s[4:5]
-; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v9, v[0:1]
-; CGP-NEXT: v_cndmask_b32_e64 v19, 0, -1, vcc
-; CGP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v13
-; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], -1, v8, v[0:1]
-; CGP-NEXT: v_cndmask_b32_e32 v5, -1, v19, vcc
-; CGP-NEXT: v_add_i32_e32 v1, vcc, 1, v15
-; CGP-NEXT: v_mul_lo_u32 v19, v8, v0
-; CGP-NEXT: v_addc_u32_e32 v13, vcc, 0, v18, vcc
-; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
-; CGP-NEXT: v_cndmask_b32_e32 v5, v15, v1, vcc
-; CGP-NEXT: v_cndmask_b32_e32 v13, v18, v13, vcc
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v10, v19
-; CGP-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v11
-; CGP-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; CGP-NEXT: v_mul_lo_u32 v11, v9, v0
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v10, v1
-; CGP-NEXT: v_mul_hi_u32 v10, v8, v0
-; CGP-NEXT: v_add_i32_e32 v11, vcc, v11, v12
-; CGP-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v10, vcc, v11, v10
-; CGP-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v11, vcc, v12, v11
-; CGP-NEXT: v_mul_hi_u32 v0, v9, v0
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v10, v1
-; CGP-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v10, vcc, v11, v10
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v10
-; CGP-NEXT: v_add_i32_e32 v8, vcc, v8, v1
-; CGP-NEXT: v_addc_u32_e32 v9, vcc, v9, v0, vcc
-; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v8, 0
-; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
-; CGP-NEXT: v_cndmask_b32_e32 v5, v16, v5, vcc
-; CGP-NEXT: v_xor_b32_e32 v11, v5, v7
-; CGP-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v6, v9, v[1:2]
-; CGP-NEXT: v_cndmask_b32_e32 v10, v17, v13, vcc
-; CGP-NEXT: v_xor_b32_e32 v1, v10, v7
-; CGP-NEXT: v_mad_u64_u32 v[5:6], s[4:5], -1, v8, v[5:6]
-; CGP-NEXT: v_ashrrev_i32_e32 v10, 31, v3
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v10
-; CGP-NEXT: v_addc_u32_e32 v3, vcc, v3, v10, vcc
-; CGP-NEXT: v_xor_b32_e32 v12, v2, v10
-; CGP-NEXT: v_mul_lo_u32 v2, v9, v0
-; CGP-NEXT: v_mul_lo_u32 v6, v8, v5
-; CGP-NEXT: v_xor_b32_e32 v13, v3, v10
-; CGP-NEXT: v_mul_hi_u32 v3, v8, v0
-; CGP-NEXT: v_mul_hi_u32 v0, v9, v0
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v6
-; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; CGP-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CGP-NEXT: v_mul_lo_u32 v3, v9, v5
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v6, v2
-; CGP-NEXT: v_mul_hi_u32 v6, v8, v5
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v3, v0
-; CGP-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v6
-; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v3, vcc, v3, v6
-; CGP-NEXT: v_mul_hi_u32 v5, v9, v5
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; CGP-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v5, v2
-; CGP-NEXT: v_add_i32_e32 v3, vcc, v8, v0
-; CGP-NEXT: v_addc_u32_e32 v2, vcc, v9, v2, vcc
-; CGP-NEXT: v_mul_lo_u32 v5, v13, v3
-; CGP-NEXT: v_mul_lo_u32 v6, v12, v2
-; CGP-NEXT: v_sub_i32_e32 v0, vcc, v11, v7
-; CGP-NEXT: v_subb_u32_e32 v1, vcc, v1, v7, vcc
-; CGP-NEXT: v_mul_hi_u32 v7, v12, v3
-; CGP-NEXT: v_add_i32_e32 v5, vcc, v5, v6
-; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v5, vcc, v5, v7
-; CGP-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; CGP-NEXT: v_mul_lo_u32 v7, v13, v2
-; CGP-NEXT: v_mul_hi_u32 v3, v13, v3
-; CGP-NEXT: v_add_i32_e32 v5, vcc, v6, v5
-; CGP-NEXT: v_mul_hi_u32 v6, v12, v2
-; CGP-NEXT: v_add_i32_e32 v3, vcc, v7, v3
-; CGP-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v3, vcc, v3, v6
-; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v6, vcc, v7, v6
-; CGP-NEXT: v_add_i32_e32 v7, vcc, v3, v5
-; CGP-NEXT: v_mul_hi_u32 v8, v13, v2
-; CGP-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v4, v7, 0
-; CGP-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v5, vcc, v6, v5
-; CGP-NEXT: v_add_i32_e32 v8, vcc, v8, v5
-; CGP-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v4, v8, v[3:4]
-; CGP-NEXT: v_sub_i32_e32 v2, vcc, v12, v2
-; CGP-NEXT: v_subb_u32_e64 v3, s[4:5], v13, v5, vcc
-; CGP-NEXT: v_sub_i32_e64 v5, s[4:5], v13, v5
-; CGP-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
-; CGP-NEXT: v_cmp_ge_u32_e64 s[4:5], v2, v4
-; CGP-NEXT: v_sub_i32_e32 v2, vcc, v2, v4
-; CGP-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[4:5]
-; CGP-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v3
-; CGP-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
-; CGP-NEXT: v_cndmask_b32_e64 v3, -1, v6, s[4:5]
-; CGP-NEXT: v_add_i32_e32 v6, vcc, 1, v7
-; CGP-NEXT: v_addc_u32_e32 v9, vcc, 0, v8, vcc
-; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v4
-; CGP-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
-; CGP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
-; CGP-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc
-; CGP-NEXT: v_add_i32_e32 v4, vcc, 1, v6
-; CGP-NEXT: v_addc_u32_e32 v5, vcc, 0, v9, vcc
-; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; CGP-NEXT: v_cndmask_b32_e32 v2, v6, v4, vcc
-; CGP-NEXT: v_cndmask_b32_e32 v4, v9, v5, vcc
-; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
-; CGP-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
-; CGP-NEXT: v_cndmask_b32_e32 v3, v8, v4, vcc
-; CGP-NEXT: v_xor_b32_e32 v2, v2, v10
-; CGP-NEXT: v_xor_b32_e32 v3, v3, v10
-; CGP-NEXT: v_sub_i32_e32 v2, vcc, v2, v10
-; CGP-NEXT: v_subb_u32_e32 v3, vcc, v3, v10, vcc
+; CGP-NEXT: v_ashrrev_i32_e32 v4, 31, v3
+; CGP-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; CGP-NEXT: v_lshrrev_b32_e32 v4, 20, v4
+; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v4
+; CGP-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; CGP-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
+; CGP-NEXT: v_ashr_i64 v[2:3], v[2:3], 12
; CGP-NEXT: s_setpc_b64 s[30:31]
%result = sdiv <2 x i64> %num, <i64 4096, i64 4096>
ret <2 x i64> %result
@@ -3091,253 +2534,252 @@ define <2 x i64> @v_sdiv_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_and_b32_e32 v1, 0xffffff, v4
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, 0, v1
-; GISEL-NEXT: v_addc_u32_e64 v1, s[4:5], 0, 0, vcc
-; GISEL-NEXT: v_cvt_f32_u32_e32 v4, v3
+; GISEL-NEXT: v_add_i32_e64 v3, s[4:5], 0, 0
+; GISEL-NEXT: v_add_i32_e32 v1, vcc, 0, v1
; GISEL-NEXT: v_cvt_f32_u32_e32 v5, v1
-; GISEL-NEXT: v_sub_i32_e32 v10, vcc, 0, v3
-; GISEL-NEXT: v_subb_u32_e32 v11, vcc, 0, v1, vcc
-; GISEL-NEXT: v_mac_f32_e32 v4, 0x4f800000, v5
-; GISEL-NEXT: v_rcp_iflag_f32_e32 v4, v4
+; GISEL-NEXT: v_cvt_f32_u32_e32 v4, v3
+; GISEL-NEXT: v_sub_i32_e32 v11, vcc, 0, v1
+; GISEL-NEXT: v_subb_u32_e32 v12, vcc, 0, v3, vcc
+; GISEL-NEXT: v_mac_f32_e32 v5, 0x4f800000, v4
+; GISEL-NEXT: v_rcp_iflag_f32_e32 v5, v5
; GISEL-NEXT: v_and_b32_e32 v0, 0xffffff, v0
-; GISEL-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
-; GISEL-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
-; GISEL-NEXT: v_trunc_f32_e32 v7, v5
-; GISEL-NEXT: v_mac_f32_e32 v4, 0xcf800000, v7
-; GISEL-NEXT: v_cvt_u32_f32_e32 v9, v4
-; GISEL-NEXT: v_cvt_u32_f32_e32 v12, v7
-; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v10, v9, 0
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v10, v12, v[5:6]
-; GISEL-NEXT: v_mul_lo_u32 v5, v12, v4
-; GISEL-NEXT: v_mul_hi_u32 v13, v9, v4
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v9, v[7:8]
-; GISEL-NEXT: v_mul_hi_u32 v4, v12, v4
-; GISEL-NEXT: v_mul_lo_u32 v8, v9, v7
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v13
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v13, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
-; GISEL-NEXT: v_mul_hi_u32 v8, v9, v7
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v13, v4
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v13, v8
-; GISEL-NEXT: v_mul_hi_u32 v7, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v5
+; GISEL-NEXT: v_mul_f32_e32 v5, 0x5f7ffffc, v5
+; GISEL-NEXT: v_mul_f32_e32 v7, 0x2f800000, v5
+; GISEL-NEXT: v_trunc_f32_e32 v9, v7
+; GISEL-NEXT: v_mac_f32_e32 v5, 0xcf800000, v9
+; GISEL-NEXT: v_cvt_u32_f32_e32 v10, v5
+; GISEL-NEXT: v_cvt_u32_f32_e32 v13, v9
+; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v10, 0
+; GISEL-NEXT: v_mov_b32_e32 v5, v8
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v11, v13, v[5:6]
+; GISEL-NEXT: v_mul_lo_u32 v5, v13, v7
+; GISEL-NEXT: v_mul_hi_u32 v14, v10, v7
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v12, v10, v[8:9]
+; GISEL-NEXT: v_mul_hi_u32 v7, v13, v7
+; GISEL-NEXT: v_mul_lo_u32 v9, v10, v8
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v14
; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
+; GISEL-NEXT: v_mul_lo_u32 v14, v13, v8
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v9, v5
+; GISEL-NEXT: v_mul_hi_u32 v9, v10, v8
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v14, v7
+; GISEL-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v9, vcc, v14, v9
+; GISEL-NEXT: v_mul_hi_u32 v8, v13, v8
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v9, v4
-; GISEL-NEXT: v_addc_u32_e32 v12, vcc, v12, v5, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v10, v9, 0
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v10, v12, v[5:6]
-; GISEL-NEXT: v_mul_lo_u32 v5, v12, v4
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, 0, v0
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v9, v[7:8]
-; GISEL-NEXT: v_mul_hi_u32 v0, v9, v4
-; GISEL-NEXT: v_addc_u32_e64 v11, s[4:5], 0, 0, vcc
-; GISEL-NEXT: v_mul_lo_u32 v8, v9, v7
-; GISEL-NEXT: v_mul_hi_u32 v4, v12, v4
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v9, v7
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v8, v7
+; GISEL-NEXT: v_add_i32_e32 v10, vcc, v10, v5
+; GISEL-NEXT: v_addc_u32_e32 v13, vcc, v13, v7, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v10, 0
+; GISEL-NEXT: v_mov_b32_e32 v5, v8
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v11, v13, v[5:6]
+; GISEL-NEXT: v_mul_lo_u32 v5, v13, v7
+; GISEL-NEXT: v_add_i32_e32 v11, vcc, 0, v0
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v12, v10, v[8:9]
+; GISEL-NEXT: v_mul_hi_u32 v0, v10, v7
+; GISEL-NEXT: v_mul_hi_u32 v7, v13, v7
+; GISEL-NEXT: v_mul_lo_u32 v9, v10, v8
+; GISEL-NEXT: v_and_b32_e32 v12, 0xffffff, v2
+; GISEL-NEXT: v_and_b32_e32 v2, 0xffffff, v6
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v5, v0
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v5, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
-; GISEL-NEXT: v_mul_hi_u32 v8, v9, v7
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v5, v4
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
-; GISEL-NEXT: v_mul_hi_u32 v7, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v4, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v5, v4
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v7, v4
+; GISEL-NEXT: v_mul_lo_u32 v5, v13, v8
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v9, v0
-; GISEL-NEXT: v_addc_u32_e32 v4, vcc, v12, v4, vcc
-; GISEL-NEXT: v_mul_lo_u32 v5, v11, v0
-; GISEL-NEXT: v_mul_lo_u32 v7, v10, v4
-; GISEL-NEXT: v_mul_hi_u32 v8, v10, v0
-; GISEL-NEXT: v_mul_hi_u32 v0, v11, v0
-; GISEL-NEXT: v_and_b32_e32 v12, 0xffffff, v2
+; GISEL-NEXT: v_mul_hi_u32 v9, v10, v8
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v9
+; GISEL-NEXT: v_mul_hi_u32 v8, v13, v8
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v5, v0
; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v8, v11, v4
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
-; GISEL-NEXT: v_mul_hi_u32 v7, v10, v4
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v10, v0
+; GISEL-NEXT: v_addc_u32_e32 v5, vcc, v13, v5, vcc
+; GISEL-NEXT: v_mul_lo_u32 v7, v3, v0
+; GISEL-NEXT: v_mul_lo_u32 v8, v11, v5
+; GISEL-NEXT: v_mul_hi_u32 v9, v11, v0
+; GISEL-NEXT: v_mul_hi_u32 v0, v3, v0
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v8
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v7
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v9
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GISEL-NEXT: v_mul_lo_u32 v9, v3, v5
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v8, v7
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v0, v5
-; GISEL-NEXT: v_mul_hi_u32 v8, v11, v4
-; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v3, v9, 0
+; GISEL-NEXT: v_mul_hi_u32 v8, v11, v5
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v9, v0
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v8
+; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v9, vcc, v9, v8
+; GISEL-NEXT: v_add_i32_e32 v10, vcc, v0, v7
+; GISEL-NEXT: v_mul_hi_u32 v5, v3, v5
+; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v1, v10, 0
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v7, v0
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v3, v0, v[5:6]
-; GISEL-NEXT: v_and_b32_e32 v2, 0xffffff, v6
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v1, v9, v[7:8]
-; GISEL-NEXT: v_sub_i32_e32 v6, vcc, v10, v4
-; GISEL-NEXT: v_subb_u32_e64 v7, s[4:5], v11, v5, vcc
-; GISEL-NEXT: v_sub_i32_e64 v5, s[4:5], v11, v5
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v7, v1
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v9, v0
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v5, v0
+; GISEL-NEXT: v_mov_b32_e32 v5, v8
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v1, v0, v[5:6]
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v3, v10, v[8:9]
+; GISEL-NEXT: v_sub_i32_e32 v6, vcc, v11, v7
+; GISEL-NEXT: v_subb_u32_e64 v7, s[4:5], v3, v5, vcc
+; GISEL-NEXT: v_sub_i32_e64 v5, s[4:5], v3, v5
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v7, v3
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v6, v3
-; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, -1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], 0, v2
-; GISEL-NEXT: v_addc_u32_e64 v2, s[4:5], 0, 0, s[4:5]
-; GISEL-NEXT: v_cvt_f32_u32_e32 v11, v4
-; GISEL-NEXT: v_cvt_f32_u32_e32 v13, v2
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v7, v1
-; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v5, v1, vcc
-; GISEL-NEXT: v_mac_f32_e32 v11, 0x4f800000, v13
-; GISEL-NEXT: v_rcp_iflag_f32_e32 v7, v11
-; GISEL-NEXT: v_cndmask_b32_e64 v8, v8, v10, s[4:5]
-; GISEL-NEXT: v_sub_i32_e32 v10, vcc, v6, v3
-; GISEL-NEXT: v_subbrev_u32_e32 v11, vcc, 0, v5, vcc
-; GISEL-NEXT: v_mul_f32_e32 v5, 0x5f7ffffc, v7
-; GISEL-NEXT: v_mul_f32_e32 v6, 0x2f800000, v5
-; GISEL-NEXT: v_trunc_f32_e32 v6, v6
-; GISEL-NEXT: v_mac_f32_e32 v5, 0xcf800000, v6
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v6, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[4:5]
+; GISEL-NEXT: v_add_i32_e64 v2, s[4:5], 0, v2
+; GISEL-NEXT: v_cvt_f32_u32_e32 v11, v2
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v7, v3
+; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v5, v3, vcc
+; GISEL-NEXT: v_mac_f32_e32 v11, 0x4f800000, v4
+; GISEL-NEXT: v_rcp_iflag_f32_e32 v4, v11
+; GISEL-NEXT: v_cndmask_b32_e64 v7, v8, v9, s[4:5]
+; GISEL-NEXT: v_sub_i32_e32 v8, vcc, v6, v1
+; GISEL-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
+; GISEL-NEXT: v_subbrev_u32_e32 v9, vcc, 0, v5, vcc
+; GISEL-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
+; GISEL-NEXT: v_trunc_f32_e32 v5, v5
+; GISEL-NEXT: v_mac_f32_e32 v4, 0xcf800000, v5
+; GISEL-NEXT: v_cvt_u32_f32_e32 v11, v4
+; GISEL-NEXT: v_sub_i32_e32 v14, vcc, 0, v2
; GISEL-NEXT: v_cvt_u32_f32_e32 v13, v5
-; GISEL-NEXT: v_sub_i32_e32 v15, vcc, 0, v4
-; GISEL-NEXT: v_cvt_u32_f32_e32 v14, v6
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v15, v13, 0
-; GISEL-NEXT: v_subb_u32_e32 v16, vcc, 0, v2, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v15, v14, v[6:7]
-; GISEL-NEXT: v_add_i32_e32 v17, vcc, 1, v9
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v16, v13, v[6:7]
-; GISEL-NEXT: v_addc_u32_e32 v18, vcc, 0, v0, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v11, v1
-; GISEL-NEXT: v_cndmask_b32_e64 v19, 0, -1, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v10, v3
-; GISEL-NEXT: v_mul_lo_u32 v7, v14, v5
-; GISEL-NEXT: v_mul_lo_u32 v10, v13, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc
-; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v11, v1
-; GISEL-NEXT: v_mul_hi_u32 v1, v13, v5
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v10
-; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v7, v1
+; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v14, v11, 0
+; GISEL-NEXT: v_subb_u32_e32 v15, vcc, 0, v3, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v14, v13, v[5:6]
+; GISEL-NEXT: v_add_i32_e32 v16, vcc, 1, v10
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v15, v11, v[5:6]
+; GISEL-NEXT: v_addc_u32_e32 v17, vcc, 0, v0, vcc
+; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v9, v3
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, -1, vcc
+; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v8, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc
+; GISEL-NEXT: v_mul_lo_u32 v6, v13, v4
+; GISEL-NEXT: v_mul_lo_u32 v8, v11, v5
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v9, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v18, v1, vcc
+; GISEL-NEXT: v_mul_hi_u32 v1, v11, v4
+; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v8
+; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v1, vcc, v6, v1
; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v7, v14, v6
-; GISEL-NEXT: v_mul_hi_u32 v5, v14, v5
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v10, v1
-; GISEL-NEXT: v_mul_hi_u32 v10, v13, v6
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v10
-; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v10
-; GISEL-NEXT: v_mul_hi_u32 v6, v14, v6
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v5, v1
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v6, v5
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, v13, v1
-; GISEL-NEXT: v_addc_u32_e32 v11, vcc, v14, v5, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v15, v10, 0
-; GISEL-NEXT: v_add_i32_e32 v13, vcc, 1, v17
-; GISEL-NEXT: v_mov_b32_e32 v1, v6
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v15, v11, v[1:2]
-; GISEL-NEXT: v_addc_u32_e32 v14, vcc, 0, v18, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v16, v10, v[6:7]
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
-; GISEL-NEXT: v_cndmask_b32_e32 v1, v17, v13, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v18, v14, vcc
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
-; GISEL-NEXT: v_mul_lo_u32 v7, v11, v5
-; GISEL-NEXT: v_mul_lo_u32 v8, v10, v6
-; GISEL-NEXT: v_mul_hi_u32 v13, v10, v5
-; GISEL-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
-; GISEL-NEXT: v_add_i32_e64 v9, s[4:5], 0, v12
-; GISEL-NEXT: v_addc_u32_e64 v12, s[4:5], 0, 0, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v7, s[4:5], v7, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v7, s[4:5], v7, v13
+; GISEL-NEXT: v_mul_lo_u32 v6, v13, v5
+; GISEL-NEXT: v_mul_hi_u32 v4, v13, v4
+; GISEL-NEXT: v_add_i32_e32 v1, vcc, v8, v1
+; GISEL-NEXT: v_mul_hi_u32 v8, v11, v5
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v6, v4
+; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v8
+; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v8
+; GISEL-NEXT: v_mul_hi_u32 v5, v13, v5
+; GISEL-NEXT: v_add_i32_e32 v1, vcc, v4, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v6, v4
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v5, v4
+; GISEL-NEXT: v_add_i32_e32 v8, vcc, v11, v1
+; GISEL-NEXT: v_addc_u32_e32 v11, vcc, v13, v4, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v14, v8, 0
+; GISEL-NEXT: v_add_i32_e32 v13, vcc, 1, v16
+; GISEL-NEXT: v_mov_b32_e32 v1, v5
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v14, v11, v[1:2]
+; GISEL-NEXT: v_addc_u32_e32 v18, vcc, 0, v17, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v15, v8, v[5:6]
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v16, v13, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v17, v18, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
+; GISEL-NEXT: v_mul_lo_u32 v6, v11, v4
+; GISEL-NEXT: v_mul_lo_u32 v7, v8, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v10, v1, vcc
+; GISEL-NEXT: v_add_i32_e64 v10, s[4:5], 0, v12
+; GISEL-NEXT: v_mul_hi_u32 v12, v8, v4
+; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v6, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[4:5]
-; GISEL-NEXT: v_mul_lo_u32 v13, v11, v6
-; GISEL-NEXT: v_mul_hi_u32 v5, v11, v5
-; GISEL-NEXT: v_add_i32_e64 v7, s[4:5], v8, v7
-; GISEL-NEXT: v_mul_hi_u32 v8, v10, v6
-; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v13, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v5, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v8, s[4:5], v13, v8
-; GISEL-NEXT: v_mul_hi_u32 v6, v11, v6
-; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v5, v7
+; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v6, v12
+; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[4:5]
+; GISEL-NEXT: v_mul_lo_u32 v12, v11, v5
+; GISEL-NEXT: v_mul_hi_u32 v4, v11, v4
+; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v7, v6
+; GISEL-NEXT: v_mul_hi_u32 v7, v8, v5
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v12, v4
+; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v4, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v7, s[4:5], v8, v7
-; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v6, v7
-; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v10, v5
-; GISEL-NEXT: v_addc_u32_e64 v6, s[4:5], v11, v6, s[4:5]
-; GISEL-NEXT: v_mul_lo_u32 v7, v12, v5
-; GISEL-NEXT: v_mul_lo_u32 v8, v9, v6
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v0, v3, vcc
-; GISEL-NEXT: v_mul_hi_u32 v0, v9, v5
-; GISEL-NEXT: v_mul_hi_u32 v5, v12, v5
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v7, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v7, v12, v6
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
-; GISEL-NEXT: v_mul_hi_u32 v8, v9, v6
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
+; GISEL-NEXT: v_add_i32_e64 v7, s[4:5], v12, v7
+; GISEL-NEXT: v_mul_hi_u32 v5, v11, v5
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v4, v6
+; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v7, v6
+; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v5, v6
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v8, v4
+; GISEL-NEXT: v_addc_u32_e64 v5, s[4:5], v11, v5, s[4:5]
+; GISEL-NEXT: v_mul_lo_u32 v6, v3, v4
+; GISEL-NEXT: v_mul_lo_u32 v7, v10, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v8, v0, v9, vcc
+; GISEL-NEXT: v_mul_hi_u32 v0, v10, v4
+; GISEL-NEXT: v_mul_hi_u32 v4, v3, v4
+; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v8
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v5, v0
-; GISEL-NEXT: v_mul_hi_u32 v10, v12, v6
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v4, v8, 0
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v6, v0
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_mul_lo_u32 v6, v3, v5
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v7, v0
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, v10, v0
-; GISEL-NEXT: v_mov_b32_e32 v0, v6
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v4, v10, v[0:1]
+; GISEL-NEXT: v_mul_hi_u32 v7, v10, v5
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v6, v4
+; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v7
+; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v7
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v4, v0
+; GISEL-NEXT: v_mul_hi_u32 v9, v3, v5
+; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v2, v7, 0
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v6, v0
+; GISEL-NEXT: v_add_i32_e32 v9, vcc, v9, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v5
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v2, v9, v[0:1]
; GISEL-NEXT: v_subrev_i32_e32 v0, vcc, 0, v1
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v2, v8, v[6:7]
-; GISEL-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v3, vcc
-; GISEL-NEXT: v_sub_i32_e32 v3, vcc, v9, v5
-; GISEL-NEXT: v_subb_u32_e64 v5, s[4:5], v12, v6, vcc
-; GISEL-NEXT: v_sub_i32_e64 v6, s[4:5], v12, v6
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v5, v2
-; GISEL-NEXT: v_subb_u32_e32 v6, vcc, v6, v2, vcc
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v3, v4
-; GISEL-NEXT: v_sub_i32_e32 v3, vcc, v3, v4
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v5, v2
-; GISEL-NEXT: v_subbrev_u32_e32 v6, vcc, 0, v6, vcc
-; GISEL-NEXT: v_cndmask_b32_e64 v5, v7, v9, s[4:5]
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, 1, v8
-; GISEL-NEXT: v_addc_u32_e32 v9, vcc, 0, v10, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v6, v2
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v3, v7, v[5:6]
+; GISEL-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v8, vcc
+; GISEL-NEXT: v_sub_i32_e32 v4, vcc, v10, v4
+; GISEL-NEXT: v_subb_u32_e64 v6, s[4:5], v3, v5, vcc
+; GISEL-NEXT: v_sub_i32_e64 v5, s[4:5], v3, v5
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v6, v3
+; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v5, v3, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[4:5]
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v4, v2
+; GISEL-NEXT: v_sub_i32_e32 v4, vcc, v4, v2
+; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, -1, s[4:5]
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v6, v3
+; GISEL-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v6, v8, v10, s[4:5]
+; GISEL-NEXT: v_add_i32_e32 v8, vcc, 1, v7
+; GISEL-NEXT: v_addc_u32_e32 v10, vcc, 0, v9, vcc
+; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v5, v3
; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, -1, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v3, v4
-; GISEL-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc
-; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v6, v2
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v11, v3, vcc
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, 1, v7
-; GISEL-NEXT: v_addc_u32_e32 v4, vcc, 0, v9, vcc
+; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v4, v2
+; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v5, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v11, v2, vcc
+; GISEL-NEXT: v_add_i32_e32 v3, vcc, 1, v8
+; GISEL-NEXT: v_addc_u32_e32 v4, vcc, 0, v10, vcc
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v7, v3, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v9, v4, vcc
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v8, v3, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v10, v4, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc
; GISEL-NEXT: v_subrev_i32_e32 v2, vcc, 0, v2
; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -3399,3 +2841,24 @@ define <2 x i64> @v_sdiv_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
%result = sdiv <2 x i64> %num.mask, %den.mask
ret <2 x i64> %result
}
+
+define i64 @v_sdiv_i64_exact(i64 %num) {
+; CHECK-LABEL: v_sdiv_i64_exact:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact i64 %num, 4096
+ ret i64 %result
+}
+
+define <2 x i64> @v_sdiv_v2i64_exact(<2 x i64> %num) {
+; CHECK-LABEL: v_sdiv_v2i64_exact:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
+; CHECK-NEXT: v_ashr_i64 v[2:3], v[2:3], 10
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact <2 x i64> %num, <i64 4096, i64 1024>
+ ret <2 x i64> %result
+}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
index c455b24..83ebc84 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
@@ -3034,253 +3034,251 @@ define <2 x i64> @v_srem_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_and_b32_e32 v1, 0xffffff, v4
+; GISEL-NEXT: v_add_i32_e64 v3, s[4:5], 0, 0
; GISEL-NEXT: v_add_i32_e32 v1, vcc, 0, v1
-; GISEL-NEXT: v_addc_u32_e64 v3, s[4:5], 0, 0, vcc
-; GISEL-NEXT: v_cvt_f32_u32_e32 v4, v1
-; GISEL-NEXT: v_cvt_f32_u32_e32 v5, v3
-; GISEL-NEXT: v_sub_i32_e32 v10, vcc, 0, v1
-; GISEL-NEXT: v_subb_u32_e32 v11, vcc, 0, v3, vcc
-; GISEL-NEXT: v_mac_f32_e32 v4, 0x4f800000, v5
-; GISEL-NEXT: v_rcp_iflag_f32_e32 v4, v4
+; GISEL-NEXT: v_cvt_f32_u32_e32 v5, v1
+; GISEL-NEXT: v_cvt_f32_u32_e32 v4, v3
+; GISEL-NEXT: v_sub_i32_e32 v11, vcc, 0, v1
+; GISEL-NEXT: v_subb_u32_e32 v12, vcc, 0, v3, vcc
+; GISEL-NEXT: v_mac_f32_e32 v5, 0x4f800000, v4
+; GISEL-NEXT: v_rcp_iflag_f32_e32 v5, v5
; GISEL-NEXT: v_and_b32_e32 v0, 0xffffff, v0
-; GISEL-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
-; GISEL-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
-; GISEL-NEXT: v_trunc_f32_e32 v7, v5
-; GISEL-NEXT: v_mac_f32_e32 v4, 0xcf800000, v7
-; GISEL-NEXT: v_cvt_u32_f32_e32 v9, v4
-; GISEL-NEXT: v_cvt_u32_f32_e32 v12, v7
-; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v10, v9, 0
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v10, v12, v[5:6]
-; GISEL-NEXT: v_mul_lo_u32 v5, v12, v4
-; GISEL-NEXT: v_mul_hi_u32 v13, v9, v4
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v9, v[7:8]
-; GISEL-NEXT: v_mul_hi_u32 v4, v12, v4
-; GISEL-NEXT: v_mul_lo_u32 v8, v9, v7
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v13
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v13, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
-; GISEL-NEXT: v_mul_hi_u32 v8, v9, v7
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v13, v4
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v13, v8
-; GISEL-NEXT: v_mul_hi_u32 v7, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v5
+; GISEL-NEXT: v_mul_f32_e32 v5, 0x5f7ffffc, v5
+; GISEL-NEXT: v_mul_f32_e32 v7, 0x2f800000, v5
+; GISEL-NEXT: v_trunc_f32_e32 v9, v7
+; GISEL-NEXT: v_mac_f32_e32 v5, 0xcf800000, v9
+; GISEL-NEXT: v_cvt_u32_f32_e32 v10, v5
+; GISEL-NEXT: v_cvt_u32_f32_e32 v13, v9
+; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v10, 0
+; GISEL-NEXT: v_mov_b32_e32 v5, v8
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v11, v13, v[5:6]
+; GISEL-NEXT: v_mul_lo_u32 v5, v13, v7
+; GISEL-NEXT: v_mul_hi_u32 v14, v10, v7
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v12, v10, v[8:9]
+; GISEL-NEXT: v_mul_hi_u32 v7, v13, v7
+; GISEL-NEXT: v_mul_lo_u32 v9, v10, v8
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v14
; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
+; GISEL-NEXT: v_mul_lo_u32 v14, v13, v8
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v9, v5
+; GISEL-NEXT: v_mul_hi_u32 v9, v10, v8
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v14, v7
+; GISEL-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v9, vcc, v14, v9
+; GISEL-NEXT: v_mul_hi_u32 v8, v13, v8
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v9, v4
-; GISEL-NEXT: v_addc_u32_e32 v12, vcc, v12, v5, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v10, v9, 0
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v10, v12, v[5:6]
-; GISEL-NEXT: v_mul_lo_u32 v5, v12, v4
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, 0, v0
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v9, v[7:8]
-; GISEL-NEXT: v_mul_hi_u32 v0, v9, v4
-; GISEL-NEXT: v_addc_u32_e64 v11, s[4:5], 0, 0, vcc
-; GISEL-NEXT: v_mul_lo_u32 v8, v9, v7
-; GISEL-NEXT: v_mul_hi_u32 v4, v12, v4
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v9, v7
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v8, v7
+; GISEL-NEXT: v_add_i32_e32 v10, vcc, v10, v5
+; GISEL-NEXT: v_addc_u32_e32 v13, vcc, v13, v7, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v11, v10, 0
+; GISEL-NEXT: v_mov_b32_e32 v5, v8
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v11, v13, v[5:6]
+; GISEL-NEXT: v_mul_lo_u32 v5, v13, v7
+; GISEL-NEXT: v_add_i32_e32 v11, vcc, 0, v0
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v12, v10, v[8:9]
+; GISEL-NEXT: v_mul_hi_u32 v0, v10, v7
+; GISEL-NEXT: v_mul_hi_u32 v7, v13, v7
+; GISEL-NEXT: v_mul_lo_u32 v9, v10, v8
+; GISEL-NEXT: v_and_b32_e32 v12, 0xffffff, v2
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v5, v0
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v5, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
-; GISEL-NEXT: v_mul_hi_u32 v8, v9, v7
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v5, v4
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
-; GISEL-NEXT: v_mul_hi_u32 v7, v12, v7
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v4, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v5, v4
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v7, v4
+; GISEL-NEXT: v_mul_lo_u32 v5, v13, v8
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v9, v0
-; GISEL-NEXT: v_addc_u32_e32 v4, vcc, v12, v4, vcc
-; GISEL-NEXT: v_mul_lo_u32 v5, v11, v0
-; GISEL-NEXT: v_mul_lo_u32 v7, v10, v4
-; GISEL-NEXT: v_mul_hi_u32 v8, v10, v0
-; GISEL-NEXT: v_mul_hi_u32 v0, v11, v0
-; GISEL-NEXT: v_and_b32_e32 v12, 0xffffff, v2
+; GISEL-NEXT: v_mul_hi_u32 v9, v10, v8
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v8
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v9
+; GISEL-NEXT: v_mul_hi_u32 v8, v13, v8
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v5, v0
; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v8, v11, v4
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
-; GISEL-NEXT: v_mul_hi_u32 v7, v10, v4
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v10, v0
+; GISEL-NEXT: v_addc_u32_e32 v5, vcc, v13, v5, vcc
+; GISEL-NEXT: v_mul_lo_u32 v7, v3, v0
+; GISEL-NEXT: v_mul_lo_u32 v8, v11, v5
+; GISEL-NEXT: v_mul_hi_u32 v9, v11, v0
+; GISEL-NEXT: v_mul_hi_u32 v0, v3, v0
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v8
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v7
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v9
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GISEL-NEXT: v_mul_lo_u32 v9, v3, v5
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v8, v7
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v0, v5
-; GISEL-NEXT: v_mul_hi_u32 v8, v11, v4
-; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, v9, 0
+; GISEL-NEXT: v_mul_hi_u32 v8, v11, v5
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v9, v0
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v8
+; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v9, vcc, v9, v8
+; GISEL-NEXT: v_add_i32_e32 v10, vcc, v0, v7
+; GISEL-NEXT: v_mul_hi_u32 v5, v3, v5
+; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v1, v10, 0
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v7, v0
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v8, v0
-; GISEL-NEXT: v_mov_b32_e32 v0, v5
-; GISEL-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v1, v7, v[0:1]
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v9, v0
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v8
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v1, v5, v[0:1]
; GISEL-NEXT: v_and_b32_e32 v0, 0xffffff, v6
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v3, v9, v[7:8]
-; GISEL-NEXT: v_sub_i32_e32 v8, vcc, v10, v4
-; GISEL-NEXT: v_subb_u32_e64 v9, s[4:5], v11, v5, vcc
-; GISEL-NEXT: v_sub_i32_e64 v5, s[4:5], v11, v5
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v9, v3
+; GISEL-NEXT: v_sub_i32_e32 v7, vcc, v11, v7
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v3, v10, v[8:9]
+; GISEL-NEXT: v_subb_u32_e64 v8, s[4:5], v3, v5, vcc
+; GISEL-NEXT: v_sub_i32_e64 v5, s[4:5], v3, v5
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v8, v3
; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v8, v1
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[4:5]
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v7, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[4:5]
; GISEL-NEXT: v_add_i32_e64 v2, s[4:5], 0, v0
-; GISEL-NEXT: v_addc_u32_e64 v4, s[4:5], 0, 0, s[4:5]
; GISEL-NEXT: v_cvt_f32_u32_e32 v0, v2
-; GISEL-NEXT: v_cvt_f32_u32_e32 v10, v4
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v9, v3
-; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v5, v3, vcc
-; GISEL-NEXT: v_mac_f32_e32 v0, 0x4f800000, v10
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v8, v3
+; GISEL-NEXT: v_cndmask_b32_e64 v9, v6, v9, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v10, vcc, v5, v3, vcc
+; GISEL-NEXT: v_mac_f32_e32 v0, 0x4f800000, v4
; GISEL-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v11, v6, v7, s[4:5]
-; GISEL-NEXT: v_sub_i32_e32 v10, vcc, v8, v1
+; GISEL-NEXT: v_sub_i32_e32 v11, vcc, v7, v1
+; GISEL-NEXT: v_subbrev_u32_e64 v13, s[4:5], 0, v10, vcc
; GISEL-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
-; GISEL-NEXT: v_mul_f32_e32 v5, 0x2f800000, v0
-; GISEL-NEXT: v_trunc_f32_e32 v7, v5
-; GISEL-NEXT: v_mac_f32_e32 v0, 0xcf800000, v7
-; GISEL-NEXT: v_cvt_u32_f32_e32 v15, v0
-; GISEL-NEXT: v_subbrev_u32_e64 v14, s[4:5], 0, v13, vcc
-; GISEL-NEXT: v_sub_i32_e64 v16, s[4:5], 0, v2
-; GISEL-NEXT: v_subb_u32_e64 v17, s[4:5], 0, v4, s[4:5]
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v16, v15, 0
-; GISEL-NEXT: v_cvt_u32_f32_e32 v18, v7
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v14, v3
-; GISEL-NEXT: v_mov_b32_e32 v0, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v19, 0, -1, s[4:5]
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v16, v18, v[0:1]
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v10, v1
+; GISEL-NEXT: v_mul_f32_e32 v4, 0x2f800000, v0
+; GISEL-NEXT: v_trunc_f32_e32 v6, v4
+; GISEL-NEXT: v_mac_f32_e32 v0, 0xcf800000, v6
+; GISEL-NEXT: v_cvt_u32_f32_e32 v14, v0
+; GISEL-NEXT: v_sub_i32_e64 v15, s[4:5], 0, v2
+; GISEL-NEXT: v_subb_u32_e64 v16, s[4:5], 0, v3, s[4:5]
+; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v15, v14, 0
+; GISEL-NEXT: v_cvt_u32_f32_e32 v17, v6
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v13, v3
+; GISEL-NEXT: v_mov_b32_e32 v0, v5
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, -1, s[4:5]
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v15, v17, v[0:1]
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v11, v1
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v17, v15, v[6:7]
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v14, v3
-; GISEL-NEXT: v_cndmask_b32_e64 v7, v19, v0, s[4:5]
-; GISEL-NEXT: v_mul_lo_u32 v0, v18, v5
-; GISEL-NEXT: v_mul_lo_u32 v19, v15, v6
-; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v13, v3, vcc
-; GISEL-NEXT: v_mul_hi_u32 v13, v15, v5
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v16, v14, v[5:6]
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v13, v3
+; GISEL-NEXT: v_cndmask_b32_e64 v6, v18, v0, s[4:5]
+; GISEL-NEXT: v_mul_lo_u32 v0, v17, v4
+; GISEL-NEXT: v_mul_lo_u32 v18, v14, v5
+; GISEL-NEXT: v_mul_hi_u32 v19, v14, v4
+; GISEL-NEXT: v_subb_u32_e32 v10, vcc, v10, v3, vcc
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v18
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v19
-; GISEL-NEXT: v_cndmask_b32_e64 v19, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v13
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v13, v18, v6
-; GISEL-NEXT: v_mul_hi_u32 v5, v18, v5
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v19, v0
-; GISEL-NEXT: v_mul_hi_u32 v19, v15, v6
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v13, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v19
+; GISEL-NEXT: v_mul_lo_u32 v19, v17, v5
+; GISEL-NEXT: v_mul_hi_u32 v4, v17, v4
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v18, v0
+; GISEL-NEXT: v_mul_hi_u32 v18, v14, v5
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v19, v4
; GISEL-NEXT: v_cndmask_b32_e64 v19, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v13, vcc, v13, v19
-; GISEL-NEXT: v_mul_hi_u32 v6, v18, v6
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v5, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v13, v5
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v6, v5
-; GISEL-NEXT: v_add_i32_e32 v13, vcc, v15, v0
-; GISEL-NEXT: v_addc_u32_e32 v15, vcc, v18, v5, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v16, v13, 0
-; GISEL-NEXT: v_sub_i32_e32 v18, vcc, v10, v1
-; GISEL-NEXT: v_mov_b32_e32 v0, v6
-; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v16, v15, v[0:1]
-; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v17, v13, v[0:1]
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
-; GISEL-NEXT: v_cndmask_b32_e32 v6, v10, v18, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v14, v3, vcc
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
-; GISEL-NEXT: v_cndmask_b32_e32 v1, v8, v6, vcc
-; GISEL-NEXT: v_mul_lo_u32 v6, v15, v5
-; GISEL-NEXT: v_mul_lo_u32 v7, v13, v0
-; GISEL-NEXT: v_mul_hi_u32 v11, v13, v5
-; GISEL-NEXT: v_add_i32_e64 v8, s[4:5], 0, v12
-; GISEL-NEXT: v_addc_u32_e64 v10, s[4:5], 0, 0, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v6, v7
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v6, v11
-; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[4:5]
-; GISEL-NEXT: v_mul_lo_u32 v11, v15, v0
-; GISEL-NEXT: v_mul_hi_u32 v5, v15, v5
-; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v7, v6
-; GISEL-NEXT: v_mul_hi_u32 v7, v13, v0
-; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v11, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v18
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v18, vcc, v19, v18
+; GISEL-NEXT: v_mul_hi_u32 v5, v17, v5
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, v4, v0
+; GISEL-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v18, v4
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v5, v4
+; GISEL-NEXT: v_add_i32_e32 v14, vcc, v14, v0
+; GISEL-NEXT: v_addc_u32_e32 v17, vcc, v17, v4, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v15, v14, 0
+; GISEL-NEXT: v_sub_i32_e32 v18, vcc, v11, v1
+; GISEL-NEXT: v_mov_b32_e32 v0, v5
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v15, v17, v[0:1]
+; GISEL-NEXT: v_subbrev_u32_e32 v10, vcc, 0, v10, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v16, v14, v[0:1]
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v11, v18, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v6, v13, v10, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v7, v5, vcc
+; GISEL-NEXT: v_mul_lo_u32 v5, v17, v4
+; GISEL-NEXT: v_mul_lo_u32 v7, v14, v0
+; GISEL-NEXT: v_mul_hi_u32 v10, v14, v4
+; GISEL-NEXT: v_add_i32_e64 v9, s[4:5], 0, v12
; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v5, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v7, s[4:5], v11, v7
-; GISEL-NEXT: v_mul_hi_u32 v0, v15, v0
-; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v5, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[4:5]
-; GISEL-NEXT: v_add_i32_e64 v6, s[4:5], v7, v6
-; GISEL-NEXT: v_add_i32_e64 v0, s[4:5], v0, v6
-; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v13, v5
-; GISEL-NEXT: v_addc_u32_e64 v0, s[4:5], v15, v0, s[4:5]
-; GISEL-NEXT: v_mul_lo_u32 v6, v10, v5
-; GISEL-NEXT: v_mul_lo_u32 v7, v8, v0
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc
-; GISEL-NEXT: v_mul_hi_u32 v9, v8, v5
-; GISEL-NEXT: v_mul_hi_u32 v5, v10, v5
-; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v7
+; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v5, v10
+; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, s[4:5]
+; GISEL-NEXT: v_mul_lo_u32 v10, v17, v0
+; GISEL-NEXT: v_mul_hi_u32 v4, v17, v4
+; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v7, v5
+; GISEL-NEXT: v_mul_hi_u32 v7, v14, v0
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v10, v4
+; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v4, v7
+; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e64 v7, s[4:5], v10, v7
+; GISEL-NEXT: v_mul_hi_u32 v0, v17, v0
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v4, v5
+; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e64 v5, s[4:5], v7, v5
+; GISEL-NEXT: v_add_i32_e64 v0, s[4:5], v0, v5
+; GISEL-NEXT: v_add_i32_e64 v4, s[4:5], v14, v4
+; GISEL-NEXT: v_addc_u32_e64 v0, s[4:5], v17, v0, s[4:5]
+; GISEL-NEXT: v_mul_lo_u32 v5, v3, v4
+; GISEL-NEXT: v_mul_lo_u32 v7, v9, v0
+; GISEL-NEXT: v_cndmask_b32_e32 v8, v8, v6, vcc
+; GISEL-NEXT: v_mul_hi_u32 v6, v9, v4
+; GISEL-NEXT: v_mul_hi_u32 v4, v3, v4
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v9
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v6
+; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GISEL-NEXT: v_mul_lo_u32 v6, v3, v0
+; GISEL-NEXT: v_add_i32_e32 v5, vcc, v7, v5
+; GISEL-NEXT: v_mul_hi_u32 v7, v9, v0
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v6, v4
; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v9, v10, v0
-; GISEL-NEXT: v_add_i32_e32 v6, vcc, v7, v6
-; GISEL-NEXT: v_mul_hi_u32 v7, v8, v0
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v9, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v7
+; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v9, v7
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v5, v6
-; GISEL-NEXT: v_mul_hi_u32 v0, v10, v0
-; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v2, v9, 0
-; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v11
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, v0, v7
-; GISEL-NEXT: v_mov_b32_e32 v0, v6
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v2, v7, v[0:1]
+; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v7
+; GISEL-NEXT: v_add_i32_e32 v7, vcc, v4, v5
+; GISEL-NEXT: v_mul_hi_u32 v0, v3, v0
+; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v2, v7, 0
+; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
+; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v10
+; GISEL-NEXT: v_add_i32_e32 v6, vcc, v0, v6
+; GISEL-NEXT: v_mov_b32_e32 v0, v5
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v2, v6, v[0:1]
; GISEL-NEXT: v_subrev_i32_e32 v0, vcc, 0, v1
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v4, v9, v[6:7]
-; GISEL-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v3, vcc
-; GISEL-NEXT: v_sub_i32_e32 v3, vcc, v8, v5
-; GISEL-NEXT: v_subb_u32_e64 v5, s[4:5], v10, v6, vcc
-; GISEL-NEXT: v_sub_i32_e64 v6, s[4:5], v10, v6
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v5, v4
+; GISEL-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v3, v7, v[5:6]
+; GISEL-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v8, vcc
+; GISEL-NEXT: v_sub_i32_e32 v4, vcc, v9, v4
+; GISEL-NEXT: v_subb_u32_e64 v6, s[4:5], v3, v5, vcc
+; GISEL-NEXT: v_sub_i32_e64 v5, s[4:5], v3, v5
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v6, v3
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v3, v2
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v4, v2
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v5, v4
-; GISEL-NEXT: v_subb_u32_e32 v6, vcc, v6, v4, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v6, v3
+; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v5, v3, vcc
; GISEL-NEXT: v_cndmask_b32_e64 v7, v7, v8, s[4:5]
-; GISEL-NEXT: v_sub_i32_e32 v8, vcc, v3, v2
-; GISEL-NEXT: v_subbrev_u32_e64 v9, s[4:5], 0, v6, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v9, v4
+; GISEL-NEXT: v_sub_i32_e32 v8, vcc, v4, v2
+; GISEL-NEXT: v_subbrev_u32_e64 v9, s[4:5], 0, v5, vcc
+; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v9, v3
; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, -1, s[4:5]
; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v8, v2
; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v9, v4
-; GISEL-NEXT: v_subb_u32_e32 v4, vcc, v6, v4, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], v9, v3
+; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v5, v3, vcc
; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v8, v2
; GISEL-NEXT: v_cndmask_b32_e64 v10, v10, v11, s[4:5]
-; GISEL-NEXT: v_subbrev_u32_e32 v4, vcc, 0, v4, vcc
+; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; GISEL-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v4, v9, v4, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v4, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc
; GISEL-NEXT: v_subrev_i32_e32 v2, vcc, 0, v2
; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
index 61e1e67..320dfbb 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
@@ -4142,11 +4142,11 @@ define i48 @v_ssubsat_i48(i48 %lhs, i48 %rhs) {
; GFX9-NEXT: v_lshlrev_b64 v[2:3], 16, v[2:3]
; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, v0, v2
; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[4:5], v[0:1]
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[6:7], 0, v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[2:3]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v5
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
; GFX9-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4162,7 +4162,7 @@ define i48 @v_ssubsat_i48(i48 %lhs, i48 %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, 0, v[2:3]
; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[4:5], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s5, 0x80000000, v6
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX10-NEXT: s_xor_b32 vcc_lo, vcc_lo, s4
; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
@@ -4179,7 +4179,7 @@ define i48 @v_ssubsat_i48(i48 %lhs, i48 %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, 0, v[2:3]
; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, v[4:5], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v6
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX11-NEXT: s_xor_b32 vcc_lo, vcc_lo, s0
; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_cndmask_b32 v1, v5, v1
; GFX11-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4202,7 +4202,7 @@ define amdgpu_ps i48 @s_ssubsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX6-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], 0
; GFX6-NEXT: s_ashr_i32 s2, s7, 31
; GFX6-NEXT: s_ashr_i32 s5, s7, 15
-; GFX6-NEXT: s_add_u32 s2, s2, 0xffff8000
+; GFX6-NEXT: s_addk_i32 s2, 0x8000
; GFX6-NEXT: v_mov_b32_e32 v0, s5
; GFX6-NEXT: v_mov_b32_e32 v1, s2
; GFX6-NEXT: v_mov_b32_e32 v2, s4
@@ -4227,7 +4227,7 @@ define amdgpu_ps i48 @s_ssubsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX8-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], 0
; GFX8-NEXT: s_ashr_i32 s2, s7, 31
; GFX8-NEXT: s_ashr_i32 s5, s7, 15
-; GFX8-NEXT: s_add_u32 s2, s2, 0xffff8000
+; GFX8-NEXT: s_addk_i32 s2, 0x8000
; GFX8-NEXT: v_mov_b32_e32 v0, s5
; GFX8-NEXT: v_mov_b32_e32 v1, s2
; GFX8-NEXT: v_mov_b32_e32 v2, s4
@@ -4250,7 +4250,7 @@ define amdgpu_ps i48 @s_ssubsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
; GFX9-NEXT: s_ashr_i32 s2, s5, 31
-; GFX9-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX9-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: v_mov_b32_e32 v2, s4
@@ -4274,7 +4274,7 @@ define amdgpu_ps i48 @s_ssubsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX10-NEXT: v_cmp_gt_i64_e64 s1, s[2:3], 0
; GFX10-NEXT: v_mov_b32_e32 v1, s5
; GFX10-NEXT: s_ashr_i32 s2, s5, 31
-; GFX10-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX10-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX10-NEXT: s_xor_b32 s0, s1, s0
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4293,7 +4293,7 @@ define amdgpu_ps i48 @s_ssubsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1]
; GFX11-NEXT: v_cmp_gt_i64_e64 s1, s[2:3], 0
; GFX11-NEXT: s_ashr_i32 s2, s5, 31
-; GFX11-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX11-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX11-NEXT: s_xor_b32 s0, s1, s0
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4351,11 +4351,11 @@ define amdgpu_ps <2 x float> @ssubsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v1, vcc
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], v[2:3]
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], 0, v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], 0, v[0:1]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[2:3], s[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4371,7 +4371,7 @@ define amdgpu_ps <2 x float> @ssubsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX10-NEXT: v_cmp_lt_i64_e64 s0, 0, v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4388,7 +4388,7 @@ define amdgpu_ps <2 x float> @ssubsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, 0, v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4442,15 +4442,15 @@ define amdgpu_ps <2 x float> @ssubsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
; GFX9-LABEL: ssubsat_i48_vs:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 16, v[0:1]
-; GFX9-NEXT: s_lshl_b64 s[2:3], s[0:1], 16
-; GFX9-NEXT: v_mov_b32_e32 v3, s3
-; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s2, v0
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 16
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], v[2:3], v[0:1]
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[2:3], s[2:3], 0
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], 0
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[2:3], s[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4466,7 +4466,7 @@ define amdgpu_ps <2 x float> @ssubsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
; GFX10-NEXT: v_cmp_gt_i64_e64 s0, s[0:1], 0
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4483,7 +4483,7 @@ define amdgpu_ps <2 x float> @ssubsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
; GFX11-NEXT: v_cmp_gt_i64_e64 s0, s[0:1], 0
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1]
@@ -4529,11 +4529,11 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, v0, v2
; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[4:5], v[0:1]
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[6:7], 0, v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[2:3]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v5
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -4546,7 +4546,7 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e64 s4, 0, v[2:3]
; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s5, 0x80000000, v6
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
@@ -4560,7 +4560,7 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, 0, v[2:3]
; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v6
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v6
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_cndmask_b32 v1, v5, v1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -4578,7 +4578,7 @@ define amdgpu_ps i64 @s_ssubsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX6-NEXT: v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
; GFX6-NEXT: s_ashr_i32 s2, s5, 31
-; GFX6-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX6-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v0, s2
; GFX6-NEXT: v_mov_b32_e32 v1, s3
; GFX6-NEXT: v_mov_b32_e32 v2, s4
@@ -4599,7 +4599,7 @@ define amdgpu_ps i64 @s_ssubsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX8-NEXT: v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
; GFX8-NEXT: s_ashr_i32 s2, s5, 31
-; GFX8-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX8-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: v_mov_b32_e32 v2, s4
@@ -4620,7 +4620,7 @@ define amdgpu_ps i64 @s_ssubsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
; GFX9-NEXT: s_ashr_i32 s2, s5, 31
-; GFX9-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX9-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: v_mov_b32_e32 v2, s4
@@ -4641,7 +4641,7 @@ define amdgpu_ps i64 @s_ssubsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX10-NEXT: v_cmp_gt_i64_e64 s1, s[2:3], 0
; GFX10-NEXT: v_mov_b32_e32 v1, s5
; GFX10-NEXT: s_ashr_i32 s2, s5, 31
-; GFX10-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX10-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX10-NEXT: s_xor_b32 s0, s1, s0
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4657,7 +4657,7 @@ define amdgpu_ps i64 @s_ssubsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1]
; GFX11-NEXT: v_cmp_gt_i64_e64 s1, s[2:3], 0
; GFX11-NEXT: s_ashr_i32 s2, s5, 31
-; GFX11-NEXT: s_add_u32 s3, s2, 0x80000000
+; GFX11-NEXT: s_add_i32 s3, s2, 0x80000000
; GFX11-NEXT: s_xor_b32 s0, s1, s0
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0
@@ -4702,11 +4702,11 @@ define amdgpu_ps <2 x float> @ssubsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v1, vcc
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], v[2:3]
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], 0, v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], 0, v[0:1]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[2:3], s[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: ; return to shader part epilog
@@ -4718,7 +4718,7 @@ define amdgpu_ps <2 x float> @ssubsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX10-NEXT: v_cmp_lt_i64_e64 s0, 0, v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4731,7 +4731,7 @@ define amdgpu_ps <2 x float> @ssubsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[2:3]
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, 0, v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: ; return to shader part epilog
@@ -4774,11 +4774,11 @@ define amdgpu_ps <2 x float> @ssubsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s0, v0
; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], v[2:3], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[0:1]
; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], 0
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, 0x80000000, v0
-; GFX9-NEXT: s_xor_b64 vcc, s[0:1], s[2:3]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT: ; return to shader part epilog
@@ -4790,7 +4790,7 @@ define amdgpu_ps <2 x float> @ssubsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
; GFX10-NEXT: v_cmp_gt_i64_e64 s0, s[0:1], 0
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_add_co_u32 v1, s1, 0x80000000, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4803,7 +4803,7 @@ define amdgpu_ps <2 x float> @ssubsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
; GFX11-NEXT: v_cmp_gt_i64_e64 s0, s[0:1], 0
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-NEXT: ; return to shader part epilog
@@ -4866,21 +4866,20 @@ define <2 x i64> @v_ssubsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v0, v4
; GFX9-NEXT: v_subb_co_u32_e32 v9, vcc, v1, v5, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[8:9], v[0:1]
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[6:7], 0, v[4:5]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[8:9], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[4:5]
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v9
-; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, v0, v1
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, v2, v6
; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v3, v7, vcc
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[4:5], v[2:3]
-; GFX9-NEXT: v_cmp_lt_i64_e64 s[6:7], 0, v[6:7]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[6:7]
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v5
-; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, 0x80000000, v2
-; GFX9-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX9-NEXT: v_add_u32_e32 v3, 0x80000000, v2
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -4896,10 +4895,10 @@ define <2 x i64> @v_ssubsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[8:9], v[0:1]
; GFX10-NEXT: v_cmp_lt_i64_e64 s4, 0, v[4:5]
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v11
-; GFX10-NEXT: v_cmp_lt_i64_e64 s6, 0, v[6:7]
-; GFX10-NEXT: v_add_co_u32 v1, s5, 0x80000000, v12
; GFX10-NEXT: v_cmp_lt_i64_e64 s5, v[10:11], v[2:3]
-; GFX10-NEXT: v_add_co_u32 v3, s7, 0x80000000, v4
+; GFX10-NEXT: v_cmp_lt_i64_e64 s6, 0, v[6:7]
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v12
+; GFX10-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v4
; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v12, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc_lo
@@ -4921,8 +4920,8 @@ define <2 x i64> @v_ssubsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v11
; GFX11-NEXT: v_cmp_lt_i64_e64 s1, v[10:11], v[2:3]
; GFX11-NEXT: v_cmp_lt_i64_e64 s2, 0, v[6:7]
-; GFX11-NEXT: v_add_co_u32 v1, null, 0x80000000, v12
-; GFX11-NEXT: v_add_co_u32 v3, null, 0x80000000, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x80000000, v12
+; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v4
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
; GFX11-NEXT: v_dual_cndmask_b32 v0, v8, v12 :: v_dual_cndmask_b32 v1, v9, v1
; GFX11-NEXT: s_xor_b32 vcc_lo, s2, s1
@@ -4942,7 +4941,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
; GFX6-NEXT: v_cmp_gt_i64_e64 s[0:1], s[4:5], 0
; GFX6-NEXT: s_ashr_i32 s4, s9, 31
-; GFX6-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX6-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: v_mov_b32_e32 v1, s5
; GFX6-NEXT: v_mov_b32_e32 v2, s8
@@ -4957,7 +4956,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
; GFX6-NEXT: v_cmp_gt_i64_e64 s[2:3], s[6:7], 0
; GFX6-NEXT: s_ashr_i32 s4, s1, 31
-; GFX6-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX6-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: v_mov_b32_e32 v1, s5
; GFX6-NEXT: v_mov_b32_e32 v4, s0
@@ -4980,7 +4979,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
; GFX8-NEXT: v_cmp_gt_i64_e64 s[0:1], s[4:5], 0
; GFX8-NEXT: s_ashr_i32 s4, s9, 31
-; GFX8-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX8-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: v_mov_b32_e32 v2, s8
@@ -4995,7 +4994,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
; GFX8-NEXT: v_cmp_gt_i64_e64 s[2:3], s[6:7], 0
; GFX8-NEXT: s_ashr_i32 s4, s1, 31
-; GFX8-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX8-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: v_mov_b32_e32 v4, s0
@@ -5018,7 +5017,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[4:5], 0
; GFX9-NEXT: s_ashr_i32 s4, s9, 31
-; GFX9-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX9-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s8
@@ -5033,7 +5032,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
; GFX9-NEXT: v_cmp_gt_i64_e64 s[2:3], s[6:7], 0
; GFX9-NEXT: s_ashr_i32 s4, s1, 31
-; GFX9-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX9-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v4, s0
@@ -5056,7 +5055,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX10-NEXT: v_cmp_gt_i64_e64 s1, s[4:5], 0
; GFX10-NEXT: s_ashr_i32 s4, s9, 31
; GFX10-NEXT: v_mov_b32_e32 v1, s9
-; GFX10-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX10-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX10-NEXT: s_xor_b32 s8, s1, s0
; GFX10-NEXT: s_sub_u32 s0, s2, s6
; GFX10-NEXT: s_subb_u32 s1, s3, s7
@@ -5067,7 +5066,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s4, s8
; GFX10-NEXT: s_ashr_i32 s4, s1, 31
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s5, s8
-; GFX10-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX10-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX10-NEXT: s_xor_b32 s1, s3, s2
; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s4, s1
; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s0, s1
@@ -5085,7 +5084,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[8:9], s[0:1]
; GFX11-NEXT: v_cmp_gt_i64_e64 s1, s[4:5], 0
; GFX11-NEXT: s_ashr_i32 s4, s9, 31
-; GFX11-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX11-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX11-NEXT: s_xor_b32 s8, s1, s0
; GFX11-NEXT: s_sub_u32 s0, s2, s6
; GFX11-NEXT: s_subb_u32 s1, s3, s7
@@ -5095,7 +5094,7 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s4, s8
; GFX11-NEXT: s_ashr_i32 s4, s1, 31
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s5, s8
-; GFX11-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX11-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX11-NEXT: s_xor_b32 s1, s3, s2
; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s4, s1
; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s0, s1
@@ -5134,7 +5133,7 @@ define amdgpu_ps i128 @s_ssubsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX6-NEXT: s_ashr_i32 s0, s11, 31
; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX6-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX6-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v1, s0
; GFX6-NEXT: v_mov_b32_e32 v2, s8
; GFX6-NEXT: v_mov_b32_e32 v3, s9
@@ -5183,7 +5182,7 @@ define amdgpu_ps i128 @s_ssubsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX8-NEXT: s_ashr_i32 s0, s11, 31
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX8-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX8-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v1, s0
; GFX8-NEXT: v_mov_b32_e32 v2, s8
; GFX8-NEXT: v_mov_b32_e32 v3, s9
@@ -5232,7 +5231,7 @@ define amdgpu_ps i128 @s_ssubsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: s_ashr_i32 s0, s11, 31
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX9-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: v_mov_b32_e32 v2, s8
; GFX9-NEXT: v_mov_b32_e32 v3, s9
@@ -5274,7 +5273,7 @@ define amdgpu_ps i128 @s_ssubsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s2
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1
-; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX10-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc_lo
; GFX10-NEXT: v_mov_b32_e32 v2, s9
; GFX10-NEXT: v_mov_b32_e32 v3, s11
@@ -5317,7 +5316,7 @@ define amdgpu_ps i128 @s_ssubsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s2
; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX11-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1
-; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX11-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX11-NEXT: v_dual_cndmask_b32 v1, v3, v2 :: v_dual_mov_b32 v2, s9
; GFX11-NEXT: v_mov_b32_e32 v3, s11
; GFX11-NEXT: v_xor_b32_e32 v0, v1, v0
@@ -5427,9 +5426,8 @@ define amdgpu_ps <4 x float> @ssubsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v7
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v8
-; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
-; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v2, v1
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_add_u32_e32 v3, 0x80000000, v2
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
@@ -5456,7 +5454,7 @@ define amdgpu_ps <4 x float> @ssubsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
; GFX10-NEXT: v_ashrrev_i32_e32 v2, 31, v7
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v3, s0, 0x80000000, v2
+; GFX10-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v2
; GFX10-NEXT: v_xor_b32_e32 v0, v0, v8
; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
@@ -5484,8 +5482,7 @@ define amdgpu_ps <4 x float> @ssubsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
; GFX11-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc_lo
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
; GFX11-NEXT: v_ashrrev_i32_e32 v2, 31, v7
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v3, null, 0x80000000, v2
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v1, v0 :: v_dual_add_nc_u32 v3, 0x80000000, v2
; GFX11-NEXT: v_xor_b32_e32 v0, v0, v8
; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
@@ -5594,9 +5591,8 @@ define amdgpu_ps <4 x float> @ssubsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v7
-; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
-; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v2, v1
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_add_u32_e32 v3, 0x80000000, v2
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
@@ -5625,7 +5621,7 @@ define amdgpu_ps <4 x float> @ssubsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v2, 31, v7
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
-; GFX10-NEXT: v_add_co_u32 v3, s0, 0x80000000, v2
+; GFX10-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v2
; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v8, vcc_lo
; GFX10-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
@@ -5652,12 +5648,12 @@ define amdgpu_ps <4 x float> @ssubsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
; GFX11-NEXT: v_cmp_gt_i64_e64 s0, s[2:3], 0
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3]
-; GFX11-NEXT: v_ashrrev_i32_e32 v2, 31, v7
; GFX11-NEXT: v_cndmask_b32_e64 v9, 0, 1, s0
; GFX11-NEXT: s_and_b32 s0, 1, s4
-; GFX11-NEXT: v_add_co_u32 v3, null, 0x80000000, v2
+; GFX11-NEXT: v_ashrrev_i32_e32 v2, 31, v7
; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX11-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
+; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x80000000, v2
; GFX11-NEXT: v_cndmask_b32_e32 v1, v9, v8, vcc_lo
; GFX11-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
@@ -5805,9 +5801,8 @@ define <2 x i128> @v_ssubsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v19
-; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
-; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v2, v1
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_add_u32_e32 v3, 0x80000000, v2
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v16, v2, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v17, v2, vcc
@@ -5831,8 +5826,8 @@ define <2 x i128> @v_ssubsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX9-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
; GFX9-NEXT: v_xor_b32_e32 v4, v5, v4
; GFX9-NEXT: v_ashrrev_i32_e32 v6, 31, v11
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, 0x80000000, v6
; GFX9-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX9-NEXT: v_add_u32_e32 v7, 0x80000000, v6
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
; GFX9-NEXT: v_cndmask_b32_e32 v4, v8, v6, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v6, vcc
@@ -5877,18 +5872,18 @@ define <2 x i128> @v_ssubsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v21
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc_lo
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[14:15]
-; GFX10-NEXT: v_ashrrev_i32_e32 v3, 31, v19
-; GFX10-NEXT: v_add_co_u32 v7, s5, 0x80000000, v6
+; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x80000000, v6
; GFX10-NEXT: v_cndmask_b32_e32 v2, v5, v4, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v4, s4, 0x80000000, v3
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX10-NEXT: v_xor_b32_e32 v1, v2, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v16, v3, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v18, v3, vcc_lo
-; GFX10-NEXT: v_and_b32_e32 v5, 1, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v17, v3, vcc_lo
+; GFX10-NEXT: v_ashrrev_i32_e32 v2, 31, v19
+; GFX10-NEXT: v_and_b32_e32 v3, 1, v1
+; GFX10-NEXT: v_add_nc_u32_e32 v4, 0x80000000, v2
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v16, v2, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v17, v2, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, v3
; GFX10-NEXT: v_cndmask_b32_e32 v3, v19, v4, vcc_lo
-; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, v5
; GFX10-NEXT: v_cndmask_b32_e64 v4, v8, v6, s4
; GFX10-NEXT: v_cndmask_b32_e64 v5, v9, v6, s4
; GFX10-NEXT: v_cndmask_b32_e64 v6, v20, v6, s4
@@ -5931,18 +5926,16 @@ define <2 x i128> @v_ssubsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v21
; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc_lo
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[14:15]
-; GFX11-NEXT: v_ashrrev_i32_e32 v3, 31, v19
+; GFX11-NEXT: v_dual_cndmask_b32 v2, v5, v4 :: v_dual_add_nc_u32 v7, 0x80000000, v6
+; GFX11-NEXT: v_xor_b32_e32 v1, v2, v1
+; GFX11-NEXT: v_ashrrev_i32_e32 v2, 31, v19
; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX11-NEXT: v_add_co_u32 v7, null, 0x80000000, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v4, vcc_lo
+; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x80000000, v2
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: v_add_co_u32 v4, null, 0x80000000, v3
-; GFX11-NEXT: v_xor_b32_e32 v1, v2, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v16, v3, vcc_lo
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v18, v3 :: v_dual_and_b32 v5, 1, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v17, v3, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v19, v4, vcc_lo
-; GFX11-NEXT: v_cmp_ne_u32_e64 s0, 0, v5
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v16, v2 :: v_dual_and_b32 v3, 1, v1
+; GFX11-NEXT: v_cmp_ne_u32_e64 s0, 0, v3
+; GFX11-NEXT: v_cndmask_b32_e32 v1, v17, v2, vcc_lo
+; GFX11-NEXT: v_dual_cndmask_b32 v2, v18, v2 :: v_dual_cndmask_b32 v3, v19, v4
; GFX11-NEXT: v_cndmask_b32_e64 v4, v8, v6, s0
; GFX11-NEXT: v_cndmask_b32_e64 v5, v9, v6, s0
; GFX11-NEXT: v_cndmask_b32_e64 v6, v20, v6, s0
@@ -5978,7 +5971,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX6-NEXT: s_ashr_i32 s0, s19, 31
; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX6-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX6-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v1, s0
; GFX6-NEXT: v_mov_b32_e32 v2, s16
; GFX6-NEXT: v_mov_b32_e32 v3, s17
@@ -6013,7 +6006,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX6-NEXT: s_ashr_i32 s4, s3, 31
; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX6-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX6-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX6-NEXT: v_mov_b32_e32 v1, s4
; GFX6-NEXT: v_mov_b32_e32 v2, s0
; GFX6-NEXT: v_mov_b32_e32 v3, s1
@@ -6066,7 +6059,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX8-NEXT: s_ashr_i32 s0, s19, 31
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX8-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX8-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v1, s0
; GFX8-NEXT: v_mov_b32_e32 v2, s16
; GFX8-NEXT: v_mov_b32_e32 v3, s17
@@ -6107,7 +6100,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX8-NEXT: s_ashr_i32 s4, s3, 31
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX8-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX8-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX8-NEXT: v_mov_b32_e32 v1, s4
; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: v_mov_b32_e32 v3, s1
@@ -6160,7 +6153,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: s_ashr_i32 s0, s19, 31
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: s_add_u32 s1, s0, 0x80000000
+; GFX9-NEXT: s_add_i32 s1, s0, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: v_mov_b32_e32 v2, s16
; GFX9-NEXT: v_mov_b32_e32 v3, s17
@@ -6201,7 +6194,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: s_ashr_i32 s4, s3, 31
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: s_add_u32 s5, s4, 0x80000000
+; GFX9-NEXT: s_add_i32 s5, s4, 0x80000000
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: v_mov_b32_e32 v2, s0
; GFX9-NEXT: v_mov_b32_e32 v3, s1
@@ -6244,7 +6237,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX10-NEXT: s_cselect_b32 s1, 1, 0
; GFX10-NEXT: s_ashr_i32 s8, s17, 31
; GFX10-NEXT: s_and_b32 s1, 1, s1
-; GFX10-NEXT: s_add_u32 s9, s8, 0x80000000
+; GFX10-NEXT: s_add_i32 s9, s8, 0x80000000
; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s2
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1
@@ -6273,7 +6266,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX10-NEXT: s_cselect_b32 s5, 1, 0
; GFX10-NEXT: s_ashr_i32 s4, s3, 31
; GFX10-NEXT: s_and_b32 s5, 1, s5
-; GFX10-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX10-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX10-NEXT: v_cndmask_b32_e64 v4, 0, 1, s6
; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc_lo
; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s5
@@ -6326,7 +6319,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX11-NEXT: s_cselect_b32 s1, 1, 0
; GFX11-NEXT: s_ashr_i32 s8, s19, 31
; GFX11-NEXT: s_and_b32 s1, 1, s1
-; GFX11-NEXT: s_add_u32 s9, s8, 0x80000000
+; GFX11-NEXT: s_add_i32 s9, s8, 0x80000000
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s2
; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX11-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1
@@ -6357,7 +6350,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc_lo
; GFX11-NEXT: v_cndmask_b32_e64 v4, 0, 1, s6
; GFX11-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s5
-; GFX11-NEXT: s_add_u32 s0, s4, 0x80000000
+; GFX11-NEXT: s_add_i32 s0, s4, 0x80000000
; GFX11-NEXT: v_dual_cndmask_b32 v2, v4, v3 :: v_dual_mov_b32 v3, s16
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-NEXT: v_mov_b32_e32 v0, s18
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
index 887c43f..d155513 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
@@ -2062,13 +2062,9 @@ define <2 x i64> @v_udiv_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
; GISEL-NEXT: v_mul_hi_u32 v17, v2, v5
; GISEL-NEXT: v_mul_hi_u32 v5, 0, v5
; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v12
-; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v6, vcc, v13, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v10, vcc, v10, v15
-; GISEL-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v16, v7
-; GISEL-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v9
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v14
@@ -2077,10 +2073,6 @@ define <2 x i64> @v_udiv_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v17
; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v12, v8
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v13, v9
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, v15, v10
-; GISEL-NEXT: v_add_i32_e32 v11, vcc, v16, v11
; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v8
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v10
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
index 5c6bb6d..07480a0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
@@ -2480,13 +2480,9 @@ define <2 x i64> @v_urem_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
; GISEL-NEXT: v_mul_hi_u32 v17, v2, v5
; GISEL-NEXT: v_mul_hi_u32 v5, 0, v5
; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v12
-; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v6, vcc, v13, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v10, vcc, v10, v15
-; GISEL-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v16, v7
-; GISEL-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v9
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v14
@@ -2495,10 +2491,6 @@ define <2 x i64> @v_urem_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v17
; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v12, v8
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v13, v9
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, v15, v10
-; GISEL-NEXT: v_add_i32_e32 v11, vcc, v16, v11
; GISEL-NEXT: v_add_i32_e32 v6, vcc, v6, v8
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v7, vcc, v7, v10
diff --git a/llvm/test/CodeGen/AMDGPU/add_sub_u64_pseudos.mir b/llvm/test/CodeGen/AMDGPU/add_sub_u64_pseudos.mir
new file mode 100644
index 0000000..cba114c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/add_sub_u64_pseudos.mir
@@ -0,0 +1,68 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -run-pass=finalize-isel -o - %s | FileCheck -check-prefix=GFX11 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -run-pass=finalize-isel -o - %s | FileCheck -check-prefix=GFX12 %s
+
+---
+name: reg_ops
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GFX11-LABEL: name: reg_ops
+ ; GFX11: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX11-NEXT: [[DEF1:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[DEF1]].sub0
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[DEF1]].sub1
+ ; GFX11-NEXT: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY]], [[COPY2]], implicit-def $scc
+ ; GFX11-NEXT: [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 [[COPY1]], [[COPY3]], implicit-def $scc, implicit $scc
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
+ ;
+ ; GFX12-LABEL: name: reg_ops
+ ; GFX12: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX12-NEXT: [[DEF1:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX12-NEXT: [[S_ADD_U64_:%[0-9]+]]:sreg_64 = S_ADD_U64 [[DEF]], [[DEF1]]
+ %0:sreg_64 = IMPLICIT_DEF
+ %1:sreg_64 = IMPLICIT_DEF
+ %2:sreg_64 = S_ADD_U64_PSEUDO %0, %1, implicit-def $scc
+...
+
+---
+name: lhs_imm
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GFX11-LABEL: name: lhs_imm
+ ; GFX11: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub1
+ ; GFX11-NEXT: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 6565, [[COPY]], implicit-def $scc
+ ; GFX11-NEXT: [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 0, [[COPY1]], implicit-def $scc, implicit $scc
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
+ ;
+ ; GFX12-LABEL: name: lhs_imm
+ ; GFX12: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX12-NEXT: [[S_ADD_U64_:%[0-9]+]]:sreg_64 = S_ADD_U64 6565, [[DEF]]
+ %0:sreg_64 = IMPLICIT_DEF
+ %1:sreg_64 = S_ADD_U64_PSEUDO 6565, %0, implicit-def $scc
+...
+
+---
+name: rhs_imm
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GFX11-LABEL: name: rhs_imm
+ ; GFX11: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub1
+ ; GFX11-NEXT: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY]], 6565, implicit-def $scc
+ ; GFX11-NEXT: [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 [[COPY1]], 0, implicit-def $scc, implicit $scc
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
+ ;
+ ; GFX12-LABEL: name: rhs_imm
+ ; GFX12: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX12-NEXT: [[S_ADD_U64_:%[0-9]+]]:sreg_64 = S_ADD_U64 [[DEF]], 6565
+ %0:sreg_64 = IMPLICIT_DEF
+ %1:sreg_64 = S_ADD_U64_PSEUDO %0, 6565, implicit-def $scc
+...
diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
index 66034af..cff9ce0 100644
--- a/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
@@ -233,9 +233,9 @@ attributes #1 = { nounwind }
; AKF_HSA: attributes #[[ATTR1]] = { nounwind }
;.
; ATTRIBUTOR_HSA: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
-; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
;.
; AKF_HSA: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 500}
;.
diff --git a/llvm/test/CodeGen/AMDGPU/allow-check.ll b/llvm/test/CodeGen/AMDGPU/allow-check.ll
new file mode 100644
index 0000000..d4f5621
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/allow-check.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -global-isel=0 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -global-isel=1 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -global-isel=0 -fast-isel=1 | FileCheck %s
+
+define i1 @test_runtime() local_unnamed_addr {
+; CHECK-LABEL: test_runtime:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v0, 1
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %allow = call i1 @llvm.allow.runtime.check(metadata !"test_check")
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.runtime.check(metadata) nounwind
+
+define i1 @test_ubsan() local_unnamed_addr {
+; CHECK-LABEL: test_ubsan:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v0, 1
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %allow = call i1 @llvm.allow.ubsan.check(i8 7)
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.ubsan.check(i8) nounwind
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll
new file mode 100644
index 0000000..33b1cc6
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll
@@ -0,0 +1,255 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals all --version 4
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx90a -passes=amdgpu-attributor %s | FileCheck %s
+
+define amdgpu_kernel void @kernel_uses_asm_virtreg() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "a"(i32 poison)
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_asm_virtreg_def() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg_def(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: [[DEF:%.*]] = call i32 asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ %def = call i32 asm sideeffect "; def $0", "=a"()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_asm_physreg_def_tuple() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_physreg_def_tuple(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: [[DEF:%.*]] = call i64 asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ %def = call i64 asm sideeffect "; def $0", "={a[0:1]}"()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_asm_virtreg_second_arg() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg_second_arg(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "v,a"(i32 poison, i32 poison)
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_non_agpr_asm() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_non_agpr_asm(
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "v"(i32 poison)
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_asm_physreg() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_physreg(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "{a0}"(i32 poison)
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_asm_physreg_tuple() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_physreg_tuple(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "{a[0:1]}"(i64 poison)
+ ret void
+}
+
+define void @func_uses_asm_virtreg_agpr() {
+; CHECK-LABEL: define void @func_uses_asm_virtreg_agpr(
+; CHECK-SAME: ) #[[ATTR2:[0-9]+]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "a"(i32 poison)
+ ret void
+}
+
+define void @func_uses_asm_physreg_agpr() {
+; CHECK-LABEL: define void @func_uses_asm_physreg_agpr(
+; CHECK-SAME: ) #[[ATTR2]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "{a0}"(i32 poison)
+ ret void
+}
+
+define void @func_uses_asm_physreg_agpr_tuple() {
+; CHECK-LABEL: define void @func_uses_asm_physreg_agpr_tuple(
+; CHECK-SAME: ) #[[ATTR2]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "{a[0:1]}"(i64 poison)
+ ret void
+}
+
+declare void @unknown()
+
+define amdgpu_kernel void @kernel_calls_extern() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_extern(
+; CHECK-SAME: ) #[[ATTR4:[0-9]+]] {
+; CHECK-NEXT: call void @unknown()
+; CHECK-NEXT: ret void
+;
+ call void @unknown()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_extern_marked_callsite() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_extern_marked_callsite(
+; CHECK-SAME: ) #[[ATTR4]] {
+; CHECK-NEXT: call void @unknown() #[[ATTR9:[0-9]+]]
+; CHECK-NEXT: ret void
+;
+ call void @unknown() #0
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_indirect(ptr %indirect) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_indirect(
+; CHECK-SAME: ptr [[INDIRECT:%.*]]) #[[ATTR4]] {
+; CHECK-NEXT: call void [[INDIRECT]]()
+; CHECK-NEXT: ret void
+;
+ call void %indirect()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_indirect_marked_callsite(ptr %indirect) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_indirect_marked_callsite(
+; CHECK-SAME: ptr [[INDIRECT:%.*]]) #[[ATTR4]] {
+; CHECK-NEXT: call void [[INDIRECT]]() #[[ATTR9]]
+; CHECK-NEXT: ret void
+;
+ call void %indirect() #0
+ ret void
+}
+
+define amdgpu_kernel void @kernel_transitively_uses_agpr_asm() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_transitively_uses_agpr_asm(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: call void @func_uses_asm_physreg_agpr()
+; CHECK-NEXT: ret void
+;
+ call void @func_uses_asm_physreg_agpr()
+ ret void
+}
+
+define void @empty() {
+; CHECK-LABEL: define void @empty(
+; CHECK-SAME: ) #[[ATTR5:[0-9]+]] {
+; CHECK-NEXT: ret void
+;
+ ret void
+}
+
+define void @also_empty() {
+; CHECK-LABEL: define void @also_empty(
+; CHECK-SAME: ) #[[ATTR5]] {
+; CHECK-NEXT: ret void
+;
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_empty() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_empty(
+; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-NEXT: call void @empty()
+; CHECK-NEXT: ret void
+;
+ call void @empty()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_non_agpr_and_agpr() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_non_agpr_and_agpr(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: call void @empty()
+; CHECK-NEXT: call void @func_uses_asm_physreg_agpr()
+; CHECK-NEXT: ret void
+;
+ call void @empty()
+ call void @func_uses_asm_physreg_agpr()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_generic_intrinsic(ptr %ptr0, ptr %ptr1, i64 %size) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_generic_intrinsic(
+; CHECK-SAME: ptr [[PTR0:%.*]], ptr [[PTR1:%.*]], i64 [[SIZE:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[PTR0]], ptr [[PTR1]], i64 [[SIZE]], i1 false)
+; CHECK-NEXT: ret void
+;
+ call void @llvm.memcpy.p0.p0.i64(ptr %ptr0, ptr %ptr1, i64 %size, i1 false)
+ ret void
+}
+
+declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg)
+
+define amdgpu_kernel void @kernel_calls_mfma.f32.32x32x1f32(ptr addrspace(1) %out, float %a, float %b, <32 x float> %c) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_mfma.f32.32x32x1f32(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], float [[A:%.*]], float [[B:%.*]], <32 x float> [[C:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[RESULT:%.*]] = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float [[A]], float [[B]], <32 x float> [[C]], i32 0, i32 0, i32 0)
+; CHECK-NEXT: store <32 x float> [[RESULT]], ptr addrspace(1) [[OUT]], align 128
+; CHECK-NEXT: ret void
+;
+ %result = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %a, float %b, <32 x float> %c, i32 0, i32 0, i32 0)
+ store <32 x float> %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_workitem_id_x(ptr addrspace(1) %out) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_workitem_id_x(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: store i32 [[RESULT]], ptr addrspace(1) [[OUT]], align 4
+; CHECK-NEXT: ret void
+;
+ %result = call i32 @llvm.amdgcn.workitem.id.x()
+ store i32 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @indirect_calls_none_agpr(i1 %cond) {
+; CHECK-LABEL: define amdgpu_kernel void @indirect_calls_none_agpr(
+; CHECK-SAME: i1 [[COND:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[FPTR:%.*]] = select i1 [[COND]], ptr @empty, ptr @also_empty
+; CHECK-NEXT: call void [[FPTR]]()
+; CHECK-NEXT: ret void
+;
+ %fptr = select i1 %cond, ptr @empty, ptr @also_empty
+ call void %fptr()
+ ret void
+}
+
+
+attributes #0 = { "amdgpu-no-agpr" }
+;.
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,8" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { "amdgpu-waves-per-eu"="4,8" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR4]] = { "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR5]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,8" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR6:[0-9]+]] = { convergent nocallback nofree nosync nounwind willreturn memory(none) "target-cpu"="gfx90a" }
+; CHECK: attributes #[[ATTR7:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) "target-cpu"="gfx90a" }
+; CHECK: attributes #[[ATTR8:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) "target-cpu"="gfx90a" }
+; CHECK: attributes #[[ATTR9]] = { "amdgpu-no-agpr" }
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-break-large-phis.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-break-large-phis.ll
index 192bf7c..93b9aea 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-break-large-phis.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-break-large-phis.ll
@@ -1197,3 +1197,54 @@ reallyfinally:
store <5 x double> %val, ptr %out, align 1
ret void
}
+
+define amdgpu_kernel void @pr85718(i1 %Bool, ptr %Ptr, <4 x float> %Vec1, <4 x float> %Vec2) {
+; OPT-LABEL: @pr85718(
+; OPT-NEXT: BB0:
+; OPT-NEXT: [[I:%.*]] = insertelement <4 x float> [[VEC1:%.*]], float 4.200000e+01, i1 true
+; OPT-NEXT: br label [[BB1:%.*]]
+; OPT: BB1:
+; OPT-NEXT: [[TMP0:%.*]] = phi float [ [[LARGEPHI_EXTRACTSLICE0:%.*]], [[BB2:%.*]] ], [ [[LARGEPHI_EXTRACTSLICE1:%.*]], [[BB1]] ], [ 0.000000e+00, [[BB0:%.*]] ]
+; OPT-NEXT: [[TMP1:%.*]] = phi float [ [[LARGEPHI_EXTRACTSLICE3:%.*]], [[BB2]] ], [ [[LARGEPHI_EXTRACTSLICE4:%.*]], [[BB1]] ], [ 0.000000e+00, [[BB0]] ]
+; OPT-NEXT: [[TMP2:%.*]] = phi float [ [[LARGEPHI_EXTRACTSLICE6:%.*]], [[BB2]] ], [ [[LARGEPHI_EXTRACTSLICE7:%.*]], [[BB1]] ], [ 0.000000e+00, [[BB0]] ]
+; OPT-NEXT: [[TMP3:%.*]] = phi float [ [[LARGEPHI_EXTRACTSLICE9:%.*]], [[BB2]] ], [ [[LARGEPHI_EXTRACTSLICE10:%.*]], [[BB1]] ], [ 0.000000e+00, [[BB0]] ]
+; OPT-NEXT: [[LARGEPHI_INSERTSLICE0:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0
+; OPT-NEXT: [[LARGEPHI_INSERTSLICE1:%.*]] = insertelement <4 x float> [[LARGEPHI_INSERTSLICE0]], float [[TMP1]], i64 1
+; OPT-NEXT: [[LARGEPHI_INSERTSLICE2:%.*]] = insertelement <4 x float> [[LARGEPHI_INSERTSLICE1]], float [[TMP2]], i64 2
+; OPT-NEXT: [[LARGEPHI_INSERTSLICE3:%.*]] = insertelement <4 x float> [[LARGEPHI_INSERTSLICE2]], float [[TMP3]], i64 3
+; OPT-NEXT: store <4 x float> [[LARGEPHI_INSERTSLICE3]], ptr [[PTR:%.*]], align 128
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE1]] = extractelement <4 x float> [[VEC2:%.*]], i64 0
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE4]] = extractelement <4 x float> [[VEC2]], i64 1
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE7]] = extractelement <4 x float> [[VEC2]], i64 2
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE10]] = extractelement <4 x float> [[VEC2]], i64 3
+; OPT-NEXT: br i1 [[BOOL:%.*]], label [[BB1]], label [[BB2]]
+; OPT: BB2:
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE0]] = extractelement <4 x float> [[I]], i64 0
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE3]] = extractelement <4 x float> [[I]], i64 1
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE6]] = extractelement <4 x float> [[I]], i64 2
+; OPT-NEXT: [[LARGEPHI_EXTRACTSLICE9]] = extractelement <4 x float> [[I]], i64 3
+; OPT-NEXT: br label [[BB1]]
+;
+; NOOPT-LABEL: @pr85718(
+; NOOPT-NEXT: BB0:
+; NOOPT-NEXT: [[I:%.*]] = insertelement <4 x float> [[VEC1:%.*]], float 4.200000e+01, i1 true
+; NOOPT-NEXT: br label [[BB1:%.*]]
+; NOOPT: BB1:
+; NOOPT-NEXT: [[PHI:%.*]] = phi <4 x float> [ [[I]], [[BB2:%.*]] ], [ [[VEC2:%.*]], [[BB1]] ], [ zeroinitializer, [[BB0:%.*]] ]
+; NOOPT-NEXT: store <4 x float> [[PHI]], ptr [[PTR:%.*]], align 128
+; NOOPT-NEXT: br i1 [[BOOL:%.*]], label [[BB1]], label [[BB2]]
+; NOOPT: BB2:
+; NOOPT-NEXT: br label [[BB1]]
+;
+BB0:
+ %I = insertelement <4 x float> %Vec1, float 4.200000e+01, i1 true
+ br label %BB1
+
+BB1: ; preds = %BB0, %BB1, %BB2
+ %PHI = phi <4 x float> [ %I, %BB2 ], [ %Vec2, %BB1 ], [ zeroinitializer, %BB0 ]
+ store <4 x float> %PHI, ptr %Ptr, align 128
+ br i1 %Bool, label %BB1, label %BB2
+
+BB2: ; preds = %BB1
+ br label %BB1
+}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index d900165..2ad28b8 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -10668,3 +10668,111 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
store <2 x i64> %r, ptr addrspace(1) %out
ret void
}
+
+define <2 x i32> @v_sdiv_i32_exact(<2 x i32> %num) {
+; CHECK-LABEL: @v_sdiv_i32_exact(
+; CHECK: %1 = extractelement <2 x i32> %num, i64 0
+; CHECK-NEXT: %2 = sdiv exact i32 %1, 4096
+; CHECK-NEXT: %3 = insertelement <2 x i32> poison, i32 %2, i64 0
+; CHECK-NEXT: %4 = extractelement <2 x i32> %num, i64 1
+; CHECK-NEXT: %5 = sdiv exact i32 %4, 1024
+; CHECK-NEXT: %6 = insertelement <2 x i32> %3, i32 %5, i64 1
+; CHECK-NEXT: ret <2 x i32> %6
+;
+; GFX6-LABEL: v_sdiv_i32_exact:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 12, v0
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 10, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_sdiv_i32_exact:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_ashrrev_i32_e32 v0, 12, v0
+; GFX9-NEXT: v_ashrrev_i32_e32 v1, 10, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact <2 x i32> %num, <i32 4096, i32 1024>
+ ret <2 x i32> %result
+}
+
+define <2 x i64> @v_sdiv_i64_exact(<2 x i64> %num) {
+; CHECK-LABEL: @v_sdiv_i64_exact(
+; CHECK: %1 = extractelement <2 x i64> %num, i64 0
+; CHECK-NEXT: %2 = sdiv exact i64 %1, 4096
+; CHECK-NEXT: %3 = insertelement <2 x i64> poison, i64 %2, i64 0
+; CHECK-NEXT: %4 = extractelement <2 x i64> %num, i64 1
+; CHECK-NEXT: %5 = sdiv exact i64 %4, 1024
+; CHECK-NEXT: %6 = insertelement <2 x i64> %3, i64 %5, i64 1
+; CHECK-NEXT: ret <2 x i64> %6
+;
+; GFX6-LABEL: v_sdiv_i64_exact:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
+; GFX6-NEXT: v_ashr_i64 v[2:3], v[2:3], 10
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_sdiv_i64_exact:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_ashrrev_i64 v[0:1], 12, v[0:1]
+; GFX9-NEXT: v_ashrrev_i64 v[2:3], 10, v[2:3]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact <2 x i64> %num, <i64 4096, i64 1024>
+ ret <2 x i64> %result
+}
+
+define <2 x i32> @v_udiv_i32_exact(<2 x i32> %num) {
+; CHECK-LABEL: @v_udiv_i32_exact(
+; CHECK: %1 = extractelement <2 x i32> %num, i64 0
+; CHECK-NEXT: %2 = udiv exact i32 %1, 4096
+; CHECK-NEXT: %3 = insertelement <2 x i32> poison, i32 %2, i64 0
+; CHECK-NEXT: %4 = extractelement <2 x i32> %num, i64 1
+; CHECK-NEXT: %5 = udiv exact i32 %4, 1024
+; CHECK-NEXT: %6 = insertelement <2 x i32> %3, i32 %5, i64 1
+; CHECK-NEXT: ret <2 x i32> %6
+;
+; GFX6-LABEL: v_udiv_i32_exact:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 12, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 10, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_udiv_i32_exact:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 12, v0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %result = udiv exact <2 x i32> %num, <i32 4096, i32 1024>
+ ret <2 x i32> %result
+}
+
+define <2 x i64> @v_udiv_i64_exact(<2 x i64> %num) {
+; CHECK-LABEL: @v_udiv_i64_exact(
+; CHECK: %1 = extractelement <2 x i64> %num, i64 0
+; CHECK-NEXT: %2 = udiv exact i64 %1, 4096
+; CHECK-NEXT: %3 = insertelement <2 x i64> poison, i64 %2, i64 0
+; CHECK-NEXT: %4 = extractelement <2 x i64> %num, i64 1
+; CHECK-NEXT: %5 = udiv exact i64 %4, 1024
+; CHECK-NEXT: %6 = insertelement <2 x i64> %3, i64 %5, i64 1
+; CHECK-NEXT: ret <2 x i64> %6
+;
+; GFX6-LABEL: v_udiv_i64_exact:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshr_b64 v[0:1], v[0:1], 12
+; GFX6-NEXT: v_lshr_b64 v[2:3], v[2:3], 10
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_udiv_i64_exact:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b64 v[0:1], 12, v[0:1]
+; GFX9-NEXT: v_lshrrev_b64 v[2:3], 10, v[2:3]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %result = udiv exact <2 x i64> %num, <i64 4096, i64 1024>
+ ret <2 x i64> %result
+}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
index 942f459..8ddaf24 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
@@ -808,7 +808,7 @@ define float @test_pown_fast_f32_nobuiltin(float %x, i32 %y) {
; CHECK-LABEL: define float @test_pown_fast_f32_nobuiltin
; CHECK-SAME: (float [[X:%.*]], i32 [[Y:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call fast float @_Z4pownfi(float [[X]], i32 [[Y]]) #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call fast float @_Z4pownfi(float [[X]], i32 [[Y]]) #[[ATTR4:[0-9]+]]
; CHECK-NEXT: ret float [[CALL]]
;
entry:
@@ -820,11 +820,11 @@ define float @test_pown_fast_f32_strictfp(float %x, i32 %y) #1 {
; CHECK-LABEL: define float @test_pown_fast_f32_strictfp
; CHECK-SAME: (float [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]])
-; CHECK-NEXT: [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]])
-; CHECK-NEXT: [[POWNI2F:%.*]] = sitofp i32 [[Y]] to float
-; CHECK-NEXT: [[__YLOGX:%.*]] = fmul fast float [[__LOG2]], [[POWNI2F]]
-; CHECK-NEXT: [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]])
+; CHECK-NEXT: [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]]) #[[ATTR0]]
+; CHECK-NEXT: [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]]) #[[ATTR0]]
+; CHECK-NEXT: [[POWNI2F:%.*]] = call fast float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[Y]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[__YLOGX:%.*]] = call fast float @llvm.experimental.constrained.fmul.f32(float [[POWNI2F]], float [[__LOG2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]]) #[[ATTR0]]
; CHECK-NEXT: [[__YEVEN:%.*]] = shl i32 [[Y]], 31
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[X]] to i32
; CHECK-NEXT: [[__POW_SIGN:%.*]] = and i32 [[__YEVEN]], [[TMP0]]
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll
index 2ffa647..2e64a34 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll
@@ -896,7 +896,7 @@ define float @test_rootn_f32__y_neg2__strictfp(float %x) #1 {
; CHECK-LABEL: define float @test_rootn_f32__y_neg2__strictfp(
; CHECK-SAME: float [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call float @_Z5rsqrtf(float [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call float @_Z5rsqrtf(float [[X]]) #[[ATTR0]]
; CHECK-NEXT: ret float [[__ROOTN2RSQRT]]
;
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
index af0eb23..3d4ae84d9 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
@@ -1025,33 +1025,33 @@ attributes #6 = { "enqueued-block" }
; AKF_HSA: attributes #[[ATTR8]] = { "amdgpu-calls" }
;.
; ATTRIBUTOR_HSA: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR12]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR13]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR14]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR15]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR12]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR13]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR14]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR15]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR16]] = { nounwind "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR17]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR17]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR18]] = { nounwind "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR19]] = { nounwind sanitize_address "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR20]] = { nounwind sanitize_address "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR21]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR22]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR19]] = { nounwind sanitize_address "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR20]] = { nounwind sanitize_address "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR21]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR22]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR23:[0-9]+]] = { nounwind sanitize_address "amdgpu-no-implicitarg-ptr" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR24:[0-9]+]] = { "amdgpu-waves-per-eu"="4,10" "enqueued-block" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR25]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "enqueued-block" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR25]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "enqueued-block" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR26]] = { "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR27]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR27]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR28]] = { nounwind }
; ATTRIBUTOR_HSA: attributes #[[ATTR29]] = { "enqueued-block" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
index 9a9c28a..43cdf85 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
@@ -643,19 +643,19 @@ attributes #1 = { nounwind }
; AKF_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-stack-objects" }
;.
; ATTRIBUTOR_HSA: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR12]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR13]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR12]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR13]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
;.
; AKF_HSA: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 500}
;.
diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
index 6c5e58c..547ff69 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
@@ -393,17 +393,18 @@ define amdgpu_kernel void @use_get_local_size_z(ptr addrspace(1) %ptr) #1 {
attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }
+;.
; AKF_CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
; AKF_CHECK: attributes #[[ATTR1]] = { nounwind }
;.
; ATTRIBUTOR_CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR1]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR3]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR4]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR5]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR6]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR7]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR8]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR9]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR1]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR3]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR4]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR5]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR6]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR7]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR8]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR9]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
index 1ebd864..2970495 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
@@ -477,7 +477,6 @@ define amdgpu_ps void @add_i32_varying(ptr addrspace(8) inreg %out, ptr addrspac
; GFX1032-NEXT: s_cbranch_execz .LBB1_3
; GFX1032-NEXT: ; %bb.2:
; GFX1032-NEXT: v_mov_b32_e32 v0, s11
-; GFX1032-NEXT: s_mov_b32 s10, s11
; GFX1032-NEXT: buffer_atomic_add v0, off, s[4:7], 0 glc
; GFX1032-NEXT: .LBB1_3:
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
@@ -615,7 +614,6 @@ define amdgpu_ps void @add_i32_varying(ptr addrspace(8) inreg %out, ptr addrspac
; GFX1132-NEXT: s_cbranch_execz .LBB1_3
; GFX1132-NEXT: ; %bb.2:
; GFX1132-NEXT: v_mov_b32_e32 v0, s11
-; GFX1132-NEXT: s_mov_b32 s10, s11
; GFX1132-NEXT: buffer_atomic_add_u32 v0, off, s[4:7], 0 glc
; GFX1132-NEXT: .LBB1_3:
; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s9
diff --git a/llvm/test/CodeGen/AMDGPU/av_spill_cross_bb_usage.mir b/llvm/test/CodeGen/AMDGPU/av_spill_cross_bb_usage.mir
index c1da29e..3228962 100644
--- a/llvm/test/CodeGen/AMDGPU/av_spill_cross_bb_usage.mir
+++ b/llvm/test/CodeGen/AMDGPU/av_spill_cross_bb_usage.mir
@@ -14,6 +14,8 @@
---
name: test_av_spill_cross_bb_usage
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 4 }
machineFunctionInfo:
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
new file mode 100644
index 0000000..7108f3d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
@@ -0,0 +1,357 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -march=amdgcn -mcpu=gfx940 < %s | FileCheck --check-prefixes=GCN %s
+
+; TODO: Add global-isel when it can support bf16
+
+define amdgpu_ps float @v_test_cvt_bf16_f32_v(bfloat %v) {
+; GCN-LABEL: v_test_cvt_bf16_f32_v:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = fpext bfloat %v to float
+ ret float %cvt
+}
+
+define amdgpu_ps float @v_test_cvt_bf16_f32_s(bfloat inreg %v) {
+; GCN-LABEL: v_test_cvt_bf16_f32_s:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_lshl_b32 s0, s0, 16
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = fpext bfloat %v to float
+ ret float %cvt
+}
+
+define amdgpu_ps float @v_test_cvt_v2f32_v2bf16_v(<2 x float> %src) {
+; GCN-LABEL: v_test_cvt_v2f32_v2bf16_v:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v2, v2, v0, s0
+; GCN-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GCN-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GCN-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GCN-NEXT: v_add3_u32 v2, v2, v1, s0
+; GCN-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GCN-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GCN-NEXT: s_mov_b32 s0, 0x7060302
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GCN-NEXT: v_perm_b32 v0, v1, v0, s0
+; GCN-NEXT: ; return to shader part epilog
+ %res = fptrunc <2 x float> %src to <2 x bfloat>
+ %cast = bitcast <2 x bfloat> %res to float
+ ret float %cast
+}
+
+define amdgpu_ps float @v_test_cvt_v2f32_v2bf16_s(<2 x float> inreg %src) {
+; GCN-LABEL: v_test_cvt_v2f32_v2bf16_s:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_bfe_u32 s2, s1, 0x10010
+; GCN-NEXT: s_add_i32 s2, s2, s1
+; GCN-NEXT: s_or_b32 s4, s1, 0x400000
+; GCN-NEXT: s_add_i32 s5, s2, 0x7fff
+; GCN-NEXT: v_cmp_u_f32_e64 s[2:3], s1, s1
+; GCN-NEXT: s_and_b64 s[2:3], s[2:3], exec
+; GCN-NEXT: s_cselect_b32 s2, s4, s5
+; GCN-NEXT: s_bfe_u32 s1, s0, 0x10010
+; GCN-NEXT: s_add_i32 s1, s1, s0
+; GCN-NEXT: s_or_b32 s3, s0, 0x400000
+; GCN-NEXT: s_add_i32 s4, s1, 0x7fff
+; GCN-NEXT: v_cmp_u_f32_e64 s[0:1], s0, s0
+; GCN-NEXT: s_and_b64 s[0:1], s[0:1], exec
+; GCN-NEXT: s_cselect_b32 s0, s3, s4
+; GCN-NEXT: s_pack_hh_b32_b16 s0, s0, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: ; return to shader part epilog
+ %res = fptrunc <2 x float> %src to <2 x bfloat>
+ %cast = bitcast <2 x bfloat> %res to float
+ ret float %cast
+}
+
+define amdgpu_ps float @v_test_cvt_f32_bf16_v(float %src) {
+; GCN-LABEL: v_test_cvt_f32_bf16_v:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v1, v1, v0, s0
+; GCN-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GCN-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: ; return to shader part epilog
+ %trunc = fptrunc float %src to bfloat
+ %ext = fpext bfloat %trunc to float
+ ret float %ext
+}
+
+define amdgpu_ps float @v_test_cvt_v2f64_v2bf16_v(<2 x double> %src) {
+; GCN-LABEL: v_test_cvt_v2f64_v2bf16_v:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
+; GCN-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
+; GCN-NEXT: v_and_b32_e32 v7, 1, v6
+; GCN-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
+; GCN-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
+; GCN-NEXT: v_add_u32_e32 v4, v6, v4
+; GCN-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GCN-NEXT: s_brev_b32 s4, 1
+; GCN-NEXT: v_and_or_b32 v5, v1, s4, v4
+; GCN-NEXT: v_bfe_u32 v4, v4, 16, 1
+; GCN-NEXT: s_movk_i32 s5, 0x7fff
+; GCN-NEXT: v_add3_u32 v4, v4, v5, s5
+; GCN-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GCN-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; GCN-NEXT: v_cvt_f32_f64_e64 v5, |v[2:3]|
+; GCN-NEXT: v_cvt_f64_f32_e32 v[0:1], v5
+; GCN-NEXT: v_and_b32_e32 v6, 1, v5
+; GCN-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[2:3]|, v[0:1]
+; GCN-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[2:3]|, v[0:1]
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v6
+; GCN-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[2:3]
+; GCN-NEXT: v_add_u32_e32 v0, v5, v0
+; GCN-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc
+; GCN-NEXT: v_and_or_b32 v1, v3, s4, v0
+; GCN-NEXT: v_bfe_u32 v0, v0, 16, 1
+; GCN-NEXT: v_add3_u32 v0, v0, v1, s5
+; GCN-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; GCN-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[2:3]
+; GCN-NEXT: s_mov_b32 s0, 0x7060302
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT: v_perm_b32 v0, v0, v4, s0
+; GCN-NEXT: ; return to shader part epilog
+ %res = fptrunc <2 x double> %src to <2 x bfloat>
+ %cast = bitcast <2 x bfloat> %res to float
+ ret float %cast
+}
+
+define amdgpu_ps float @fptrunc_f32_f32_to_v2bf16(float %a, float %b) {
+; GCN-LABEL: fptrunc_f32_f32_to_v2bf16:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v2, v2, v0, s0
+; GCN-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GCN-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GCN-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GCN-NEXT: v_add3_u32 v2, v2, v1, s0
+; GCN-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GCN-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GCN-NEXT: s_mov_b32 s0, 0x7060302
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GCN-NEXT: v_perm_b32 v0, v1, v0, s0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %a.cvt = fptrunc float %a to bfloat
+ %b.cvt = fptrunc float %b to bfloat
+ %v2.1 = insertelement <2 x bfloat> undef, bfloat %a.cvt, i32 0
+ %v2.2 = insertelement <2 x bfloat> %v2.1, bfloat %b.cvt, i32 1
+ %ret = bitcast <2 x bfloat> %v2.2 to float
+ ret float %ret
+}
+
+define amdgpu_ps float @fptrunc_f32_f32_to_v2bf16_mods(float %a, float %b) {
+; GCN-LABEL: fptrunc_f32_f32_to_v2bf16_mods:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; GCN-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v3, v3, v2, s0
+; GCN-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GCN-NEXT: v_cmp_u_f32_e64 vcc, -v0, -v0
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GCN-NEXT: v_and_b32_e32 v2, 0x7fffffff, v1
+; GCN-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GCN-NEXT: v_add3_u32 v3, v3, v2, s0
+; GCN-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GCN-NEXT: v_cmp_u_f32_e64 vcc, |v1|, |v1|
+; GCN-NEXT: s_mov_b32 s0, 0x7060302
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GCN-NEXT: v_perm_b32 v0, v1, v0, s0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %a.neg = fneg float %a
+ %a.cvt = fptrunc float %a.neg to bfloat
+ %b.abs = call float @llvm.fabs.f32(float %b)
+ %b.cvt = fptrunc float %b.abs to bfloat
+ %v2.1 = insertelement <2 x bfloat> undef, bfloat %a.cvt, i32 0
+ %v2.2 = insertelement <2 x bfloat> %v2.1, bfloat %b.cvt, i32 1
+ %ret = bitcast <2 x bfloat> %v2.2 to float
+ ret float %ret
+}
+
+define amdgpu_ps void @fptrunc_f32_to_bf16(float %a, ptr %out) {
+; GCN-LABEL: fptrunc_f32_to_bf16:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b32_e32 v3, v2
+; GCN-NEXT: v_mov_b32_e32 v2, v1
+; GCN-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v1, v1, v0, s0
+; GCN-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GCN-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
+; GCN-NEXT: flat_store_short_d16_hi v[2:3], v0 sc0 sc1
+; GCN-NEXT: s_endpgm
+entry:
+ %a.cvt = fptrunc float %a to bfloat
+ store bfloat %a.cvt, ptr %out
+ ret void
+}
+
+define amdgpu_ps void @fptrunc_f32_to_bf16_abs(float %a, ptr %out) {
+; GCN-LABEL: fptrunc_f32_to_bf16_abs:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b32_e32 v3, v2
+; GCN-NEXT: v_mov_b32_e32 v2, v1
+; GCN-NEXT: v_and_b32_e32 v1, 0x7fffffff, v0
+; GCN-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v4, v4, v1, s0
+; GCN-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; GCN-NEXT: v_cmp_u_f32_e64 vcc, |v0|, |v0|
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GCN-NEXT: flat_store_short_d16_hi v[2:3], v0 sc0 sc1
+; GCN-NEXT: s_endpgm
+entry:
+ %a.abs = call float @llvm.fabs.f32(float %a)
+ %a.cvt = fptrunc float %a.abs to bfloat
+ store bfloat %a.cvt, ptr %out
+ ret void
+}
+
+define amdgpu_ps void @fptrunc_f32_to_bf16_neg(float %a, ptr %out) {
+; GCN-LABEL: fptrunc_f32_to_bf16_neg:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b32_e32 v3, v2
+; GCN-NEXT: v_mov_b32_e32 v2, v1
+; GCN-NEXT: v_xor_b32_e32 v1, 0x80000000, v0
+; GCN-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v4, v4, v1, s0
+; GCN-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; GCN-NEXT: v_cmp_u_f32_e64 vcc, -v0, -v0
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GCN-NEXT: flat_store_short_d16_hi v[2:3], v0 sc0 sc1
+; GCN-NEXT: s_endpgm
+entry:
+ %a.neg = fneg float %a
+ %a.cvt = fptrunc float %a.neg to bfloat
+ store bfloat %a.cvt, ptr %out
+ ret void
+}
+
+define amdgpu_ps void @fptrunc_f64_to_bf16(double %a, ptr %out) {
+; GCN-LABEL: fptrunc_f64_to_bf16:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
+; GCN-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
+; GCN-NEXT: v_and_b32_e32 v7, 1, v6
+; GCN-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
+; GCN-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
+; GCN-NEXT: v_add_u32_e32 v4, v6, v4
+; GCN-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GCN-NEXT: s_brev_b32 s0, 1
+; GCN-NEXT: v_and_or_b32 v5, v1, s0, v4
+; GCN-NEXT: v_bfe_u32 v4, v4, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v4, v4, v5, s0
+; GCN-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GCN-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GCN-NEXT: flat_store_short_d16_hi v[2:3], v0 sc0 sc1
+; GCN-NEXT: s_endpgm
+entry:
+ %a.cvt = fptrunc double %a to bfloat
+ store bfloat %a.cvt, ptr %out
+ ret void
+}
+
+define amdgpu_ps void @fptrunc_f64_to_bf16_neg(double %a, ptr %out) {
+; GCN-LABEL: fptrunc_f64_to_bf16_neg:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_cvt_f32_f64_e64 v7, |v[0:1]|
+; GCN-NEXT: v_cvt_f64_f32_e32 v[4:5], v7
+; GCN-NEXT: v_and_b32_e32 v8, 1, v7
+; GCN-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8
+; GCN-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
+; GCN-NEXT: v_add_u32_e32 v4, v7, v4
+; GCN-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GCN-NEXT: s_brev_b32 s4, 1
+; GCN-NEXT: v_xor_b32_e32 v6, 0x80000000, v1
+; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
+; GCN-NEXT: v_and_or_b32 v5, v6, s4, v4
+; GCN-NEXT: v_bfe_u32 v4, v4, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v4, v4, v5, s0
+; GCN-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GCN-NEXT: v_cmp_u_f64_e64 vcc, -v[0:1], -v[0:1]
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GCN-NEXT: flat_store_short_d16_hi v[2:3], v0 sc0 sc1
+; GCN-NEXT: s_endpgm
+entry:
+ %a.neg = fneg double %a
+ %a.cvt = fptrunc double %a.neg to bfloat
+ store bfloat %a.cvt, ptr %out
+ ret void
+}
+
+define amdgpu_ps void @fptrunc_f64_to_bf16_abs(double %a, ptr %out) {
+; GCN-LABEL: fptrunc_f64_to_bf16_abs:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_cvt_f32_f64_e64 v7, |v[0:1]|
+; GCN-NEXT: v_cvt_f64_f32_e32 v[4:5], v7
+; GCN-NEXT: v_and_b32_e32 v8, 1, v7
+; GCN-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8
+; GCN-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
+; GCN-NEXT: v_add_u32_e32 v4, v7, v4
+; GCN-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GCN-NEXT: v_and_b32_e32 v6, 0x7fffffff, v1
+; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
+; GCN-NEXT: s_brev_b32 s0, 1
+; GCN-NEXT: v_and_or_b32 v5, v6, s0, v4
+; GCN-NEXT: v_bfe_u32 v4, v4, 16, 1
+; GCN-NEXT: s_movk_i32 s0, 0x7fff
+; GCN-NEXT: v_add3_u32 v4, v4, v5, s0
+; GCN-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GCN-NEXT: v_cmp_u_f64_e64 vcc, |v[0:1]|, |v[0:1]|
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GCN-NEXT: flat_store_short_d16_hi v[2:3], v0 sc0 sc1
+; GCN-NEXT: s_endpgm
+entry:
+ %a.abs = call double @llvm.fabs.f64(double %a)
+ %a.cvt = fptrunc double %a.abs to bfloat
+ store bfloat %a.cvt, ptr %out
+ ret void
+}
+
+declare float @llvm.fabs.f32(float)
+declare double @llvm.fabs.f64(double)
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index ebb77c1..9865883 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -16968,7 +16968,7 @@ define bfloat @v_fabs_bf16(bfloat %a) {
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e64 v0, 1.0, |v0|
+; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
;
@@ -16977,7 +16977,7 @@ define bfloat @v_fabs_bf16(bfloat %a) {
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, |v0|
+; GFX7-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
@@ -17163,9 +17163,9 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e64 v0, 1.0, |v0|
+; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v0, -1.0, v0
+; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
;
@@ -17174,9 +17174,9 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, |v0|
+; GFX7-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v0, -1.0, v0
+; GFX7-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
@@ -17280,8 +17280,6 @@ define bfloat @v_minnum_bf16(bfloat %a, bfloat %b) {
; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_min_f32_e32 v0, v0, v1
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -17293,8 +17291,6 @@ define bfloat @v_minnum_bf16(bfloat %a, bfloat %b) {
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
@@ -17375,10 +17371,6 @@ define <2 x bfloat> @v_minnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_min_f32_e32 v1, v1, v3
; GCN-NEXT: v_min_f32_e32 v0, v0, v2
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
@@ -17396,10 +17388,6 @@ define <2 x bfloat> @v_minnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_min_f32_e32 v1, v1, v3
; GFX7-NEXT: v_min_f32_e32 v0, v0, v2
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
@@ -17522,12 +17510,6 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_min_f32_e32 v2, v2, v5
; GCN-NEXT: v_min_f32_e32 v1, v1, v4
; GCN-NEXT: v_min_f32_e32 v0, v0, v3
@@ -17551,12 +17533,6 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_min_f32_e32 v2, v2, v5
; GFX7-NEXT: v_min_f32_e32 v1, v1, v4
; GFX7-NEXT: v_min_f32_e32 v0, v0, v3
@@ -17688,14 +17664,6 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_min_f32_e32 v3, v3, v7
; GCN-NEXT: v_min_f32_e32 v2, v2, v6
; GCN-NEXT: v_min_f32_e32 v1, v1, v5
@@ -17725,14 +17693,6 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_min_f32_e32 v3, v3, v7
; GFX7-NEXT: v_min_f32_e32 v2, v2, v6
; GFX7-NEXT: v_min_f32_e32 v1, v1, v5
@@ -17951,22 +17911,6 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_min_f32_e32 v7, v7, v15
; GCN-NEXT: v_min_f32_e32 v6, v6, v14
; GCN-NEXT: v_min_f32_e32 v5, v5, v13
@@ -18020,22 +17964,6 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_min_f32_e32 v7, v7, v15
; GFX7-NEXT: v_min_f32_e32 v6, v6, v14
; GFX7-NEXT: v_min_f32_e32 v5, v5, v13
@@ -18382,71 +18310,51 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
-; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GCN-NEXT: v_min_f32_e32 v14, v14, v30
; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
-; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: v_min_f32_e32 v13, v13, v29
; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
-; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: v_min_f32_e32 v12, v12, v28
; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
-; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: v_min_f32_e32 v11, v11, v27
; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
-; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: v_min_f32_e32 v10, v10, v26
; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: v_min_f32_e32 v9, v9, v25
; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
-; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: v_min_f32_e32 v8, v8, v24
; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: v_min_f32_e32 v7, v7, v23
; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: v_min_f32_e32 v6, v6, v22
; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: v_min_f32_e32 v5, v5, v21
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
@@ -18461,8 +18369,6 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GCN-NEXT: v_min_f32_e32 v4, v4, v20
; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32
; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
@@ -18474,21 +18380,10 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_min_f32_e32 v3, v3, v19
; GCN-NEXT: v_min_f32_e32 v2, v2, v18
; GCN-NEXT: v_min_f32_e32 v1, v1, v17
; GCN-NEXT: v_min_f32_e32 v0, v0, v16
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v20
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
@@ -18503,8 +18398,9 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v20
; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GCN-NEXT: v_min_f32_e32 v15, v15, v16
; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
@@ -18513,14 +18409,12 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-LABEL: v_minnum_v16bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GFX7-NEXT: v_min_f32_e32 v9, v9, v25
-; GFX7-NEXT: buffer_load_dword v25, off, s[0:3], s32
+; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
+; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: v_min_f32_e32 v6, v6, v22
+; GFX7-NEXT: buffer_load_dword v22, off, s[0:3], s32
; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
@@ -18531,13 +18425,13 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
+; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
+; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -18560,13 +18454,13 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
@@ -18579,48 +18473,14 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
-; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
-; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
-; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
-; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
-; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
-; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
-; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
-; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
-; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
-; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_min_f32_e32 v14, v14, v30
; GFX7-NEXT: v_min_f32_e32 v13, v13, v29
; GFX7-NEXT: v_min_f32_e32 v12, v12, v28
; GFX7-NEXT: v_min_f32_e32 v11, v11, v27
; GFX7-NEXT: v_min_f32_e32 v10, v10, v26
-; GFX7-NEXT: v_min_f32_e32 v15, v15, v25
+; GFX7-NEXT: v_min_f32_e32 v9, v9, v25
; GFX7-NEXT: v_min_f32_e32 v8, v8, v24
; GFX7-NEXT: v_min_f32_e32 v7, v7, v23
-; GFX7-NEXT: v_min_f32_e32 v6, v6, v22
; GFX7-NEXT: v_min_f32_e32 v5, v5, v21
; GFX7-NEXT: v_min_f32_e32 v4, v4, v20
; GFX7-NEXT: v_min_f32_e32 v3, v3, v19
@@ -18634,6 +18494,10 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX7-NEXT: v_min_f32_e32 v15, v15, v22
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
@@ -19267,287 +19131,223 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
-; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:124
; GCN-NEXT: v_min_f32_e32 v31, v31, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:120
; GCN-NEXT: v_min_f32_e32 v30, v30, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120
; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:116
; GCN-NEXT: v_min_f32_e32 v29, v29, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116
; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:112
; GCN-NEXT: v_min_f32_e32 v28, v28, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112
; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:108
; GCN-NEXT: v_min_f32_e32 v27, v27, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108
; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:104
; GCN-NEXT: v_min_f32_e32 v26, v26, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104
; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:100
; GCN-NEXT: v_min_f32_e32 v25, v25, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100
; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:96
; GCN-NEXT: v_min_f32_e32 v24, v24, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96
; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:92
; GCN-NEXT: v_min_f32_e32 v23, v23, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92
; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:88
; GCN-NEXT: v_min_f32_e32 v22, v22, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88
; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:84
; GCN-NEXT: v_min_f32_e32 v21, v21, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84
; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:80
; GCN-NEXT: v_min_f32_e32 v20, v20, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80
; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:76
; GCN-NEXT: v_min_f32_e32 v19, v19, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76
; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:72
; GCN-NEXT: v_min_f32_e32 v18, v18, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:68
; GCN-NEXT: v_min_f32_e32 v17, v17, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:64
; GCN-NEXT: v_min_f32_e32 v16, v16, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60
; GCN-NEXT: v_min_f32_e32 v15, v15, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60
; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:56
; GCN-NEXT: v_min_f32_e32 v14, v14, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56
; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:52
; GCN-NEXT: v_min_f32_e32 v13, v13, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52
; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48
; GCN-NEXT: v_min_f32_e32 v12, v12, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48
; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:44
; GCN-NEXT: v_min_f32_e32 v11, v11, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:40
; GCN-NEXT: v_min_f32_e32 v10, v10, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40
; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36
; GCN-NEXT: v_min_f32_e32 v9, v9, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36
; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32
; GCN-NEXT: v_min_f32_e32 v8, v8, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32
; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:28
; GCN-NEXT: v_min_f32_e32 v7, v7, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28
; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:24
; GCN-NEXT: v_min_f32_e32 v6, v6, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24
; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
; GCN-NEXT: v_min_f32_e32 v5, v5, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20
; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16
; GCN-NEXT: v_min_f32_e32 v4, v4, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16
; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12
; GCN-NEXT: v_min_f32_e32 v3, v3, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12
; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; GCN-NEXT: v_min_f32_e32 v2, v2, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8
; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4
; GCN-NEXT: v_min_f32_e32 v1, v1, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GCN-NEXT: v_min_f32_e32 v0, v0, v32
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
@@ -19590,322 +19390,258 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128
; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: s_waitcnt vmcnt(1)
-; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
-; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
-; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
-; GFX7-NEXT: v_min_f32_e32 v31, v31, v32
-; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: v_min_f32_e32 v31, v31, v32
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
+; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
; GFX7-NEXT: v_min_f32_e32 v30, v30, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120
; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v29, v29, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116
; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v28, v28, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112
; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v27, v27, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108
; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v26, v26, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104
; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v25, v25, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100
; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v24, v24, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96
; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v23, v23, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92
; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v22, v22, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88
; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v21, v21, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84
; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v20, v20, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80
; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v19, v19, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76
; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v18, v18, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v17, v17, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v16, v16, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v15, v15, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60
; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v14, v14, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56
; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v13, v13, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52
; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v12, v12, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48
; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v11, v11, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v10, v10, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40
; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v9, v9, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36
; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v8, v8, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v7, v7, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v6, v6, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24
; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v5, v5, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v4, v4, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v3, v3, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v2, v2, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8
; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v1, v1, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_min_f32_e32 v0, v0, v32
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
@@ -21097,8 +20833,6 @@ define bfloat @v_maxnum_bf16(bfloat %a, bfloat %b) {
; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_max_f32_e32 v0, v0, v1
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -21110,8 +20844,6 @@ define bfloat @v_maxnum_bf16(bfloat %a, bfloat %b) {
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
@@ -21192,10 +20924,6 @@ define <2 x bfloat> @v_maxnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_max_f32_e32 v1, v1, v3
; GCN-NEXT: v_max_f32_e32 v0, v0, v2
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
@@ -21213,10 +20941,6 @@ define <2 x bfloat> @v_maxnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_max_f32_e32 v1, v1, v3
; GFX7-NEXT: v_max_f32_e32 v0, v0, v2
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
@@ -21339,12 +21063,6 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_max_f32_e32 v2, v2, v5
; GCN-NEXT: v_max_f32_e32 v1, v1, v4
; GCN-NEXT: v_max_f32_e32 v0, v0, v3
@@ -21368,12 +21086,6 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_max_f32_e32 v2, v2, v5
; GFX7-NEXT: v_max_f32_e32 v1, v1, v4
; GFX7-NEXT: v_max_f32_e32 v0, v0, v3
@@ -21505,14 +21217,6 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_max_f32_e32 v3, v3, v7
; GCN-NEXT: v_max_f32_e32 v2, v2, v6
; GCN-NEXT: v_max_f32_e32 v1, v1, v5
@@ -21542,14 +21246,6 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_max_f32_e32 v3, v3, v7
; GFX7-NEXT: v_max_f32_e32 v2, v2, v6
; GFX7-NEXT: v_max_f32_e32 v1, v1, v5
@@ -21768,22 +21464,6 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_max_f32_e32 v7, v7, v15
; GCN-NEXT: v_max_f32_e32 v6, v6, v14
; GCN-NEXT: v_max_f32_e32 v5, v5, v13
@@ -21837,22 +21517,6 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_max_f32_e32 v7, v7, v15
; GFX7-NEXT: v_max_f32_e32 v6, v6, v14
; GFX7-NEXT: v_max_f32_e32 v5, v5, v13
@@ -22199,71 +21863,51 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
-; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GCN-NEXT: v_max_f32_e32 v14, v14, v30
; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
-; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: v_max_f32_e32 v13, v13, v29
; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
-; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: v_max_f32_e32 v12, v12, v28
; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
-; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: v_max_f32_e32 v11, v11, v27
; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
-; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: v_max_f32_e32 v10, v10, v26
; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: v_max_f32_e32 v9, v9, v25
; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
-; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: v_max_f32_e32 v8, v8, v24
; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: v_max_f32_e32 v7, v7, v23
; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: v_max_f32_e32 v6, v6, v22
; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: v_max_f32_e32 v5, v5, v21
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
@@ -22278,8 +21922,6 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GCN-NEXT: v_max_f32_e32 v4, v4, v20
; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32
; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
@@ -22291,21 +21933,10 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_max_f32_e32 v3, v3, v19
; GCN-NEXT: v_max_f32_e32 v2, v2, v18
; GCN-NEXT: v_max_f32_e32 v1, v1, v17
; GCN-NEXT: v_max_f32_e32 v0, v0, v16
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v20
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
@@ -22320,8 +21951,9 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v20
; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GCN-NEXT: v_max_f32_e32 v15, v15, v16
; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
@@ -22330,14 +21962,12 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-LABEL: v_maxnum_v16bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GFX7-NEXT: v_max_f32_e32 v9, v9, v25
-; GFX7-NEXT: buffer_load_dword v25, off, s[0:3], s32
+; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
+; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: v_max_f32_e32 v6, v6, v22
+; GFX7-NEXT: buffer_load_dword v22, off, s[0:3], s32
; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
@@ -22348,13 +21978,13 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
+; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
+; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -22377,13 +22007,13 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
@@ -22396,48 +22026,14 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
-; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
-; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
-; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
-; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
-; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
-; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
-; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
-; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
-; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
-; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
-; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
-; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
-; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
-; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_max_f32_e32 v14, v14, v30
; GFX7-NEXT: v_max_f32_e32 v13, v13, v29
; GFX7-NEXT: v_max_f32_e32 v12, v12, v28
; GFX7-NEXT: v_max_f32_e32 v11, v11, v27
; GFX7-NEXT: v_max_f32_e32 v10, v10, v26
-; GFX7-NEXT: v_max_f32_e32 v15, v15, v25
+; GFX7-NEXT: v_max_f32_e32 v9, v9, v25
; GFX7-NEXT: v_max_f32_e32 v8, v8, v24
; GFX7-NEXT: v_max_f32_e32 v7, v7, v23
-; GFX7-NEXT: v_max_f32_e32 v6, v6, v22
; GFX7-NEXT: v_max_f32_e32 v5, v5, v21
; GFX7-NEXT: v_max_f32_e32 v4, v4, v20
; GFX7-NEXT: v_max_f32_e32 v3, v3, v19
@@ -22451,6 +22047,10 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX7-NEXT: v_max_f32_e32 v15, v15, v22
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
@@ -23084,287 +22684,223 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
-; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:124
; GCN-NEXT: v_max_f32_e32 v31, v31, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:120
; GCN-NEXT: v_max_f32_e32 v30, v30, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120
; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:116
; GCN-NEXT: v_max_f32_e32 v29, v29, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116
; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:112
; GCN-NEXT: v_max_f32_e32 v28, v28, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112
; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:108
; GCN-NEXT: v_max_f32_e32 v27, v27, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108
; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:104
; GCN-NEXT: v_max_f32_e32 v26, v26, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104
; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:100
; GCN-NEXT: v_max_f32_e32 v25, v25, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100
; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:96
; GCN-NEXT: v_max_f32_e32 v24, v24, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96
; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:92
; GCN-NEXT: v_max_f32_e32 v23, v23, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92
; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:88
; GCN-NEXT: v_max_f32_e32 v22, v22, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88
; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:84
; GCN-NEXT: v_max_f32_e32 v21, v21, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84
; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:80
; GCN-NEXT: v_max_f32_e32 v20, v20, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80
; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:76
; GCN-NEXT: v_max_f32_e32 v19, v19, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76
; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:72
; GCN-NEXT: v_max_f32_e32 v18, v18, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:68
; GCN-NEXT: v_max_f32_e32 v17, v17, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:64
; GCN-NEXT: v_max_f32_e32 v16, v16, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60
; GCN-NEXT: v_max_f32_e32 v15, v15, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60
; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:56
; GCN-NEXT: v_max_f32_e32 v14, v14, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56
; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:52
; GCN-NEXT: v_max_f32_e32 v13, v13, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52
; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48
; GCN-NEXT: v_max_f32_e32 v12, v12, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48
; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:44
; GCN-NEXT: v_max_f32_e32 v11, v11, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:40
; GCN-NEXT: v_max_f32_e32 v10, v10, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40
; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36
; GCN-NEXT: v_max_f32_e32 v9, v9, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36
; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32
; GCN-NEXT: v_max_f32_e32 v8, v8, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32
; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:28
; GCN-NEXT: v_max_f32_e32 v7, v7, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28
; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:24
; GCN-NEXT: v_max_f32_e32 v6, v6, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24
; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
; GCN-NEXT: v_max_f32_e32 v5, v5, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20
; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16
; GCN-NEXT: v_max_f32_e32 v4, v4, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16
; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12
; GCN-NEXT: v_max_f32_e32 v3, v3, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12
; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; GCN-NEXT: v_max_f32_e32 v2, v2, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8
; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4
; GCN-NEXT: v_max_f32_e32 v1, v1, v32
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v33
; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GCN-NEXT: v_max_f32_e32 v0, v0, v32
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
@@ -23407,322 +22943,258 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128
; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: s_waitcnt vmcnt(1)
-; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
-; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
-; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
-; GFX7-NEXT: v_max_f32_e32 v31, v31, v32
-; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: v_max_f32_e32 v31, v31, v32
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
+; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
; GFX7-NEXT: v_max_f32_e32 v30, v30, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120
; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v29, v29, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116
; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v28, v28, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112
; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v27, v27, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108
; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v26, v26, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104
; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v25, v25, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100
; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v24, v24, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96
; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v23, v23, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92
; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v22, v22, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88
; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v21, v21, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84
; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v20, v20, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80
; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v19, v19, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76
; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v18, v18, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v17, v17, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v16, v16, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v15, v15, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60
; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v14, v14, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56
; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v13, v13, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52
; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v12, v12, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48
; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v11, v11, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v10, v10, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40
; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v9, v9, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36
; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v8, v8, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v7, v7, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v6, v6, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24
; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v5, v5, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v4, v4, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v3, v3, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v2, v2, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8
; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v1, v1, v32
; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_max_f32_e32 v0, v0, v32
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
@@ -25176,7 +24648,6 @@ define { bfloat, i16 } @v_frexp_bf16_i16(bfloat %a) {
; GCN-NEXT: v_frexp_exp_i32_f32_e32 v2, v0
; GCN-NEXT: v_cmp_lt_f32_e64 vcc, |v0|, s4
; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v2, vcc
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -26818,11 +26289,17 @@ define bfloat @v_canonicalize_bf16(bfloat %a) {
; GCN-LABEL: v_canonicalize_bf16:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_canonicalize_bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_canonicalize_bf16:
diff --git a/llvm/test/CodeGen/AMDGPU/clamp.ll b/llvm/test/CodeGen/AMDGPU/clamp.ll
index dfadd8d..9472845 100644
--- a/llvm/test/CodeGen/AMDGPU/clamp.ll
+++ b/llvm/test/CodeGen/AMDGPU/clamp.ll
@@ -2996,18 +2996,16 @@ define amdgpu_kernel void @v_clamp_v2f16_undef_elt(ptr addrspace(1) %out, ptr ad
; GFX6-NEXT: v_mov_b32_e32 v4, 0x7fc00000
; GFX6-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v2
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v2
-; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX6-NEXT: v_max_f32_e32 v3, 0x7fc00000, v3
-; GFX6-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX6-NEXT: v_med3_f32 v2, v2, 0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: v_min_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_max_f32_e32 v2, 0x7fc00000, v2
+; GFX6-NEXT: v_med3_f32 v3, v3, 0, v4
; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX6-NEXT: v_min_f32_e32 v2, 1.0, v2
+; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
; GFX6-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; GFX6-NEXT: s_endpgm
;
@@ -3095,16 +3093,15 @@ define amdgpu_kernel void @v_clamp_v2f16_not_zero(ptr addrspace(1) %out, ptr add
; GFX6-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
; GFX6-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v2
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_cvt_f32_f16_e64 v2, v2 clamp
-; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX6-NEXT: v_max_f32_e32 v3, 2.0, v3
-; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: v_min_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX6-NEXT: v_cvt_f32_f16_e64 v3, v3 clamp
+; GFX6-NEXT: v_max_f32_e32 v2, 2.0, v2
; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX6-NEXT: v_min_f32_e32 v2, 1.0, v2
+; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
; GFX6-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; GFX6-NEXT: s_endpgm
;
@@ -3198,9 +3195,8 @@ define amdgpu_kernel void @v_clamp_v2f16_not_one(ptr addrspace(1) %out, ptr addr
; GFX6-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v2
; GFX6-NEXT: v_cvt_f32_f16_e64 v3, v3 clamp
-; GFX6-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v2
; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
; GFX6-NEXT: v_med3_f32 v2, v2, 0, 0
; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
@@ -3760,19 +3756,17 @@ define amdgpu_kernel void @v_clamp_v2f16_undef_limit_elts0(ptr addrspace(1) %out
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
; GFX6-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
-; GFX6-NEXT: s_mov_b32 s2, 0x7fc00000
; GFX6-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX6-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v2
-; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX6-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX6-NEXT: v_med3_f32 v3, v3, s2, 1.0
+; GFX6-NEXT: v_max_f32_e32 v3, 0x7fc00000, v3
+; GFX6-NEXT: v_min_f32_e32 v3, 1.0, v3
; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
; GFX6-NEXT: v_med3_f32 v2, v2, 0, v4
; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
; GFX6-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
@@ -3863,18 +3857,16 @@ define amdgpu_kernel void @v_clamp_v2f16_undef_limit_elts1(ptr addrspace(1) %out
; GFX6-NEXT: v_mov_b32_e32 v4, 0x7fc00000
; GFX6-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v2
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v2
-; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; GFX6-NEXT: v_max_f32_e32 v3, 0x7fc00000, v3
-; GFX6-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX6-NEXT: v_med3_f32 v2, v2, 0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: v_min_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_max_f32_e32 v2, 0x7fc00000, v2
+; GFX6-NEXT: v_med3_f32 v3, v3, 0, v4
; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX6-NEXT: v_min_f32_e32 v2, 1.0, v2
+; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
; GFX6-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; GFX6-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll b/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
index 2ed6d7f..1c8725f 100644
--- a/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
+++ b/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
@@ -1,10 +1,12 @@
; RUN: llc --amdgpu-disable-structurizer -stop-after=amdgpu-isel -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,ISEL %s
; RUN: llc --amdgpu-disable-structurizer -stop-after=dead-mi-elimination -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,DEADMI %s
+; RUN: llc --amdgpu-disable-structurizer -global-isel -stop-after=irtranslator -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck %s --check-prefixes=CHECK,GISEL
; CHECK-LABEL: name: basic_call
-; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ENTRY
-; ISEL: {{.*}} SI_CALL_ISEL {{.*}}, @foo, [[TOKEN]], csr_amdgpu, {{.*}}
+; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
+; ISEL: {{.*}} SI_CALL_ISEL {{.*}}, @foo, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
; DEADMI: {{.*}} SI_CALL {{.*}}, @foo, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
+; GISEL: {{.*}} G_SI_CALL {{.*}}, @foo, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
define i32 @basic_call(i32 %src) #0 {
%t = call token @llvm.experimental.convergence.entry()
%r = call i32 @foo(i32 %src) [ "convergencectrl"(token %t) ]
@@ -12,10 +14,11 @@ define i32 @basic_call(i32 %src) #0 {
}
; CHECK-LABEL: name: basic_intrinsic
-; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
+; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
; ISEL: CONVERGENCECTRL_GLUE [[TOKEN]]
; DEADMI-NOT: CONVERGENCECTRL_GLUE
-; CHECK: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[TOKEN]]
define i32 @basic_intrinsic(i32 %src) #0 {
%t = call token @llvm.experimental.convergence.anchor()
%r = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t) ]
@@ -30,12 +33,13 @@ define i32 @uncontrolled_call(i32 %src) #0 {
}
; CHECK-LABEL: name: basic_branch
-; CHECK: bb.0.entry:
-; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
-; CHECK: bb.1.then:
+; CHECK: bb.[[#]].entry:
+; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
+; CHECK: bb.[[#]].then:
; ISEL: CONVERGENCECTRL_GLUE [[TOKEN]]
; DEADMI-NOT: CONVERGENCECTRL_GLUE
-; CHECK: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
+; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[TOKEN]]
define i32 @basic_branch(i32 %src, i1 %cond) #0 {
entry:
%t = call token @llvm.experimental.convergence.anchor()
@@ -52,12 +56,13 @@ else:
}
; CHECK-LABEL: name: basic_loop
-; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
-; CHECK: bb.1.loop:
-; CHECK: [[LOOP:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_LOOP [[TOKEN]]
+; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
+; CHECK: bb.[[#]].loop:
+; CHECK: [[LOOP:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_LOOP [[TOKEN]]
; ISEL: CONVERGENCECTRL_GLUE [[LOOP]]
; DEADMI-NOT: CONVERGENCECTRL_GLUE
-; CHECK: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[LOOP]]
+; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[LOOP]]
+; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[LOOP]]
define i32 @basic_loop(i32 %src, i1 %cond) #0 {
%t1 = call token @llvm.experimental.convergence.anchor()
br label %loop
@@ -71,6 +76,32 @@ end:
ret i32 %r
}
+; CHECK-LABEL: name: nested
+; CHECK: [[ENTRY:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
+; CHECK: [[ANCHOR:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
+; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[ANCHOR]]
+; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[ANCHOR]]
+; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[ENTRY]]
+; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[ENTRY]]
+define i32 @nested(i32 %src) #0 {
+ %t1 = call token @llvm.experimental.convergence.entry()
+ %t2 = call token @llvm.experimental.convergence.anchor()
+ %r2 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t2) ]
+ %r1 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t1) ]
+ %sum = add i32 %r1, %r2
+ ret i32 %sum
+}
+
+; CHECK-LABEL: name: tail_call_void_func_void
+; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
+; CHECK: {{.*}} SI_TCRETURN {{.*}}, @external_void_func_void, 0, csr_amdgpu, {{.*}}implicit [[TOKEN]]
+define void @tail_call_void_func_void() #0 {
+ %t1 = call token @llvm.experimental.convergence.entry()
+ tail call void @external_void_func_void() [ "convergencectrl"(token %t1) ]
+ ret void
+}
+
+declare hidden void @external_void_func_void() #0
declare i32 @foo(i32 %x) #0
declare i32 @llvm.amdgcn.readfirstlane(i32) #0
diff --git a/llvm/test/CodeGen/AMDGPU/copy-vgpr-clobber-spill-vgpr.mir b/llvm/test/CodeGen/AMDGPU/copy-vgpr-clobber-spill-vgpr.mir
index 895185c..577d38e 100644
--- a/llvm/test/CodeGen/AMDGPU/copy-vgpr-clobber-spill-vgpr.mir
+++ b/llvm/test/CodeGen/AMDGPU/copy-vgpr-clobber-spill-vgpr.mir
@@ -333,7 +333,7 @@
ret void
}
- attributes #0 = { "amdgpu-waves-per-eu"="4,4" }
+ attributes #0 = { "amdgpu-waves-per-eu"="4,4" "amdgpu-no-agpr" }
...
---
diff --git a/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll b/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
index 0c03419..386f9cd 100644
--- a/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
@@ -35,6 +35,6 @@ define amdgpu_kernel void @test_direct_indirect_call() {
ret void
}
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR1]] = { "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/div_i128.ll b/llvm/test/CodeGen/AMDGPU/div_i128.ll
index 2f3d5d9..cf99b5d 100644
--- a/llvm/test/CodeGen/AMDGPU/div_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_i128.ll
@@ -1,10 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9,GFX9-SDAG %s
-; RUN: llc -O0 -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-O0,GFX9-SDAG-O0 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -O0 -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-O0 %s
-; FIXME: GlobalISel missing the power-of-2 cases in legalization. https://github.com/llvm/llvm-project/issues/80671
-; xUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9,GFX9 %s
-; xUN: llc -O0 -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-O0,GFX9-O0 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-G %s
+; RUN: llc -O0 -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-G-O0 %s
define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-LABEL: v_sdiv_i128_vv:
@@ -1223,6 +1222,1158 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-LABEL: v_sdiv_i128_vv:
+; GFX9-G: ; %bb.0: ; %_udiv-special-cases
+; GFX9-G-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v16, 31, v3
+; GFX9-G-NEXT: v_xor_b32_e32 v0, v16, v0
+; GFX9-G-NEXT: v_xor_b32_e32 v1, v16, v1
+; GFX9-G-NEXT: v_sub_co_u32_e32 v10, vcc, v0, v16
+; GFX9-G-NEXT: v_xor_b32_e32 v2, v16, v2
+; GFX9-G-NEXT: v_subb_co_u32_e32 v11, vcc, v1, v16, vcc
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v17, 31, v7
+; GFX9-G-NEXT: v_xor_b32_e32 v3, v16, v3
+; GFX9-G-NEXT: v_subb_co_u32_e32 v12, vcc, v2, v16, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v13, vcc, v3, v16, vcc
+; GFX9-G-NEXT: v_xor_b32_e32 v0, v17, v4
+; GFX9-G-NEXT: v_xor_b32_e32 v1, v17, v5
+; GFX9-G-NEXT: v_sub_co_u32_e32 v18, vcc, v0, v17
+; GFX9-G-NEXT: v_xor_b32_e32 v2, v17, v6
+; GFX9-G-NEXT: v_subb_co_u32_e32 v19, vcc, v1, v17, vcc
+; GFX9-G-NEXT: v_xor_b32_e32 v3, v17, v7
+; GFX9-G-NEXT: v_subb_co_u32_e32 v4, vcc, v2, v17, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v5, vcc, v3, v17, vcc
+; GFX9-G-NEXT: v_or_b32_e32 v0, v18, v4
+; GFX9-G-NEXT: v_or_b32_e32 v1, v19, v5
+; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GFX9-G-NEXT: v_or_b32_e32 v0, v10, v12
+; GFX9-G-NEXT: v_or_b32_e32 v1, v11, v13
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; GFX9-G-NEXT: v_ffbh_u32_e32 v1, v18
+; GFX9-G-NEXT: v_ffbh_u32_e32 v0, v19
+; GFX9-G-NEXT: v_add_u32_e32 v1, 32, v1
+; GFX9-G-NEXT: v_ffbh_u32_e32 v2, v4
+; GFX9-G-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX9-G-NEXT: v_ffbh_u32_e32 v1, v5
+; GFX9-G-NEXT: v_add_u32_e32 v2, 32, v2
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[4:5]
+; GFX9-G-NEXT: v_add_u32_e32 v0, 64, v0
+; GFX9-G-NEXT: v_min_u32_e32 v1, v1, v2
+; GFX9-G-NEXT: v_ffbh_u32_e32 v2, v10
+; GFX9-G-NEXT: v_cndmask_b32_e64 v0, v1, v0, s[6:7]
+; GFX9-G-NEXT: v_ffbh_u32_e32 v1, v11
+; GFX9-G-NEXT: v_add_u32_e32 v2, 32, v2
+; GFX9-G-NEXT: v_ffbh_u32_e32 v3, v12
+; GFX9-G-NEXT: v_min_u32_e32 v1, v1, v2
+; GFX9-G-NEXT: v_ffbh_u32_e32 v2, v13
+; GFX9-G-NEXT: v_add_u32_e32 v3, 32, v3
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[12:13]
+; GFX9-G-NEXT: v_add_u32_e32 v1, 64, v1
+; GFX9-G-NEXT: v_min_u32_e32 v2, v2, v3
+; GFX9-G-NEXT: v_cndmask_b32_e64 v1, v2, v1, s[6:7]
+; GFX9-G-NEXT: v_sub_co_u32_e64 v0, s[6:7], v0, v1
+; GFX9-G-NEXT: v_subb_co_u32_e64 v1, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_mov_b32_e32 v6, 0x7f
+; GFX9-G-NEXT: v_subb_co_u32_e64 v2, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-G-NEXT: v_subb_co_u32_e64 v3, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_cmp_gt_u64_e64 s[6:7], v[0:1], v[6:7]
+; GFX9-G-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[6:7]
+; GFX9-G-NEXT: v_cmp_lt_u64_e64 s[6:7], 0, v[2:3]
+; GFX9-G-NEXT: v_or_b32_e32 v15, v1, v3
+; GFX9-G-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[6:7]
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[2:3]
+; GFX9-G-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-NEXT: v_cndmask_b32_e64 v6, v7, v6, s[6:7]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[4:5]
+; GFX9-G-NEXT: v_or_b32_e32 v20, v7, v6
+; GFX9-G-NEXT: v_xor_b32_e32 v6, 0x7f, v0
+; GFX9-G-NEXT: v_or_b32_e32 v14, v6, v2
+; GFX9-G-NEXT: v_and_b32_e32 v6, 1, v20
+; GFX9-G-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-G-NEXT: v_cndmask_b32_e64 v6, v10, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v7, v11, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v8, v12, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v9, v13, 0, vcc
+; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
+; GFX9-G-NEXT: v_or_b32_e32 v14, v20, v14
+; GFX9-G-NEXT: v_and_b32_e32 v14, 1, v14
+; GFX9-G-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-G-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GFX9-G-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GFX9-G-NEXT: s_cbranch_execz .LBB0_6
+; GFX9-G-NEXT: ; %bb.1: ; %udiv-bb1
+; GFX9-G-NEXT: v_add_co_u32_e32 v20, vcc, 1, v0
+; GFX9-G-NEXT: v_addc_co_u32_e32 v21, vcc, 0, v1, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v22, vcc, 0, v2, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v23, vcc, 0, v3, vcc
+; GFX9-G-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GFX9-G-NEXT: v_sub_co_u32_e32 v8, vcc, 0x7f, v0
+; GFX9-G-NEXT: v_sub_u32_e32 v0, 64, v8
+; GFX9-G-NEXT: v_lshrrev_b64 v[0:1], v0, v[10:11]
+; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], v8, v[12:13]
+; GFX9-G-NEXT: v_subrev_u32_e32 v9, 64, v8
+; GFX9-G-NEXT: v_lshlrev_b64 v[6:7], v8, v[10:11]
+; GFX9-G-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX9-G-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], v9, v[10:11]
+; GFX9-G-NEXT: v_cmp_gt_u32_e32 vcc, 64, v8
+; GFX9-G-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v7, 0, v7, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-G-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
+; GFX9-G-NEXT: v_cndmask_b32_e32 v8, v0, v12, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v9, v1, v13, vcc
+; GFX9-G-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GFX9-G-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-NEXT: v_mov_b32_e32 v2, s10
+; GFX9-G-NEXT: v_mov_b32_e32 v3, s11
+; GFX9-G-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GFX9-G-NEXT: s_xor_b64 s[12:13], exec, s[8:9]
+; GFX9-G-NEXT: s_cbranch_execz .LBB0_5
+; GFX9-G-NEXT: ; %bb.2: ; %udiv-preheader
+; GFX9-G-NEXT: v_sub_u32_e32 v2, 64, v20
+; GFX9-G-NEXT: v_lshrrev_b64 v[0:1], v20, v[10:11]
+; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], v2, v[12:13]
+; GFX9-G-NEXT: v_subrev_u32_e32 v24, 64, v20
+; GFX9-G-NEXT: v_lshrrev_b64 v[14:15], v20, v[12:13]
+; GFX9-G-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX9-G-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX9-G-NEXT: v_lshrrev_b64 v[0:1], v24, v[12:13]
+; GFX9-G-NEXT: v_cmp_gt_u32_e32 vcc, 64, v20
+; GFX9-G-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v14, 0, v14, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v15, 0, v15, vcc
+; GFX9-G-NEXT: v_add_co_u32_e32 v24, vcc, -1, v18
+; GFX9-G-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v20
+; GFX9-G-NEXT: v_addc_co_u32_e32 v25, vcc, -1, v19, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v12, v0, v10, s[4:5]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v13, v1, v11, s[4:5]
+; GFX9-G-NEXT: v_addc_co_u32_e32 v26, vcc, -1, v4, vcc
+; GFX9-G-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GFX9-G-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-NEXT: v_addc_co_u32_e32 v27, vcc, -1, v5, vcc
+; GFX9-G-NEXT: v_mov_b32_e32 v11, 0
+; GFX9-G-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-NEXT: v_mov_b32_e32 v2, s10
+; GFX9-G-NEXT: v_mov_b32_e32 v3, s11
+; GFX9-G-NEXT: .LBB0_3: ; %udiv-do-while
+; GFX9-G-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], 1, v[6:7]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v10, 31, v7
+; GFX9-G-NEXT: v_or_b32_e32 v6, v0, v2
+; GFX9-G-NEXT: v_or_b32_e32 v7, v1, v3
+; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], 1, v[12:13]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v12, 31, v9
+; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 1, v[14:15]
+; GFX9-G-NEXT: v_or_b32_e32 v2, v2, v12
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v14, 31, v13
+; GFX9-G-NEXT: v_sub_co_u32_e32 v12, vcc, v24, v2
+; GFX9-G-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX9-G-NEXT: v_subb_co_u32_e32 v12, vcc, v25, v3, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v12, vcc, v26, v0, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v12, vcc, v27, v1, vcc
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v28, 31, v12
+; GFX9-G-NEXT: v_and_b32_e32 v12, v28, v18
+; GFX9-G-NEXT: v_sub_co_u32_e32 v12, vcc, v2, v12
+; GFX9-G-NEXT: v_and_b32_e32 v2, v28, v19
+; GFX9-G-NEXT: v_subb_co_u32_e32 v13, vcc, v3, v2, vcc
+; GFX9-G-NEXT: v_and_b32_e32 v2, v28, v4
+; GFX9-G-NEXT: v_subb_co_u32_e32 v14, vcc, v0, v2, vcc
+; GFX9-G-NEXT: v_and_b32_e32 v0, v28, v5
+; GFX9-G-NEXT: v_subb_co_u32_e32 v15, vcc, v1, v0, vcc
+; GFX9-G-NEXT: v_add_co_u32_e32 v20, vcc, -1, v20
+; GFX9-G-NEXT: v_addc_co_u32_e32 v21, vcc, -1, v21, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v22, vcc, -1, v22, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v23, vcc, -1, v23, vcc
+; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-G-NEXT: v_or_b32_e32 v0, v20, v22
+; GFX9-G-NEXT: v_or_b32_e32 v1, v21, v23
+; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GFX9-G-NEXT: v_or_b32_e32 v8, v8, v10
+; GFX9-G-NEXT: v_and_b32_e32 v10, 1, v28
+; GFX9-G-NEXT: v_mov_b32_e32 v0, v10
+; GFX9-G-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX9-G-NEXT: v_mov_b32_e32 v1, v11
+; GFX9-G-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX9-G-NEXT: s_cbranch_execnz .LBB0_3
+; GFX9-G-NEXT: ; %bb.4: ; %Flow
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX9-G-NEXT: .LBB0_5: ; %Flow2
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[12:13]
+; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], 1, v[6:7]
+; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v4, 31, v7
+; GFX9-G-NEXT: v_or_b32_e32 v8, v8, v4
+; GFX9-G-NEXT: v_or_b32_e32 v6, v0, v2
+; GFX9-G-NEXT: v_or_b32_e32 v7, v1, v3
+; GFX9-G-NEXT: .LBB0_6: ; %Flow3
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-G-NEXT: v_xor_b32_e32 v3, v17, v16
+; GFX9-G-NEXT: v_xor_b32_e32 v0, v6, v3
+; GFX9-G-NEXT: v_xor_b32_e32 v1, v7, v3
+; GFX9-G-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v3
+; GFX9-G-NEXT: v_xor_b32_e32 v2, v8, v3
+; GFX9-G-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-G-NEXT: v_xor_b32_e32 v4, v9, v3
+; GFX9-G-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v3, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v3, vcc, v4, v3, vcc
+; GFX9-G-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-O0-LABEL: v_sdiv_i128_vv:
+; GFX9-G-O0: ; %bb.0: ; %_udiv-special-cases
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: ; implicit-def: $vgpr8 : SGPR spill to VGPR lane
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v0
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9_vgpr10_vgpr11 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v3
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v7
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14_vgpr15_vgpr16 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v1
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: s_mov_b64 s[12:13], 0x7f
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s6
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v12, v3, v8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr1 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v10, v1, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr13_vgpr14 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v16
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s6
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v11, v3, v8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr1 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v9, v1, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v1, v12, v1
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v4, v12, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v6
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v3, v10, v3
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v2, v10, v2
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v1, s[6:7], v1, v12
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v7, s[6:7], v4, v12, s[6:7]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v6, s[6:7], v3, v10, s[6:7]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v5, s[6:7], v2, v10, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2_vgpr3_vgpr4 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v5, v11, v5
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v8, v11, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v14
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v7, v9, v7
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v6, v9, v6
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v5, s[6:7], v5, v11
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v15, s[6:7], v8, v11, s[6:7]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v14, s[6:7], v7, v9, s[6:7]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v13, s[6:7], v6, v9, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6_vgpr7_vgpr8 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v13
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v13, v11, v12
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v11, v11, v12
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v11, v9, v10
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v9, v9, v10
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v9, v12
+; GFX9-G-O0-NEXT: v_or_b32_e64 v11, v10, v11
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[6:7], v[9:10], v[11:12]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v9, v12
+; GFX9-G-O0-NEXT: v_or_b32_e64 v11, v10, v11
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], v[11:12]
+; GFX9-G-O0-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], v[5:6]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v11
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v6, v6, v7
+; GFX9-G-O0-NEXT: v_min_u32_e64 v5, v5, v6
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 64
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-G-O0-NEXT: v_add_u32_e64 v6, v5, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v7, v8
+; GFX9-G-O0-NEXT: v_min_u32_e64 v5, v5, v7
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s16, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], v[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v12
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v7, v8
+; GFX9-G-O0-NEXT: v_min_u32_e64 v6, v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s10
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v8, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v8, v8, v9
+; GFX9-G-O0-NEXT: v_min_u32_e64 v6, v6, v8
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s15, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s11, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s14, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v6, s[8:9], v5, v6
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s16
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v7, s[8:9], v5, v7, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s14
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v9, s[8:9], v5, v8, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s10
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v5, v8, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s4
+; GFX9-G-O0-NEXT: v_cmp_gt_u64_e64 s[10:11], v[12:13], v[14:15]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[12:13], v[14:15]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, s13
+; GFX9-G-O0-NEXT: v_cmp_gt_u64_e64 s[12:13], v[10:11], v[12:13]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v10, v5, v10, s[12:13]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[10:11]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v10, v5, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[6:7]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s7, 0x7f
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v6, v6, s7
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v7, v7, s6
+; GFX9-G-O0-NEXT: v_or_b32_e64 v6, v6, v9
+; GFX9-G-O0-NEXT: v_or_b32_e64 v8, v7, v8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[6:7], v[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v3
+; GFX9-G-O0-NEXT: v_and_b32_e32 v1, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v1, v1, v4, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v2, v3, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-G-O0-NEXT: v_and_b32_e32 v3, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr1_vgpr2 def $vgpr1_vgpr2_vgpr3_vgpr4 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-G-O0-NEXT: v_and_b32_e32 v5, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v5
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], -1
+; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], exec
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s4, 0
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s5, 1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execz .LBB0_3
+; GFX9-G-O0-NEXT: s_branch .LBB0_8
+; GFX9-G-O0-NEXT: .LBB0_1: ; %Flow
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v0, 2
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v0, 3
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: ; %bb.2: ; %Flow
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(4)
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB0_5
+; GFX9-G-O0-NEXT: .LBB0_3: ; %Flow2
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v4, 0
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v4, 1
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB0_9
+; GFX9-G-O0-NEXT: .LBB0_4: ; %udiv-loop-exit
+; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[10:11], v0, v[2:3]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[0:1], v0, v[4:5]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr2 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v6, v2, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v11
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v7
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v1, v5
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-G-O0-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v5
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB0_3
+; GFX9-G-O0-NEXT: .LBB0_5: ; %Flow1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v8, 4
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v8, 5
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB0_4
+; GFX9-G-O0-NEXT: .LBB0_6: ; %udiv-do-while
+; GFX9-G-O0-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s6, v16, 6
+; GFX9-G-O0-NEXT: v_readlane_b32 s7, v16, 7
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(16)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[21:22], v2, v[0:1]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[4:5], v2, v[3:4]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v2, v3
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v0, v1
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v15
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v22
+; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v2, v3
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[23:24], v0, v[2:3]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[0:1], v0, v[12:13]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr2 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v14, v2, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(8)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v29, v31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v30, v32
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v33
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v34
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v29
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v30
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v23
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v24
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v15
+; GFX9-G-O0-NEXT: v_or_b32_e64 v13, v1, v13
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v22
+; GFX9-G-O0-NEXT: v_or3_b32 v12, v12, v14, v15
+; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v13
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v11, s[8:9], v11, v4
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v10, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v8, v7, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v6, v5, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s8
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v8, v6, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s8
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v6, v6, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 1
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_and_b32_e64 v12, v8, s9
+; GFX9-G-O0-NEXT: v_and_b32_e64 v10, v8, s8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, s4
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12_vgpr13 killed $vgpr12_vgpr13 def $vgpr12_vgpr13_vgpr14_vgpr15 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v23, v25
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v24, v26
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v27
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v28
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v23
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v24
+; GFX9-G-O0-NEXT: v_and_b32_e64 v11, v8, v11
+; GFX9-G-O0-NEXT: v_and_b32_e64 v10, v8, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v22
+; GFX9-G-O0-NEXT: v_and_b32_e64 v8, v6, v8
+; GFX9-G-O0-NEXT: v_and_b32_e64 v6, v6, v21
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v4, s[8:9], v4, v11
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v9, s[8:9], v7, v8, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v5, v6, s[8:9]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5_vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v20
+; GFX9-G-O0-NEXT: s_mov_b32 s8, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s12, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s11, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, -1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, s8
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v17, s[8:9], v11, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s12
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v18, s[8:9], v10, v11, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, s11
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v20, s[8:9], v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, s10
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v19, s[8:9], v8, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v20
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v19
+; GFX9-G-O0-NEXT: v_or_b32_e64 v17, v17, v20
+; GFX9-G-O0-NEXT: v_or_b32_e64 v19, v18, v19
+; GFX9-G-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[17:18], v[19:20]
+; GFX9-G-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v0
+; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v12
+; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s6, 2
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s7, 3
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s6, 6
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s7, 7
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execnz .LBB0_6
+; GFX9-G-O0-NEXT: s_branch .LBB0_1
+; GFX9-G-O0-NEXT: .LBB0_7: ; %udiv-preheader
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 64
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s4
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v4, v13, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v5, v5, v13
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s4
+; GFX9-G-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v13, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s6
+; GFX9-G-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v13, v6
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[6:7], v13, v[21:22]
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[26:27], v13, v[15:16]
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[24:25], v5, v[21:22]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v26
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v27
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v23, v24
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v25
+; GFX9-G-O0-NEXT: v_or_b32_e64 v14, v14, v23
+; GFX9-G-O0-NEXT: v_or_b32_e64 v13, v5, v13
+; GFX9-G-O0-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[21:22], v4, v[21:22]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v22
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v4, v4, v14, s[4:5]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v13, s[4:5]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v16
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v4, v4, v14, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v13, v5, v13, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[4:5]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4_vgpr5 killed $vgpr4_vgpr5 def $vgpr4_vgpr5_vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v20
+; GFX9-G-O0-NEXT: s_mov_b32 s4, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s7, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s6, -1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, s4
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v16, s[4:5], v16, v17
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, s10
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v15, s[4:5], v15, v16, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s7
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v14, s[4:5], v14, v15, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s6
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v13, s[4:5], v13, v14, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[8:9]
+; GFX9-G-O0-NEXT: v_writelane_b32 v12, s8, 6
+; GFX9-G-O0-NEXT: v_writelane_b32 v12, s9, 7
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s4
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB0_6
+; GFX9-G-O0-NEXT: .LBB0_8: ; %udiv-bb1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(3)
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v5, s[6:7], v2, v5
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(1)
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v6, s[6:7], v4, v6, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s9
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v8, s[6:7], v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v7, s[6:7], v1, v3, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v7
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0x7f
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v4, s[6:7], v1, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s7, 64
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v3, v4, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v9, v1, v4
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_cmp_lt_u32_e64 s[8:9], v4, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v4, v1
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[1:2], v4, v[13:14]
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[18:19], v9, v[13:14]
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[16:17], v4, v[11:12]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v17
+; GFX9-G-O0-NEXT: v_or_b32_e64 v10, v10, v15
+; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v4, v9
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[13:14], v3, v[13:14]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v2, v3, s[8:9]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v14
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v12
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v3
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr1_vgpr2 def $vgpr1_vgpr2_vgpr3_vgpr4 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[8:9], s[4:5]
+; GFX9-G-O0-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v8
+; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v6, v7
+; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s4
+; GFX9-G-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[5:6], v[7:8]
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s11
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], exec
+; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s6, 4
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s7, 5
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execz .LBB0_5
+; GFX9-G-O0-NEXT: s_branch .LBB0_7
+; GFX9-G-O0-NEXT: .LBB0_9: ; %udiv-end
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v0, v0, v8
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v1, v1, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v10
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v2, v2, v6
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v3, v3, v5
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v0, s[4:5], v0, v8
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v1, s[4:5], v1, v7, s[4:5]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v2, s[4:5], v2, v6, s[4:5]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v3, s[4:5], v3, v5, s[4:5]
+; GFX9-G-O0-NEXT: ; kill: killed $vgpr4
+; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_nop 0
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_setpc_b64 s[30:31]
%div = sdiv i128 %lhs, %rhs
ret i128 %div
}
@@ -2306,6 +3457,1043 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-LABEL: v_udiv_i128_vv:
+; GFX9-G: ; %bb.0: ; %_udiv-special-cases
+; GFX9-G-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-NEXT: v_or_b32_e32 v8, v4, v6
+; GFX9-G-NEXT: v_or_b32_e32 v9, v5, v7
+; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GFX9-G-NEXT: v_or_b32_e32 v8, v0, v2
+; GFX9-G-NEXT: v_or_b32_e32 v9, v1, v3
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[8:9]
+; GFX9-G-NEXT: v_ffbh_u32_e32 v9, v4
+; GFX9-G-NEXT: v_ffbh_u32_e32 v8, v5
+; GFX9-G-NEXT: v_add_u32_e32 v9, 32, v9
+; GFX9-G-NEXT: v_ffbh_u32_e32 v10, v6
+; GFX9-G-NEXT: v_min_u32_e32 v8, v8, v9
+; GFX9-G-NEXT: v_ffbh_u32_e32 v9, v7
+; GFX9-G-NEXT: v_add_u32_e32 v10, 32, v10
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[6:7]
+; GFX9-G-NEXT: v_add_u32_e32 v8, 64, v8
+; GFX9-G-NEXT: v_min_u32_e32 v9, v9, v10
+; GFX9-G-NEXT: v_ffbh_u32_e32 v10, v0
+; GFX9-G-NEXT: v_cndmask_b32_e64 v8, v9, v8, s[6:7]
+; GFX9-G-NEXT: v_ffbh_u32_e32 v9, v1
+; GFX9-G-NEXT: v_add_u32_e32 v10, 32, v10
+; GFX9-G-NEXT: v_ffbh_u32_e32 v11, v2
+; GFX9-G-NEXT: v_min_u32_e32 v9, v9, v10
+; GFX9-G-NEXT: v_ffbh_u32_e32 v10, v3
+; GFX9-G-NEXT: v_add_u32_e32 v11, 32, v11
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[2:3]
+; GFX9-G-NEXT: v_add_u32_e32 v9, 64, v9
+; GFX9-G-NEXT: v_min_u32_e32 v10, v10, v11
+; GFX9-G-NEXT: v_cndmask_b32_e64 v9, v10, v9, s[6:7]
+; GFX9-G-NEXT: v_sub_co_u32_e64 v12, s[6:7], v8, v9
+; GFX9-G-NEXT: v_subb_co_u32_e64 v13, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_mov_b32_e32 v8, 0x7f
+; GFX9-G-NEXT: v_subb_co_u32_e64 v14, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_mov_b32_e32 v9, 0
+; GFX9-G-NEXT: v_subb_co_u32_e64 v15, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_cmp_gt_u64_e64 s[6:7], v[12:13], v[8:9]
+; GFX9-G-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v8, 0, 1, s[6:7]
+; GFX9-G-NEXT: v_cmp_lt_u64_e64 s[6:7], 0, v[14:15]
+; GFX9-G-NEXT: v_or_b32_e32 v17, v13, v15
+; GFX9-G-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[6:7]
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
+; GFX9-G-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-NEXT: v_cndmask_b32_e64 v8, v9, v8, s[6:7]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[4:5]
+; GFX9-G-NEXT: v_or_b32_e32 v18, v9, v8
+; GFX9-G-NEXT: v_xor_b32_e32 v8, 0x7f, v12
+; GFX9-G-NEXT: v_or_b32_e32 v16, v8, v14
+; GFX9-G-NEXT: v_and_b32_e32 v8, 1, v18
+; GFX9-G-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-G-NEXT: v_cndmask_b32_e64 v10, v0, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v11, v1, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v8, v2, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v9, v3, 0, vcc
+; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc
+; GFX9-G-NEXT: v_or_b32_e32 v16, v18, v16
+; GFX9-G-NEXT: v_and_b32_e32 v16, 1, v16
+; GFX9-G-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-G-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GFX9-G-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GFX9-G-NEXT: s_cbranch_execz .LBB1_6
+; GFX9-G-NEXT: ; %bb.1: ; %udiv-bb1
+; GFX9-G-NEXT: v_add_co_u32_e32 v18, vcc, 1, v12
+; GFX9-G-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v13, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v20, vcc, 0, v14, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v21, vcc, 0, v15, vcc
+; GFX9-G-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GFX9-G-NEXT: v_sub_co_u32_e32 v16, vcc, 0x7f, v12
+; GFX9-G-NEXT: v_sub_u32_e32 v8, 64, v16
+; GFX9-G-NEXT: v_lshrrev_b64 v[8:9], v8, v[0:1]
+; GFX9-G-NEXT: v_lshlrev_b64 v[10:11], v16, v[2:3]
+; GFX9-G-NEXT: v_subrev_u32_e32 v14, 64, v16
+; GFX9-G-NEXT: v_lshlrev_b64 v[12:13], v16, v[0:1]
+; GFX9-G-NEXT: v_or_b32_e32 v10, v8, v10
+; GFX9-G-NEXT: v_or_b32_e32 v11, v9, v11
+; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], v14, v[0:1]
+; GFX9-G-NEXT: v_cmp_gt_u32_e32 vcc, 64, v16
+; GFX9-G-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GFX9-G-NEXT: v_cndmask_b32_e32 v14, 0, v12, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v15, 0, v13, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; GFX9-G-NEXT: v_cmp_eq_u32_e32 vcc, 0, v16
+; GFX9-G-NEXT: v_mov_b32_e32 v13, s11
+; GFX9-G-NEXT: v_cndmask_b32_e32 v8, v8, v2, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v9, v9, v3, vcc
+; GFX9-G-NEXT: v_mov_b32_e32 v11, s9
+; GFX9-G-NEXT: v_mov_b32_e32 v10, s8
+; GFX9-G-NEXT: v_mov_b32_e32 v12, s10
+; GFX9-G-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GFX9-G-NEXT: s_xor_b64 s[12:13], exec, s[8:9]
+; GFX9-G-NEXT: s_cbranch_execz .LBB1_5
+; GFX9-G-NEXT: ; %bb.2: ; %udiv-preheader
+; GFX9-G-NEXT: v_sub_u32_e32 v12, 64, v18
+; GFX9-G-NEXT: v_subrev_u32_e32 v22, 64, v18
+; GFX9-G-NEXT: v_lshrrev_b64 v[10:11], v18, v[0:1]
+; GFX9-G-NEXT: v_lshlrev_b64 v[12:13], v12, v[2:3]
+; GFX9-G-NEXT: v_lshrrev_b64 v[16:17], v18, v[2:3]
+; GFX9-G-NEXT: v_lshrrev_b64 v[2:3], v22, v[2:3]
+; GFX9-G-NEXT: v_or_b32_e32 v10, v10, v12
+; GFX9-G-NEXT: v_or_b32_e32 v11, v11, v13
+; GFX9-G-NEXT: v_cmp_gt_u32_e32 vcc, 64, v18
+; GFX9-G-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v16, 0, v16, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v17, 0, v17, vcc
+; GFX9-G-NEXT: v_add_co_u32_e32 v22, vcc, -1, v4
+; GFX9-G-NEXT: v_addc_co_u32_e32 v23, vcc, -1, v5, vcc
+; GFX9-G-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GFX9-G-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v18
+; GFX9-G-NEXT: v_addc_co_u32_e32 v24, vcc, -1, v6, vcc
+; GFX9-G-NEXT: v_mov_b32_e32 v13, s11
+; GFX9-G-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[4:5]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v3, v3, v1, s[4:5]
+; GFX9-G-NEXT: v_addc_co_u32_e32 v25, vcc, -1, v7, vcc
+; GFX9-G-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-G-NEXT: v_mov_b32_e32 v11, s9
+; GFX9-G-NEXT: v_mov_b32_e32 v10, s8
+; GFX9-G-NEXT: v_mov_b32_e32 v12, s10
+; GFX9-G-NEXT: .LBB1_3: ; %udiv-do-while
+; GFX9-G-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-G-NEXT: v_lshlrev_b64 v[12:13], 1, v[14:15]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v0, 31, v15
+; GFX9-G-NEXT: v_or_b32_e32 v14, v10, v12
+; GFX9-G-NEXT: v_or_b32_e32 v15, v11, v13
+; GFX9-G-NEXT: v_lshlrev_b64 v[12:13], 1, v[16:17]
+; GFX9-G-NEXT: v_lshlrev_b64 v[10:11], 1, v[2:3]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 31, v3
+; GFX9-G-NEXT: v_or_b32_e32 v12, v12, v2
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 31, v9
+; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-G-NEXT: v_or_b32_e32 v2, v10, v2
+; GFX9-G-NEXT: v_or_b32_e32 v8, v8, v0
+; GFX9-G-NEXT: v_sub_co_u32_e32 v0, vcc, v22, v2
+; GFX9-G-NEXT: v_subb_co_u32_e32 v0, vcc, v23, v11, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v0, vcc, v24, v12, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v0, vcc, v25, v13, vcc
+; GFX9-G-NEXT: v_add_co_u32_e64 v18, s[4:5], -1, v18
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v3, 31, v0
+; GFX9-G-NEXT: v_addc_co_u32_e64 v19, s[4:5], -1, v19, s[4:5]
+; GFX9-G-NEXT: v_and_b32_e32 v10, v3, v4
+; GFX9-G-NEXT: v_addc_co_u32_e64 v20, s[4:5], -1, v20, s[4:5]
+; GFX9-G-NEXT: v_and_b32_e32 v16, v3, v5
+; GFX9-G-NEXT: v_sub_co_u32_e32 v2, vcc, v2, v10
+; GFX9-G-NEXT: v_addc_co_u32_e64 v21, s[4:5], -1, v21, s[4:5]
+; GFX9-G-NEXT: v_and_b32_e32 v0, 1, v3
+; GFX9-G-NEXT: v_and_b32_e32 v17, v3, v6
+; GFX9-G-NEXT: v_and_b32_e32 v26, v3, v7
+; GFX9-G-NEXT: v_subb_co_u32_e32 v3, vcc, v11, v16, vcc
+; GFX9-G-NEXT: v_or_b32_e32 v10, v18, v20
+; GFX9-G-NEXT: v_or_b32_e32 v11, v19, v21
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
+; GFX9-G-NEXT: v_subb_co_u32_e32 v16, vcc, v12, v17, vcc
+; GFX9-G-NEXT: v_mov_b32_e32 v11, v1
+; GFX9-G-NEXT: v_subb_co_u32_e32 v17, vcc, v13, v26, vcc
+; GFX9-G-NEXT: s_or_b64 s[8:9], s[4:5], s[8:9]
+; GFX9-G-NEXT: v_mov_b32_e32 v10, v0
+; GFX9-G-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX9-G-NEXT: s_cbranch_execnz .LBB1_3
+; GFX9-G-NEXT: ; %bb.4: ; %Flow
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX9-G-NEXT: .LBB1_5: ; %Flow2
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[12:13]
+; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 1, v[14:15]
+; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 31, v15
+; GFX9-G-NEXT: v_or_b32_e32 v8, v8, v2
+; GFX9-G-NEXT: v_or_b32_e32 v10, v10, v0
+; GFX9-G-NEXT: v_or_b32_e32 v11, v11, v1
+; GFX9-G-NEXT: .LBB1_6: ; %Flow3
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-G-NEXT: v_mov_b32_e32 v0, v10
+; GFX9-G-NEXT: v_mov_b32_e32 v1, v11
+; GFX9-G-NEXT: v_mov_b32_e32 v2, v8
+; GFX9-G-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-G-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-O0-LABEL: v_udiv_i128_vv:
+; GFX9-G-O0: ; %bb.0: ; %_udiv-special-cases
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: ; implicit-def: $vgpr8 : SGPR spill to VGPR lane
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v0
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9_vgpr10_vgpr11 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v3
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v4
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_nop 0
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v5
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v7
+; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6_vgpr7_vgpr8 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v9
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v9, v12
+; GFX9-G-O0-NEXT: v_or_b32_e64 v11, v10, v11
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[6:7], v[9:10], v[11:12]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v9, v12
+; GFX9-G-O0-NEXT: v_or_b32_e64 v11, v10, v11
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], v[11:12]
+; GFX9-G-O0-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], v[5:6]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v11
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v6, v6, v7
+; GFX9-G-O0-NEXT: v_min_u32_e64 v5, v5, v6
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 64
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-G-O0-NEXT: v_add_u32_e64 v6, v5, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v7, v8
+; GFX9-G-O0-NEXT: v_min_u32_e64 v5, v5, v7
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s14, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], v[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v12
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v7, v8
+; GFX9-G-O0-NEXT: v_min_u32_e64 v6, v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s10
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v8, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v8, v8, v9
+; GFX9-G-O0-NEXT: v_min_u32_e64 v6, v6, v8
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s13, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s11, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s12, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v6, s[8:9], v5, v6
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s14
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v7, s[8:9], v5, v7, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s12
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v9, s[8:9], v5, v8, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s10
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v5, v8, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[12:13], 0x7f
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s4
+; GFX9-G-O0-NEXT: v_cmp_gt_u64_e64 s[10:11], v[12:13], v[14:15]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[12:13], v[14:15]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, s13
+; GFX9-G-O0-NEXT: v_cmp_gt_u64_e64 s[12:13], v[10:11], v[12:13]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v10, v5, v10, s[12:13]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[10:11]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v10, v5, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[6:7]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s7, 0x7f
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v6, v6, s7
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v7, v7, s6
+; GFX9-G-O0-NEXT: v_or_b32_e64 v6, v6, v9
+; GFX9-G-O0-NEXT: v_or_b32_e64 v8, v7, v8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[6:7], v[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v3
+; GFX9-G-O0-NEXT: v_and_b32_e32 v1, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v1, v1, v4, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v2, v3, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-G-O0-NEXT: v_and_b32_e32 v3, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr1_vgpr2 def $vgpr1_vgpr2_vgpr3_vgpr4 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-G-O0-NEXT: v_and_b32_e32 v5, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v5
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], -1
+; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], exec
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s4, 0
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s5, 1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execz .LBB1_3
+; GFX9-G-O0-NEXT: s_branch .LBB1_8
+; GFX9-G-O0-NEXT: .LBB1_1: ; %Flow
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v0, 2
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v0, 3
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: ; %bb.2: ; %Flow
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(4)
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB1_5
+; GFX9-G-O0-NEXT: .LBB1_3: ; %Flow2
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v4, 0
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v4, 1
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB1_9
+; GFX9-G-O0-NEXT: .LBB1_4: ; %udiv-loop-exit
+; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[10:11], v0, v[2:3]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[0:1], v0, v[4:5]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr2 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v6, v2, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v11
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v7
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v1, v5
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-G-O0-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v5
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB1_3
+; GFX9-G-O0-NEXT: .LBB1_5: ; %Flow1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v8, 4
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v8, 5
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB1_4
+; GFX9-G-O0-NEXT: .LBB1_6: ; %udiv-do-while
+; GFX9-G-O0-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s6, v16, 6
+; GFX9-G-O0-NEXT: v_readlane_b32 s7, v16, 7
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(16)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[21:22], v2, v[0:1]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[4:5], v2, v[3:4]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v2, v3
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v0, v1
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v15
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v22
+; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v2, v3
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[23:24], v0, v[2:3]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[0:1], v0, v[12:13]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr2 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v14, v2, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(8)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v29, v31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v30, v32
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v33
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v34
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v29
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v30
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v23
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v24
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v15
+; GFX9-G-O0-NEXT: v_or_b32_e64 v13, v1, v13
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v22
+; GFX9-G-O0-NEXT: v_or3_b32 v12, v12, v14, v15
+; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v13
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v11, s[8:9], v11, v4
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v10, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v8, v7, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v6, v5, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s8
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v8, v6, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s8
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v6, v6, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 1
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_and_b32_e64 v12, v8, s9
+; GFX9-G-O0-NEXT: v_and_b32_e64 v10, v8, s8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, s4
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12_vgpr13 killed $vgpr12_vgpr13 def $vgpr12_vgpr13_vgpr14_vgpr15 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v23, v25
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v24, v26
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v27
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v28
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v23
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v24
+; GFX9-G-O0-NEXT: v_and_b32_e64 v11, v8, v11
+; GFX9-G-O0-NEXT: v_and_b32_e64 v10, v8, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v22
+; GFX9-G-O0-NEXT: v_and_b32_e64 v8, v6, v8
+; GFX9-G-O0-NEXT: v_and_b32_e64 v6, v6, v21
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v4, s[8:9], v4, v11
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v9, s[8:9], v7, v8, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v5, v6, s[8:9]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5_vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v20
+; GFX9-G-O0-NEXT: s_mov_b32 s8, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s12, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s11, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, -1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, s8
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v17, s[8:9], v11, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s12
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v18, s[8:9], v10, v11, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, s11
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v20, s[8:9], v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, s10
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v19, s[8:9], v8, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v20
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v19
+; GFX9-G-O0-NEXT: v_or_b32_e64 v17, v17, v20
+; GFX9-G-O0-NEXT: v_or_b32_e64 v19, v18, v19
+; GFX9-G-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[17:18], v[19:20]
+; GFX9-G-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v0
+; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v12
+; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s6, 2
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s7, 3
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s6, 6
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s7, 7
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execnz .LBB1_6
+; GFX9-G-O0-NEXT: s_branch .LBB1_1
+; GFX9-G-O0-NEXT: .LBB1_7: ; %udiv-preheader
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 64
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s4
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v4, v13, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v5, v5, v13
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s4
+; GFX9-G-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v13, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s6
+; GFX9-G-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v13, v6
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[6:7], v13, v[21:22]
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[26:27], v13, v[15:16]
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[24:25], v5, v[21:22]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v26
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v27
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v23, v24
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v25
+; GFX9-G-O0-NEXT: v_or_b32_e64 v14, v14, v23
+; GFX9-G-O0-NEXT: v_or_b32_e64 v13, v5, v13
+; GFX9-G-O0-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[21:22], v4, v[21:22]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v22
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v4, v4, v14, s[4:5]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v13, s[4:5]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v16
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v4, v4, v14, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v13, v5, v13, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[4:5]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4_vgpr5 killed $vgpr4_vgpr5 def $vgpr4_vgpr5_vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v20
+; GFX9-G-O0-NEXT: s_mov_b32 s4, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s7, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s6, -1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, s4
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v16, s[4:5], v16, v17
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, s10
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v15, s[4:5], v15, v16, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s7
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v14, s[4:5], v14, v15, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s6
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v13, s[4:5], v13, v14, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[8:9]
+; GFX9-G-O0-NEXT: v_writelane_b32 v12, s8, 6
+; GFX9-G-O0-NEXT: v_writelane_b32 v12, s9, 7
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s4
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB1_6
+; GFX9-G-O0-NEXT: .LBB1_8: ; %udiv-bb1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(3)
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v5, s[6:7], v2, v5
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(1)
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v6, s[6:7], v4, v6, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s9
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v8, s[6:7], v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v7, s[6:7], v1, v3, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v7
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0x7f
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v4, s[6:7], v1, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s7, 64
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v3, v4, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v9, v1, v4
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_cmp_lt_u32_e64 s[8:9], v4, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v4, v1
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[1:2], v4, v[13:14]
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[18:19], v9, v[13:14]
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[16:17], v4, v[11:12]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v17
+; GFX9-G-O0-NEXT: v_or_b32_e64 v10, v10, v15
+; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v4, v9
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[13:14], v3, v[13:14]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v2, v3, s[8:9]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v14
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v12
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v3
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr1_vgpr2 def $vgpr1_vgpr2_vgpr3_vgpr4 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[8:9], s[4:5]
+; GFX9-G-O0-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v8
+; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v6, v7
+; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s4
+; GFX9-G-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[5:6], v[7:8]
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s11
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], exec
+; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s6, 4
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s7, 5
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execz .LBB1_5
+; GFX9-G-O0-NEXT: s_branch .LBB1_7
+; GFX9-G-O0-NEXT: .LBB1_9: ; %udiv-end
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v8
+; GFX9-G-O0-NEXT: ; kill: killed $vgpr4
+; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_nop 0
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_setpc_b64 s[30:31]
%div = udiv i128 %lhs, %rhs
ret i128 %div
}
@@ -2388,6 +4576,66 @@ define i128 @v_sdiv_i128_v_pow2k(i128 %lhs) {
; GFX9-O0-NEXT: v_lshrrev_b64 v[3:4], s4, v[3:4]
; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr3_vgpr4 killed $exec
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-LABEL: v_sdiv_i128_v_pow2k:
+; GFX9-G: ; %bb.0:
+; GFX9-G-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v4, 31, v3
+; GFX9-G-NEXT: v_mov_b32_e32 v5, v4
+; GFX9-G-NEXT: v_lshrrev_b64 v[4:5], 31, v[4:5]
+; GFX9-G-NEXT: v_add_co_u32_e32 v0, vcc, v0, v4
+; GFX9-G-NEXT: v_addc_co_u32_e32 v4, vcc, v1, v5, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v3, vcc
+; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v3, 1, v4
+; GFX9-G-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v2, 1, v2
+; GFX9-G-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-O0-LABEL: v_sdiv_i128_v_pow2k:
+; GFX9-G-O0: ; %bb.0:
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v0
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v0, v0, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v0
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[6:7], v0, v[5:6]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v7
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s5, 0
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v4, s[6:7], v4, v5
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v1, s[6:7], v1, v0, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v5, s[6:7], v2, v0, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v4, s[6:7], v3, v0, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-G-O0-NEXT: s_mov_b32 s5, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v0, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s5, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[5:6], v2, v[5:6]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v6
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v3
+; GFX9-G-O0-NEXT: v_or_b32_e64 v1, v1, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v3, v2, v4
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v2, v2, v4
+; GFX9-G-O0-NEXT: s_setpc_b64 s[30:31]
%div = sdiv i128 %lhs, 8589934592
ret i128 %div
}
@@ -2434,10 +4682,42 @@ define i128 @v_udiv_i128_v_pow2k(i128 %lhs) {
; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr2_vgpr3 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v3, 0
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-LABEL: v_udiv_i128_v_pow2k:
+; GFX9-G: ; %bb.0:
+; GFX9-G-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-NEXT: v_mov_b32_e32 v4, v1
+; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 31, v[2:3]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 1, v4
+; GFX9-G-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 1, v3
+; GFX9-G-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-G-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-O0-LABEL: v_udiv_i128_v_pow2k:
+; GFX9-G-O0: ; %bb.0:
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v0, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[5:6], v2, v[4:5]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v6
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v4
+; GFX9-G-O0-NEXT: v_or_b32_e64 v1, v1, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v2, v2, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s4
+; GFX9-G-O0-NEXT: s_setpc_b64 s[30:31]
%div = udiv i128 %lhs, 8589934592
ret i128 %div
}
-
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX9-SDAG: {{.*}}
-; GFX9-SDAG-O0: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll
index 46e2632..16a03ba 100644
--- a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll
@@ -1,25 +1,3248 @@
-; RUN: not --crash llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -o - %s 2>&1 | FileCheck -check-prefix=SDAG-ERR %s
-; RUN: not --crash llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -o - %s 2>&1 | FileCheck -check-prefix=GISEL-ERR %s
-
-; SDAG-ERR: LLVM ERROR: unsupported libcall legalization
-; GISEL-ERR: LLVM ERROR: unable to legalize instruction: %{{[0-9]+}}:_(s128) = G_SDIV %{{[0-9]+}}:_, %{{[0-9]+}}:_ (in function: v_sdiv_v2i128_vv)
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -o - %s | FileCheck -check-prefix=SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -o - %s | FileCheck -check-prefix=GISEL %s
define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
+; SDAG-LABEL: v_sdiv_v2i128_vv:
+; SDAG: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_ashrrev_i32_e32 v24, 31, v3
+; SDAG-NEXT: v_ashrrev_i32_e32 v25, 31, v11
+; SDAG-NEXT: v_mov_b32_e32 v16, 0
+; SDAG-NEXT: s_mov_b64 s[10:11], 0x7f
+; SDAG-NEXT: v_mov_b32_e32 v26, v24
+; SDAG-NEXT: v_mov_b32_e32 v27, v25
+; SDAG-NEXT: v_xor_b32_e32 v17, v24, v3
+; SDAG-NEXT: v_xor_b32_e32 v18, v24, v2
+; SDAG-NEXT: v_xor_b32_e32 v1, v24, v1
+; SDAG-NEXT: v_xor_b32_e32 v0, v24, v0
+; SDAG-NEXT: v_xor_b32_e32 v19, v25, v11
+; SDAG-NEXT: v_xor_b32_e32 v20, v25, v10
+; SDAG-NEXT: v_xor_b32_e32 v9, v25, v9
+; SDAG-NEXT: v_xor_b32_e32 v8, v25, v8
+; SDAG-NEXT: v_sub_i32_e32 v2, vcc, v0, v24
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v1, v24, vcc
+; SDAG-NEXT: v_ffbh_u32_e32 v0, v2
+; SDAG-NEXT: v_subb_u32_e32 v10, vcc, v18, v24, vcc
+; SDAG-NEXT: v_add_i32_e64 v1, s[4:5], 32, v0
+; SDAG-NEXT: v_ffbh_u32_e32 v18, v3
+; SDAG-NEXT: v_subb_u32_e32 v11, vcc, v17, v24, vcc
+; SDAG-NEXT: v_or_b32_e32 v0, v2, v10
+; SDAG-NEXT: v_ffbh_u32_e32 v17, v10
+; SDAG-NEXT: v_min_u32_e32 v18, v1, v18
+; SDAG-NEXT: v_sub_i32_e32 v28, vcc, v8, v25
+; SDAG-NEXT: v_or_b32_e32 v1, v3, v11
+; SDAG-NEXT: v_add_i32_e64 v8, s[4:5], 32, v17
+; SDAG-NEXT: v_ffbh_u32_e32 v17, v11
+; SDAG-NEXT: v_add_i32_e64 v18, s[4:5], 64, v18
+; SDAG-NEXT: v_addc_u32_e64 v21, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_subb_u32_e32 v29, vcc, v9, v25, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; SDAG-NEXT: v_ffbh_u32_e32 v1, v28
+; SDAG-NEXT: v_min_u32_e32 v8, v8, v17
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[10:11]
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v21, 0, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v0, vcc, v20, v25, vcc
+; SDAG-NEXT: v_add_i32_e64 v9, s[8:9], 32, v1
+; SDAG-NEXT: v_ffbh_u32_e32 v20, v29
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v18, v8, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v19, v25, vcc
+; SDAG-NEXT: v_or_b32_e32 v8, v28, v0
+; SDAG-NEXT: v_ffbh_u32_e32 v19, v0
+; SDAG-NEXT: v_min_u32_e32 v20, v9, v20
+; SDAG-NEXT: v_or_b32_e32 v9, v29, v1
+; SDAG-NEXT: v_add_i32_e32 v19, vcc, 32, v19
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v1
+; SDAG-NEXT: v_add_i32_e32 v20, vcc, 64, v20
+; SDAG-NEXT: v_addc_u32_e64 v22, s[6:7], 0, 0, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; SDAG-NEXT: v_min_u32_e32 v8, v19, v21
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v22, 0, s[6:7]
+; SDAG-NEXT: s_or_b64 s[8:9], vcc, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v20, v8, s[6:7]
+; SDAG-NEXT: v_sub_i32_e32 v8, vcc, v8, v18
+; SDAG-NEXT: v_subb_u32_e32 v9, vcc, v9, v17, vcc
+; SDAG-NEXT: v_xor_b32_e32 v17, 0x7f, v8
+; SDAG-NEXT: v_subbrev_u32_e32 v18, vcc, 0, v16, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[10:11], v[8:9]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v19, vcc, 0, v16, vcc
+; SDAG-NEXT: v_or_b32_e32 v16, v17, v18
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_cndmask_b32_e64 v21, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v9, v19
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_cndmask_b32_e32 v20, v21, v20, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[16:17]
+; SDAG-NEXT: v_and_b32_e32 v16, 1, v20
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v16
+; SDAG-NEXT: s_or_b64 s[4:5], s[8:9], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v11, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v10, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v21, v3, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v2, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB0_6
+; SDAG-NEXT: ; %bb.1: ; %udiv-bb15
+; SDAG-NEXT: v_add_i32_e32 v30, vcc, 1, v8
+; SDAG-NEXT: v_sub_i32_e64 v20, s[4:5], 63, v8
+; SDAG-NEXT: v_mov_b32_e32 v16, 0
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: v_addc_u32_e32 v31, vcc, 0, v9, vcc
+; SDAG-NEXT: v_lshl_b64 v[20:21], v[2:3], v20
+; SDAG-NEXT: v_addc_u32_e32 v32, vcc, 0, v18, vcc
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, 0, v19, vcc
+; SDAG-NEXT: v_or_b32_e32 v18, v30, v32
+; SDAG-NEXT: v_sub_i32_e32 v34, vcc, 0x7f, v8
+; SDAG-NEXT: v_or_b32_e32 v19, v31, v33
+; SDAG-NEXT: v_lshl_b64 v[8:9], v[10:11], v34
+; SDAG-NEXT: v_sub_i32_e32 v35, vcc, 64, v34
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[2:3], v34
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_lshr_b64 v[18:19], v[2:3], v35
+; SDAG-NEXT: v_or_b32_e32 v9, v9, v19
+; SDAG-NEXT: v_or_b32_e32 v8, v8, v18
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v34
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v21, v9, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v20, v8, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v21, 0, v23, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, 0, v22, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v34
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v9, v11, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v8, v10, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v18, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB0_5
+; SDAG-NEXT: ; %bb.2: ; %udiv-preheader4
+; SDAG-NEXT: v_lshr_b64 v[16:17], v[2:3], v30
+; SDAG-NEXT: v_sub_i32_e32 v35, vcc, 64, v30
+; SDAG-NEXT: v_subrev_i32_e32 v36, vcc, 64, v30
+; SDAG-NEXT: v_lshr_b64 v[37:38], v[10:11], v30
+; SDAG-NEXT: v_add_i32_e32 v34, vcc, -1, v28
+; SDAG-NEXT: s_mov_b64 s[10:11], 0
+; SDAG-NEXT: v_mov_b32_e32 v22, 0
+; SDAG-NEXT: v_mov_b32_e32 v23, 0
+; SDAG-NEXT: v_mov_b32_e32 v18, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: v_lshl_b64 v[48:49], v[10:11], v35
+; SDAG-NEXT: v_lshr_b64 v[10:11], v[10:11], v36
+; SDAG-NEXT: v_addc_u32_e32 v35, vcc, -1, v29, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v17, v49
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v48
+; SDAG-NEXT: v_addc_u32_e32 v36, vcc, -1, v0, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v30
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v11, v17, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v10, v16, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v11, 0, v38, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, 0, v37, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v37, vcc, -1, v1, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v17, v3, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v2, v16, v2, vcc
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: .LBB0_3: ; %udiv-do-while3
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v16, 31, v3
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v38, 31, v9
+; SDAG-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v39, 31, v21
+; SDAG-NEXT: v_lshl_b64 v[20:21], v[20:21], 1
+; SDAG-NEXT: v_or_b32_e32 v10, v10, v16
+; SDAG-NEXT: v_or_b32_e32 v2, v2, v38
+; SDAG-NEXT: v_or_b32_e32 v8, v8, v39
+; SDAG-NEXT: v_or_b32_e32 v9, v19, v9
+; SDAG-NEXT: v_sub_i32_e32 v16, vcc, v34, v2
+; SDAG-NEXT: v_or_b32_e32 v8, v18, v8
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v35, v3, vcc
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v36, v10, vcc
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v37, v11, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v38, 31, v16
+; SDAG-NEXT: v_and_b32_e32 v39, v38, v28
+; SDAG-NEXT: v_and_b32_e32 v48, v38, v29
+; SDAG-NEXT: v_and_b32_e32 v49, v38, v0
+; SDAG-NEXT: v_and_b32_e32 v16, 1, v38
+; SDAG-NEXT: v_and_b32_e32 v38, v38, v1
+; SDAG-NEXT: v_sub_i32_e32 v2, vcc, v2, v39
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v3, v48, vcc
+; SDAG-NEXT: v_subb_u32_e32 v10, vcc, v10, v49, vcc
+; SDAG-NEXT: v_subb_u32_e32 v11, vcc, v11, v38, vcc
+; SDAG-NEXT: v_add_i32_e32 v30, vcc, -1, v30
+; SDAG-NEXT: v_addc_u32_e32 v31, vcc, -1, v31, vcc
+; SDAG-NEXT: v_addc_u32_e32 v32, vcc, -1, v32, vcc
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, -1, v33, vcc
+; SDAG-NEXT: v_or_b32_e32 v38, v30, v32
+; SDAG-NEXT: v_or_b32_e32 v39, v31, v33
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[38:39]
+; SDAG-NEXT: v_or_b32_e32 v21, v23, v21
+; SDAG-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; SDAG-NEXT: v_or_b32_e32 v20, v22, v20
+; SDAG-NEXT: v_mov_b32_e32 v23, v17
+; SDAG-NEXT: v_mov_b32_e32 v22, v16
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; SDAG-NEXT: s_cbranch_execnz .LBB0_3
+; SDAG-NEXT: ; %bb.4: ; %Flow13
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: .LBB0_5: ; %Flow14
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_lshl_b64 v[0:1], v[8:9], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v8, 31, v21
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[20:21], 1
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v8
+; SDAG-NEXT: v_or_b32_e32 v20, v19, v1
+; SDAG-NEXT: v_or_b32_e32 v21, v17, v3
+; SDAG-NEXT: v_or_b32_e32 v17, v18, v0
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v2
+; SDAG-NEXT: .LBB0_6: ; %Flow16
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_ashrrev_i32_e32 v18, 31, v7
+; SDAG-NEXT: v_ashrrev_i32_e32 v19, 31, v15
+; SDAG-NEXT: v_mov_b32_e32 v9, 0
+; SDAG-NEXT: s_mov_b64 s[10:11], 0x7f
+; SDAG-NEXT: v_mov_b32_e32 v22, v18
+; SDAG-NEXT: v_mov_b32_e32 v23, v19
+; SDAG-NEXT: v_xor_b32_e32 v0, v18, v7
+; SDAG-NEXT: v_xor_b32_e32 v1, v18, v6
+; SDAG-NEXT: v_xor_b32_e32 v3, v18, v5
+; SDAG-NEXT: v_xor_b32_e32 v2, v18, v4
+; SDAG-NEXT: v_xor_b32_e32 v6, v19, v15
+; SDAG-NEXT: v_xor_b32_e32 v7, v19, v14
+; SDAG-NEXT: v_xor_b32_e32 v8, v19, v13
+; SDAG-NEXT: v_xor_b32_e32 v10, v19, v12
+; SDAG-NEXT: v_sub_i32_e32 v2, vcc, v2, v18
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v3, v18, vcc
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v2
+; SDAG-NEXT: v_subb_u32_e32 v4, vcc, v1, v18, vcc
+; SDAG-NEXT: v_add_i32_e64 v1, s[4:5], 32, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v11, v3
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v0, v18, vcc
+; SDAG-NEXT: v_or_b32_e32 v0, v2, v4
+; SDAG-NEXT: v_ffbh_u32_e32 v12, v4
+; SDAG-NEXT: v_min_u32_e32 v11, v1, v11
+; SDAG-NEXT: v_sub_i32_e32 v28, vcc, v10, v19
+; SDAG-NEXT: v_or_b32_e32 v1, v3, v5
+; SDAG-NEXT: v_add_i32_e64 v10, s[4:5], 32, v12
+; SDAG-NEXT: v_ffbh_u32_e32 v12, v5
+; SDAG-NEXT: v_add_i32_e64 v11, s[4:5], 64, v11
+; SDAG-NEXT: v_addc_u32_e64 v13, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_subb_u32_e32 v29, vcc, v8, v19, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; SDAG-NEXT: v_ffbh_u32_e32 v1, v28
+; SDAG-NEXT: v_min_u32_e32 v8, v10, v12
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, v13, 0, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v0, vcc, v7, v19, vcc
+; SDAG-NEXT: v_add_i32_e64 v7, s[8:9], 32, v1
+; SDAG-NEXT: v_ffbh_u32_e32 v12, v29
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v11, v8, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v6, v19, vcc
+; SDAG-NEXT: v_or_b32_e32 v6, v28, v0
+; SDAG-NEXT: v_ffbh_u32_e32 v11, v0
+; SDAG-NEXT: v_min_u32_e32 v12, v7, v12
+; SDAG-NEXT: v_or_b32_e32 v7, v29, v1
+; SDAG-NEXT: v_add_i32_e32 v11, vcc, 32, v11
+; SDAG-NEXT: v_ffbh_u32_e32 v13, v1
+; SDAG-NEXT: v_add_i32_e32 v12, vcc, 64, v12
+; SDAG-NEXT: v_addc_u32_e64 v14, s[6:7], 0, 0, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; SDAG-NEXT: v_min_u32_e32 v6, v11, v13
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v7, v14, 0, s[6:7]
+; SDAG-NEXT: s_or_b64 s[8:9], vcc, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v6, v12, v6, s[6:7]
+; SDAG-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; SDAG-NEXT: v_subb_u32_e32 v7, vcc, v7, v10, vcc
+; SDAG-NEXT: v_xor_b32_e32 v10, 0x7f, v6
+; SDAG-NEXT: v_subbrev_u32_e32 v8, vcc, 0, v9, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[10:11], v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v9, vcc, 0, v9, vcc
+; SDAG-NEXT: v_or_b32_e32 v10, v10, v8
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; SDAG-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v11, v7, v9
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; SDAG-NEXT: v_cndmask_b32_e32 v12, v13, v12, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; SDAG-NEXT: v_and_b32_e32 v10, 1, v12
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v10
+; SDAG-NEXT: s_or_b64 s[4:5], s[8:9], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v13, v5, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v11, v4, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v14, v3, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, v2, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB0_12
+; SDAG-NEXT: ; %bb.7: ; %udiv-bb1
+; SDAG-NEXT: v_add_i32_e32 v30, vcc, 1, v6
+; SDAG-NEXT: v_sub_i32_e64 v12, s[4:5], 63, v6
+; SDAG-NEXT: v_mov_b32_e32 v10, 0
+; SDAG-NEXT: v_mov_b32_e32 v11, 0
+; SDAG-NEXT: v_addc_u32_e32 v31, vcc, 0, v7, vcc
+; SDAG-NEXT: v_lshl_b64 v[12:13], v[2:3], v12
+; SDAG-NEXT: v_addc_u32_e32 v32, vcc, 0, v8, vcc
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, 0, v9, vcc
+; SDAG-NEXT: v_or_b32_e32 v7, v30, v32
+; SDAG-NEXT: v_sub_i32_e32 v9, vcc, 0x7f, v6
+; SDAG-NEXT: v_or_b32_e32 v8, v31, v33
+; SDAG-NEXT: v_lshl_b64 v[14:15], v[4:5], v9
+; SDAG-NEXT: v_sub_i32_e32 v6, vcc, 64, v9
+; SDAG-NEXT: v_lshl_b64 v[34:35], v[2:3], v9
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; SDAG-NEXT: v_lshr_b64 v[6:7], v[2:3], v6
+; SDAG-NEXT: v_or_b32_e32 v7, v15, v7
+; SDAG-NEXT: v_or_b32_e32 v6, v14, v6
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v9
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v13, v7, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, v12, v6, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v7, 0, v35, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v6, 0, v34, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v9
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v8, v5, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v12, v4, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v12, 0
+; SDAG-NEXT: v_mov_b32_e32 v13, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB0_11
+; SDAG-NEXT: ; %bb.8: ; %udiv-preheader
+; SDAG-NEXT: v_lshr_b64 v[10:11], v[2:3], v30
+; SDAG-NEXT: v_sub_i32_e32 v35, vcc, 64, v30
+; SDAG-NEXT: v_subrev_i32_e32 v36, vcc, 64, v30
+; SDAG-NEXT: v_lshr_b64 v[37:38], v[4:5], v30
+; SDAG-NEXT: v_add_i32_e32 v34, vcc, -1, v28
+; SDAG-NEXT: s_mov_b64 s[10:11], 0
+; SDAG-NEXT: v_mov_b32_e32 v14, 0
+; SDAG-NEXT: v_mov_b32_e32 v15, 0
+; SDAG-NEXT: v_mov_b32_e32 v12, 0
+; SDAG-NEXT: v_mov_b32_e32 v13, 0
+; SDAG-NEXT: v_lshl_b64 v[48:49], v[4:5], v35
+; SDAG-NEXT: v_lshr_b64 v[4:5], v[4:5], v36
+; SDAG-NEXT: v_addc_u32_e32 v35, vcc, -1, v29, vcc
+; SDAG-NEXT: v_or_b32_e32 v11, v11, v49
+; SDAG-NEXT: v_or_b32_e32 v10, v10, v48
+; SDAG-NEXT: v_addc_u32_e32 v36, vcc, -1, v0, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v30
+; SDAG-NEXT: v_cndmask_b32_e64 v11, v5, v11, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, v4, v10, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v5, 0, v38, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v4, 0, v37, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v37, vcc, -1, v1, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; SDAG-NEXT: v_mov_b32_e32 v11, 0
+; SDAG-NEXT: .LBB0_9: ; %udiv-do-while
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v10, 31, v3
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v38, 31, v9
+; SDAG-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v39, 31, v7
+; SDAG-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
+; SDAG-NEXT: v_or_b32_e32 v4, v4, v10
+; SDAG-NEXT: v_or_b32_e32 v2, v2, v38
+; SDAG-NEXT: v_or_b32_e32 v8, v8, v39
+; SDAG-NEXT: v_or_b32_e32 v9, v13, v9
+; SDAG-NEXT: v_or_b32_e32 v7, v15, v7
+; SDAG-NEXT: v_or_b32_e32 v8, v12, v8
+; SDAG-NEXT: v_sub_i32_e32 v10, vcc, v34, v2
+; SDAG-NEXT: v_subb_u32_e32 v10, vcc, v35, v3, vcc
+; SDAG-NEXT: v_subb_u32_e32 v10, vcc, v36, v4, vcc
+; SDAG-NEXT: v_subb_u32_e32 v10, vcc, v37, v5, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v15, 31, v10
+; SDAG-NEXT: v_and_b32_e32 v10, 1, v15
+; SDAG-NEXT: v_and_b32_e32 v38, v15, v1
+; SDAG-NEXT: v_and_b32_e32 v39, v15, v0
+; SDAG-NEXT: v_and_b32_e32 v48, v15, v29
+; SDAG-NEXT: v_and_b32_e32 v15, v15, v28
+; SDAG-NEXT: v_sub_i32_e32 v2, vcc, v2, v15
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v3, v48, vcc
+; SDAG-NEXT: v_subb_u32_e32 v4, vcc, v4, v39, vcc
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v5, v38, vcc
+; SDAG-NEXT: v_add_i32_e32 v30, vcc, -1, v30
+; SDAG-NEXT: v_addc_u32_e32 v31, vcc, -1, v31, vcc
+; SDAG-NEXT: v_addc_u32_e32 v32, vcc, -1, v32, vcc
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, -1, v33, vcc
+; SDAG-NEXT: v_or_b32_e32 v39, v31, v33
+; SDAG-NEXT: v_or_b32_e32 v38, v30, v32
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[38:39]
+; SDAG-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; SDAG-NEXT: v_or_b32_e32 v6, v14, v6
+; SDAG-NEXT: v_mov_b32_e32 v15, v11
+; SDAG-NEXT: v_mov_b32_e32 v14, v10
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; SDAG-NEXT: s_cbranch_execnz .LBB0_9
+; SDAG-NEXT: ; %bb.10: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: .LBB0_11: ; %Flow11
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_lshl_b64 v[0:1], v[8:9], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v4, 31, v7
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[6:7], 1
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v4
+; SDAG-NEXT: v_or_b32_e32 v13, v13, v1
+; SDAG-NEXT: v_or_b32_e32 v14, v11, v3
+; SDAG-NEXT: v_or_b32_e32 v11, v12, v0
+; SDAG-NEXT: v_or_b32_e32 v10, v10, v2
+; SDAG-NEXT: .LBB0_12: ; %Flow12
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_xor_b32_e32 v3, v27, v26
+; SDAG-NEXT: v_xor_b32_e32 v2, v25, v24
+; SDAG-NEXT: v_xor_b32_e32 v7, v23, v22
+; SDAG-NEXT: v_xor_b32_e32 v6, v19, v18
+; SDAG-NEXT: v_xor_b32_e32 v4, v20, v3
+; SDAG-NEXT: v_xor_b32_e32 v5, v17, v2
+; SDAG-NEXT: v_xor_b32_e32 v1, v21, v3
+; SDAG-NEXT: v_xor_b32_e32 v0, v16, v2
+; SDAG-NEXT: v_xor_b32_e32 v8, v13, v7
+; SDAG-NEXT: v_xor_b32_e32 v9, v11, v6
+; SDAG-NEXT: v_xor_b32_e32 v11, v14, v7
+; SDAG-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; SDAG-NEXT: v_subb_u32_e32 v2, vcc, v5, v2, vcc
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v4, v3, vcc
+; SDAG-NEXT: v_xor_b32_e32 v4, v10, v6
+; SDAG-NEXT: v_sub_i32_e32 v4, vcc, v4, v6
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v11, v7, vcc
+; SDAG-NEXT: v_subb_u32_e32 v6, vcc, v9, v6, vcc
+; SDAG-NEXT: v_subb_u32_e32 v7, vcc, v8, v7, vcc
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: v_sdiv_v2i128_vv:
+; GISEL: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_ashrrev_i32_e32 v24, 31, v3
+; GISEL-NEXT: v_ashrrev_i32_e32 v25, 31, v11
+; GISEL-NEXT: v_mov_b32_e32 v20, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v21, 0
+; GISEL-NEXT: v_xor_b32_e32 v0, v24, v0
+; GISEL-NEXT: v_xor_b32_e32 v1, v24, v1
+; GISEL-NEXT: v_xor_b32_e32 v2, v24, v2
+; GISEL-NEXT: v_xor_b32_e32 v3, v24, v3
+; GISEL-NEXT: v_xor_b32_e32 v8, v25, v8
+; GISEL-NEXT: v_xor_b32_e32 v9, v25, v9
+; GISEL-NEXT: v_xor_b32_e32 v10, v25, v10
+; GISEL-NEXT: v_xor_b32_e32 v11, v25, v11
+; GISEL-NEXT: v_sub_i32_e32 v16, vcc, v0, v24
+; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v1, v24, vcc
+; GISEL-NEXT: v_sub_i32_e64 v26, s[4:5], v8, v25
+; GISEL-NEXT: v_subb_u32_e64 v27, s[4:5], v9, v25, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v18, vcc, v2, v24, vcc
+; GISEL-NEXT: v_subb_u32_e32 v19, vcc, v3, v24, vcc
+; GISEL-NEXT: v_subb_u32_e64 v10, vcc, v10, v25, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v11, vcc, v11, v25, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v8, v27
+; GISEL-NEXT: v_ffbh_u32_e32 v9, v26
+; GISEL-NEXT: v_ffbh_u32_e32 v22, v17
+; GISEL-NEXT: v_ffbh_u32_e32 v23, v16
+; GISEL-NEXT: v_or_b32_e32 v0, v26, v10
+; GISEL-NEXT: v_or_b32_e32 v1, v27, v11
+; GISEL-NEXT: v_or_b32_e32 v2, v16, v18
+; GISEL-NEXT: v_or_b32_e32 v3, v17, v19
+; GISEL-NEXT: v_add_i32_e32 v9, vcc, 32, v9
+; GISEL-NEXT: v_ffbh_u32_e32 v28, v11
+; GISEL-NEXT: v_ffbh_u32_e32 v29, v10
+; GISEL-NEXT: v_add_i32_e32 v23, vcc, 32, v23
+; GISEL-NEXT: v_ffbh_u32_e32 v30, v19
+; GISEL-NEXT: v_ffbh_u32_e32 v31, v18
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[2:3]
+; GISEL-NEXT: v_min_u32_e32 v0, v8, v9
+; GISEL-NEXT: v_add_i32_e64 v1, s[6:7], 32, v29
+; GISEL-NEXT: v_min_u32_e32 v2, v22, v23
+; GISEL-NEXT: v_add_i32_e64 v3, s[6:7], 32, v31
+; GISEL-NEXT: v_add_i32_e64 v0, s[6:7], 64, v0
+; GISEL-NEXT: v_min_u32_e32 v1, v28, v1
+; GISEL-NEXT: v_add_i32_e64 v2, s[6:7], 64, v2
+; GISEL-NEXT: v_min_u32_e32 v3, v30, v3
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v22, 0, 1, s[4:5]
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v2, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[20:21]
+; GISEL-NEXT: v_cndmask_b32_e64 v20, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v8, 0x7f, v0
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e64 v21, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v8, v8, v2
+; GISEL-NEXT: v_or_b32_e32 v9, v1, v3
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e32 v20, v21, v20, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v9, v22, v20
+; GISEL-NEXT: v_and_b32_e32 v20, 1, v9
+; GISEL-NEXT: v_or_b32_e32 v8, v9, v8
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v20
+; GISEL-NEXT: v_cndmask_b32_e64 v20, v16, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v22, 1, v8
+; GISEL-NEXT: v_cndmask_b32_e64 v21, v17, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v8, v18, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v9, v19, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB0_6
+; GISEL-NEXT: ; %bb.1: ; %udiv-bb15
+; GISEL-NEXT: v_add_i32_e32 v28, vcc, 1, v0
+; GISEL-NEXT: v_addc_u32_e64 v29, s[4:5], 0, v1, vcc
+; GISEL-NEXT: v_sub_i32_e32 v32, vcc, 0x7f, v0
+; GISEL-NEXT: v_addc_u32_e64 v30, vcc, 0, v2, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v31, vcc, 0, v3, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v20, s[4:5], 64, v32
+; GISEL-NEXT: v_sub_i32_e64 v8, s[4:5], 64, v32
+; GISEL-NEXT: v_lshl_b64 v[0:1], v[16:17], v32
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[18:19], v32
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[8:9], v[16:17], v8
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[16:17], v20
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v32
+; GISEL-NEXT: v_cndmask_b32_e32 v20, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v21, 0, v1, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v8, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v9, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v22, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v23, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v32
+; GISEL-NEXT: v_cndmask_b32_e32 v8, v0, v18, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v1, v19, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v0, s8
+; GISEL-NEXT: v_mov_b32_e32 v1, s9
+; GISEL-NEXT: v_mov_b32_e32 v2, s10
+; GISEL-NEXT: v_mov_b32_e32 v3, s11
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[14:15], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB0_5
+; GISEL-NEXT: ; %bb.2: ; %udiv-preheader4
+; GISEL-NEXT: v_subrev_i32_e32 v34, vcc, 64, v28
+; GISEL-NEXT: v_sub_i32_e32 v22, vcc, 64, v28
+; GISEL-NEXT: v_lshr_b64 v[0:1], v[18:19], v28
+; GISEL-NEXT: v_lshr_b64 v[2:3], v[16:17], v28
+; GISEL-NEXT: v_add_i32_e32 v32, vcc, -1, v26
+; GISEL-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v28
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v28
+; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v27, vcc
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[18:19], v22
+; GISEL-NEXT: v_lshr_b64 v[36:37], v[18:19], v34
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, v0, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v19, 0, v1, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v34, vcc, -1, v10, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v2, v22
+; GISEL-NEXT: v_or_b32_e32 v1, v3, v23
+; GISEL-NEXT: v_addc_u32_e32 v35, vcc, -1, v11, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v0, v36, v0, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v1, v37, v1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v22, v0, v16, s[6:7]
+; GISEL-NEXT: v_cndmask_b32_e64 v23, v1, v17, s[6:7]
+; GISEL-NEXT: v_mov_b32_e32 v17, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, s8
+; GISEL-NEXT: v_mov_b32_e32 v1, s9
+; GISEL-NEXT: v_mov_b32_e32 v2, s10
+; GISEL-NEXT: v_mov_b32_e32 v3, s11
+; GISEL-NEXT: .LBB0_3: ; %udiv-do-while3
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshrrev_b32_e32 v16, 31, v21
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[20:21], 1
+; GISEL-NEXT: v_lshl_b64 v[36:37], v[22:23], 1
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[18:19], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v22, 31, v23
+; GISEL-NEXT: v_lshrrev_b32_e32 v23, 31, v9
+; GISEL-NEXT: v_add_i32_e32 v28, vcc, -1, v28
+; GISEL-NEXT: v_addc_u32_e32 v29, vcc, -1, v29, vcc
+; GISEL-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GISEL-NEXT: v_or_b32_e32 v20, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v21, v1, v3
+; GISEL-NEXT: v_or_b32_e32 v2, v18, v22
+; GISEL-NEXT: v_or_b32_e32 v3, v36, v23
+; GISEL-NEXT: v_addc_u32_e32 v30, vcc, -1, v30, vcc
+; GISEL-NEXT: v_addc_u32_e32 v31, vcc, -1, v31, vcc
+; GISEL-NEXT: v_or_b32_e32 v8, v8, v16
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v32, v3
+; GISEL-NEXT: v_subb_u32_e32 v0, vcc, v33, v37, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v28, v30
+; GISEL-NEXT: v_or_b32_e32 v1, v29, v31
+; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v34, v2, vcc
+; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v35, v19, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v16
+; GISEL-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GISEL-NEXT: v_and_b32_e32 v1, v0, v26
+; GISEL-NEXT: v_and_b32_e32 v18, v0, v27
+; GISEL-NEXT: v_and_b32_e32 v16, 1, v0
+; GISEL-NEXT: v_and_b32_e32 v36, v0, v10
+; GISEL-NEXT: v_and_b32_e32 v0, v0, v11
+; GISEL-NEXT: v_sub_i32_e32 v22, vcc, v3, v1
+; GISEL-NEXT: v_subb_u32_e32 v23, vcc, v37, v18, vcc
+; GISEL-NEXT: v_subb_u32_e32 v18, vcc, v2, v36, vcc
+; GISEL-NEXT: v_subb_u32_e32 v19, vcc, v19, v0, vcc
+; GISEL-NEXT: v_mov_b32_e32 v0, v16
+; GISEL-NEXT: v_mov_b32_e32 v1, v17
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GISEL-NEXT: s_cbranch_execnz .LBB0_3
+; GISEL-NEXT: ; %bb.4: ; %Flow13
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: .LBB0_5: ; %Flow14
+; GISEL-NEXT: s_or_b64 exec, exec, s[14:15]
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[20:21], 1
+; GISEL-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v10, 31, v21
+; GISEL-NEXT: v_or_b32_e32 v8, v8, v10
+; GISEL-NEXT: v_or_b32_e32 v20, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v21, v1, v3
+; GISEL-NEXT: .LBB0_6: ; %Flow16
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_ashrrev_i32_e32 v18, 31, v7
+; GISEL-NEXT: v_ashrrev_i32_e32 v19, 31, v15
+; GISEL-NEXT: v_mov_b32_e32 v10, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v11, 0
+; GISEL-NEXT: v_xor_b32_e32 v0, v18, v4
+; GISEL-NEXT: v_xor_b32_e32 v1, v18, v5
+; GISEL-NEXT: v_xor_b32_e32 v2, v18, v6
+; GISEL-NEXT: v_xor_b32_e32 v3, v18, v7
+; GISEL-NEXT: v_xor_b32_e32 v4, v19, v12
+; GISEL-NEXT: v_xor_b32_e32 v5, v19, v13
+; GISEL-NEXT: v_xor_b32_e32 v14, v19, v14
+; GISEL-NEXT: v_xor_b32_e32 v15, v19, v15
+; GISEL-NEXT: v_sub_i32_e32 v6, vcc, v0, v18
+; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v1, v18, vcc
+; GISEL-NEXT: v_sub_i32_e64 v22, s[4:5], v4, v19
+; GISEL-NEXT: v_subb_u32_e64 v23, s[4:5], v5, v19, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v12, vcc, v2, v18, vcc
+; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v3, v18, vcc
+; GISEL-NEXT: v_subb_u32_e64 v4, vcc, v14, v19, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v15, v19, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v14, v23
+; GISEL-NEXT: v_ffbh_u32_e32 v15, v22
+; GISEL-NEXT: v_ffbh_u32_e32 v16, v7
+; GISEL-NEXT: v_ffbh_u32_e32 v17, v6
+; GISEL-NEXT: v_or_b32_e32 v0, v22, v4
+; GISEL-NEXT: v_or_b32_e32 v1, v23, v5
+; GISEL-NEXT: v_or_b32_e32 v2, v6, v12
+; GISEL-NEXT: v_or_b32_e32 v3, v7, v13
+; GISEL-NEXT: v_add_i32_e32 v15, vcc, 32, v15
+; GISEL-NEXT: v_ffbh_u32_e32 v26, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v27, v4
+; GISEL-NEXT: v_add_i32_e32 v17, vcc, 32, v17
+; GISEL-NEXT: v_ffbh_u32_e32 v28, v13
+; GISEL-NEXT: v_ffbh_u32_e32 v29, v12
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[2:3]
+; GISEL-NEXT: v_min_u32_e32 v0, v14, v15
+; GISEL-NEXT: v_add_i32_e64 v1, s[6:7], 32, v27
+; GISEL-NEXT: v_min_u32_e32 v2, v16, v17
+; GISEL-NEXT: v_add_i32_e64 v3, s[6:7], 32, v29
+; GISEL-NEXT: v_add_i32_e64 v0, s[6:7], 64, v0
+; GISEL-NEXT: v_min_u32_e32 v1, v26, v1
+; GISEL-NEXT: v_add_i32_e64 v2, s[6:7], 64, v2
+; GISEL-NEXT: v_min_u32_e32 v3, v28, v3
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v14, 0, 1, s[4:5]
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[12:13]
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v2, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[10:11]
+; GISEL-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v10, 0x7f, v0
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v10, v10, v2
+; GISEL-NEXT: v_or_b32_e32 v11, v1, v3
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e32 v15, v16, v15, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v11, v14, v15
+; GISEL-NEXT: v_and_b32_e32 v14, 1, v11
+; GISEL-NEXT: v_or_b32_e32 v10, v11, v10
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GISEL-NEXT: v_cndmask_b32_e64 v14, v6, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v16, 1, v10
+; GISEL-NEXT: v_cndmask_b32_e64 v15, v7, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v10, v12, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v11, v13, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB0_12
+; GISEL-NEXT: ; %bb.7: ; %udiv-bb1
+; GISEL-NEXT: v_add_i32_e32 v26, vcc, 1, v0
+; GISEL-NEXT: v_addc_u32_e64 v27, s[4:5], 0, v1, vcc
+; GISEL-NEXT: v_sub_i32_e32 v30, vcc, 0x7f, v0
+; GISEL-NEXT: v_addc_u32_e64 v28, vcc, 0, v2, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v29, vcc, 0, v3, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v14, s[4:5], 64, v30
+; GISEL-NEXT: v_sub_i32_e64 v10, s[4:5], 64, v30
+; GISEL-NEXT: v_lshl_b64 v[0:1], v[6:7], v30
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[12:13], v30
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[10:11], v[6:7], v10
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[6:7], v14
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v30
+; GISEL-NEXT: v_cndmask_b32_e32 v14, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v15, 0, v1, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v10, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v11, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30
+; GISEL-NEXT: v_cndmask_b32_e32 v10, v0, v12, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v11, v1, v13, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v0, s8
+; GISEL-NEXT: v_mov_b32_e32 v1, s9
+; GISEL-NEXT: v_mov_b32_e32 v2, s10
+; GISEL-NEXT: v_mov_b32_e32 v3, s11
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[8:9], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB0_11
+; GISEL-NEXT: ; %bb.8: ; %udiv-preheader
+; GISEL-NEXT: v_subrev_i32_e32 v32, vcc, 64, v26
+; GISEL-NEXT: v_sub_i32_e32 v16, vcc, 64, v26
+; GISEL-NEXT: v_lshr_b64 v[0:1], v[12:13], v26
+; GISEL-NEXT: v_lshr_b64 v[2:3], v[6:7], v26
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_add_i32_e32 v30, vcc, -1, v22
+; GISEL-NEXT: v_addc_u32_e32 v31, vcc, -1, v23, vcc
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[12:13], v16
+; GISEL-NEXT: v_lshr_b64 v[12:13], v[12:13], v32
+; GISEL-NEXT: v_addc_u32_e32 v32, vcc, -1, v4, vcc
+; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v5, vcc
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v16
+; GISEL-NEXT: v_or_b32_e32 v3, v3, v17
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v12, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v13, v3, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v16, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v17, 0, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v2, v6, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v13, v3, v7, vcc
+; GISEL-NEXT: v_mov_b32_e32 v7, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GISEL-NEXT: .LBB0_9: ; %udiv-do-while
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[12:13], 1
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v6, 31, v13
+; GISEL-NEXT: v_lshrrev_b32_e32 v34, 31, v11
+; GISEL-NEXT: v_lshl_b64 v[12:13], v[14:15], 1
+; GISEL-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v14, 31, v15
+; GISEL-NEXT: v_add_i32_e32 v26, vcc, -1, v26
+; GISEL-NEXT: v_addc_u32_e32 v27, vcc, -1, v27, vcc
+; GISEL-NEXT: v_or_b32_e32 v16, v16, v6
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v34
+; GISEL-NEXT: v_or_b32_e32 v10, v10, v14
+; GISEL-NEXT: v_or_b32_e32 v14, v0, v12
+; GISEL-NEXT: v_or_b32_e32 v15, v1, v13
+; GISEL-NEXT: v_addc_u32_e32 v28, vcc, -1, v28, vcc
+; GISEL-NEXT: v_addc_u32_e32 v29, vcc, -1, v29, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v30, v2
+; GISEL-NEXT: v_subb_u32_e32 v0, vcc, v31, v3, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v26, v28
+; GISEL-NEXT: v_or_b32_e32 v1, v27, v29
+; GISEL-NEXT: v_subb_u32_e32 v6, vcc, v32, v16, vcc
+; GISEL-NEXT: v_subb_u32_e32 v6, vcc, v33, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v6
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v6, 1, v0
+; GISEL-NEXT: v_and_b32_e32 v12, v0, v22
+; GISEL-NEXT: v_and_b32_e32 v13, v0, v23
+; GISEL-NEXT: v_and_b32_e32 v34, v0, v4
+; GISEL-NEXT: v_and_b32_e32 v35, v0, v5
+; GISEL-NEXT: v_mov_b32_e32 v0, v6
+; GISEL-NEXT: v_mov_b32_e32 v1, v7
+; GISEL-NEXT: v_sub_i32_e32 v12, vcc, v2, v12
+; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v3, v13, vcc
+; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v16, v34, vcc
+; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v17, v35, vcc
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execnz .LBB0_9
+; GISEL-NEXT: ; %bb.10: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB0_11: ; %Flow11
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[14:15], 1
+; GISEL-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v4, 31, v15
+; GISEL-NEXT: v_or_b32_e32 v10, v10, v4
+; GISEL-NEXT: v_or_b32_e32 v14, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v15, v1, v3
+; GISEL-NEXT: .LBB0_12: ; %Flow12
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: v_xor_b32_e32 v3, v25, v24
+; GISEL-NEXT: v_xor_b32_e32 v7, v19, v18
+; GISEL-NEXT: v_xor_b32_e32 v0, v20, v3
+; GISEL-NEXT: v_xor_b32_e32 v1, v21, v3
+; GISEL-NEXT: v_xor_b32_e32 v2, v8, v3
+; GISEL-NEXT: v_xor_b32_e32 v6, v9, v3
+; GISEL-NEXT: v_xor_b32_e32 v4, v14, v7
+; GISEL-NEXT: v_xor_b32_e32 v5, v15, v7
+; GISEL-NEXT: v_xor_b32_e32 v8, v10, v7
+; GISEL-NEXT: v_xor_b32_e32 v9, v11, v7
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v3
+; GISEL-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GISEL-NEXT: v_sub_i32_e64 v4, s[4:5], v4, v7
+; GISEL-NEXT: v_subb_u32_e64 v5, s[4:5], v5, v7, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v2, vcc, v2, v3, vcc
+; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v6, v3, vcc
+; GISEL-NEXT: v_subb_u32_e64 v6, vcc, v8, v7, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v9, v7, vcc
+; GISEL-NEXT: s_setpc_b64 s[30:31]
%shl = sdiv <2 x i128> %lhs, %rhs
ret <2 x i128> %shl
}
define <2 x i128> @v_udiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
+; SDAG-LABEL: v_udiv_v2i128_vv:
+; SDAG: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_or_b32_e32 v17, v9, v11
+; SDAG-NEXT: v_or_b32_e32 v16, v8, v10
+; SDAG-NEXT: v_or_b32_e32 v19, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v18, v0, v2
+; SDAG-NEXT: v_ffbh_u32_e32 v20, v10
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v11
+; SDAG-NEXT: v_ffbh_u32_e32 v22, v8
+; SDAG-NEXT: v_ffbh_u32_e32 v23, v9
+; SDAG-NEXT: v_ffbh_u32_e32 v24, v2
+; SDAG-NEXT: v_ffbh_u32_e32 v25, v3
+; SDAG-NEXT: v_ffbh_u32_e32 v26, v0
+; SDAG-NEXT: v_ffbh_u32_e32 v27, v1
+; SDAG-NEXT: v_mov_b32_e32 v28, 0
+; SDAG-NEXT: s_mov_b64 s[8:9], 0x7f
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[18:19]
+; SDAG-NEXT: v_add_i32_e64 v16, s[6:7], 32, v20
+; SDAG-NEXT: v_add_i32_e64 v17, s[6:7], 32, v22
+; SDAG-NEXT: v_add_i32_e64 v18, s[6:7], 32, v24
+; SDAG-NEXT: v_add_i32_e64 v19, s[6:7], 32, v26
+; SDAG-NEXT: s_or_b64 s[6:7], vcc, s[4:5]
+; SDAG-NEXT: v_min_u32_e32 v16, v16, v21
+; SDAG-NEXT: v_min_u32_e32 v17, v17, v23
+; SDAG-NEXT: v_min_u32_e32 v18, v18, v25
+; SDAG-NEXT: v_min_u32_e32 v19, v19, v27
+; SDAG-NEXT: v_add_i32_e32 v17, vcc, 64, v17
+; SDAG-NEXT: v_addc_u32_e64 v20, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_add_i32_e32 v19, vcc, 64, v19
+; SDAG-NEXT: v_addc_u32_e64 v21, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v20, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v16, v17, v16, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v21, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v18, v19, v18, vcc
+; SDAG-NEXT: v_sub_i32_e32 v23, vcc, v16, v18
+; SDAG-NEXT: v_subb_u32_e32 v24, vcc, v20, v17, vcc
+; SDAG-NEXT: v_xor_b32_e32 v16, 0x7f, v23
+; SDAG-NEXT: v_subbrev_u32_e32 v25, vcc, 0, v28, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[8:9], v[23:24]
+; SDAG-NEXT: v_cndmask_b32_e64 v18, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v26, vcc, 0, v28, vcc
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v25
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[25:26]
+; SDAG-NEXT: v_cndmask_b32_e64 v19, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v24, v26
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[25:26]
+; SDAG-NEXT: v_cndmask_b32_e32 v18, v19, v18, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[16:17]
+; SDAG-NEXT: v_and_b32_e32 v16, 1, v18
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v16
+; SDAG-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v3, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v2, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v1, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v19, v0, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB1_6
+; SDAG-NEXT: ; %bb.1: ; %udiv-bb15
+; SDAG-NEXT: v_add_i32_e32 v18, vcc, 1, v23
+; SDAG-NEXT: v_sub_i32_e64 v16, s[4:5], 63, v23
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: v_mov_b32_e32 v22, 0
+; SDAG-NEXT: v_addc_u32_e32 v27, vcc, 0, v24, vcc
+; SDAG-NEXT: v_lshl_b64 v[16:17], v[0:1], v16
+; SDAG-NEXT: v_addc_u32_e32 v28, vcc, 0, v25, vcc
+; SDAG-NEXT: v_addc_u32_e32 v29, vcc, 0, v26, vcc
+; SDAG-NEXT: v_or_b32_e32 v19, v18, v28
+; SDAG-NEXT: v_sub_i32_e32 v30, vcc, 0x7f, v23
+; SDAG-NEXT: v_or_b32_e32 v20, v27, v29
+; SDAG-NEXT: v_lshl_b64 v[23:24], v[2:3], v30
+; SDAG-NEXT: v_sub_i32_e32 v31, vcc, 64, v30
+; SDAG-NEXT: v_lshl_b64 v[25:26], v[0:1], v30
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[19:20]
+; SDAG-NEXT: v_lshr_b64 v[19:20], v[0:1], v31
+; SDAG-NEXT: v_or_b32_e32 v20, v24, v20
+; SDAG-NEXT: v_or_b32_e32 v19, v23, v19
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v30
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v17, v20, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v16, v19, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v24, 0, v26, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v23, 0, v25, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v30
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v17, v3, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v16, v2, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: v_mov_b32_e32 v20, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB1_5
+; SDAG-NEXT: ; %bb.2: ; %udiv-preheader4
+; SDAG-NEXT: v_lshr_b64 v[21:22], v[0:1], v18
+; SDAG-NEXT: v_sub_i32_e32 v31, vcc, 64, v18
+; SDAG-NEXT: v_subrev_i32_e32 v36, vcc, 64, v18
+; SDAG-NEXT: v_lshr_b64 v[32:33], v[2:3], v18
+; SDAG-NEXT: v_add_i32_e32 v30, vcc, -1, v8
+; SDAG-NEXT: s_mov_b64 s[12:13], 0
+; SDAG-NEXT: v_mov_b32_e32 v25, 0
+; SDAG-NEXT: v_mov_b32_e32 v26, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: v_mov_b32_e32 v20, 0
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v18
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v18
+; SDAG-NEXT: v_lshl_b64 v[34:35], v[2:3], v31
+; SDAG-NEXT: v_lshr_b64 v[36:37], v[2:3], v36
+; SDAG-NEXT: v_addc_u32_e32 v31, vcc, -1, v9, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v3, 0, v33, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, v32, s[4:5]
+; SDAG-NEXT: v_or_b32_e32 v22, v22, v35
+; SDAG-NEXT: v_or_b32_e32 v21, v21, v34
+; SDAG-NEXT: v_addc_u32_e32 v32, vcc, -1, v10, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v22, v37, v22, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v21, v36, v21, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, -1, v11, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v22, v1, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v0, v21, v0, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v22, 0
+; SDAG-NEXT: .LBB1_3: ; %udiv-do-while3
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshrrev_b32_e32 v21, 31, v24
+; SDAG-NEXT: v_lshl_b64 v[23:24], v[23:24], 1
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v34, 31, v1
+; SDAG-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v35, 31, v17
+; SDAG-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; SDAG-NEXT: v_or_b32_e32 v24, v26, v24
+; SDAG-NEXT: v_or_b32_e32 v23, v25, v23
+; SDAG-NEXT: v_or_b32_e32 v2, v2, v34
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v35
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v21
+; SDAG-NEXT: v_sub_i32_e32 v21, vcc, v30, v0
+; SDAG-NEXT: v_subb_u32_e32 v21, vcc, v31, v1, vcc
+; SDAG-NEXT: v_subb_u32_e32 v21, vcc, v32, v2, vcc
+; SDAG-NEXT: v_subb_u32_e32 v21, vcc, v33, v3, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v21, 31, v21
+; SDAG-NEXT: v_and_b32_e32 v25, v21, v8
+; SDAG-NEXT: v_and_b32_e32 v26, v21, v9
+; SDAG-NEXT: v_and_b32_e32 v34, v21, v10
+; SDAG-NEXT: v_and_b32_e32 v35, v21, v11
+; SDAG-NEXT: v_and_b32_e32 v21, 1, v21
+; SDAG-NEXT: v_sub_i32_e32 v0, vcc, v0, v25
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v1, v26, vcc
+; SDAG-NEXT: v_subb_u32_e32 v2, vcc, v2, v34, vcc
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v3, v35, vcc
+; SDAG-NEXT: v_add_i32_e32 v18, vcc, -1, v18
+; SDAG-NEXT: v_addc_u32_e32 v27, vcc, -1, v27, vcc
+; SDAG-NEXT: v_addc_u32_e32 v28, vcc, -1, v28, vcc
+; SDAG-NEXT: v_addc_u32_e32 v29, vcc, -1, v29, vcc
+; SDAG-NEXT: v_or_b32_e32 v25, v18, v28
+; SDAG-NEXT: v_or_b32_e32 v26, v27, v29
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[25:26]
+; SDAG-NEXT: v_or_b32_e32 v17, v20, v17
+; SDAG-NEXT: s_or_b64 s[12:13], vcc, s[12:13]
+; SDAG-NEXT: v_or_b32_e32 v16, v19, v16
+; SDAG-NEXT: v_mov_b32_e32 v26, v22
+; SDAG-NEXT: v_mov_b32_e32 v25, v21
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[12:13]
+; SDAG-NEXT: s_cbranch_execnz .LBB1_3
+; SDAG-NEXT: ; %bb.4: ; %Flow13
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB1_5: ; %Flow14
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: v_lshl_b64 v[0:1], v[16:17], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v8, 31, v24
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[23:24], 1
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v8
+; SDAG-NEXT: v_or_b32_e32 v16, v20, v1
+; SDAG-NEXT: v_or_b32_e32 v18, v22, v3
+; SDAG-NEXT: v_or_b32_e32 v17, v19, v0
+; SDAG-NEXT: v_or_b32_e32 v19, v21, v2
+; SDAG-NEXT: .LBB1_6: ; %Flow16
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_or_b32_e32 v1, v13, v15
+; SDAG-NEXT: v_or_b32_e32 v0, v12, v14
+; SDAG-NEXT: v_or_b32_e32 v3, v5, v7
+; SDAG-NEXT: v_or_b32_e32 v2, v4, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v8, v14
+; SDAG-NEXT: v_ffbh_u32_e32 v9, v15
+; SDAG-NEXT: v_ffbh_u32_e32 v10, v12
+; SDAG-NEXT: v_ffbh_u32_e32 v11, v13
+; SDAG-NEXT: v_ffbh_u32_e32 v20, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v7
+; SDAG-NEXT: v_ffbh_u32_e32 v22, v4
+; SDAG-NEXT: v_ffbh_u32_e32 v23, v5
+; SDAG-NEXT: v_mov_b32_e32 v24, 0
+; SDAG-NEXT: s_mov_b64 s[8:9], 0x7f
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[2:3]
+; SDAG-NEXT: v_add_i32_e64 v0, s[6:7], 32, v8
+; SDAG-NEXT: v_add_i32_e64 v1, s[6:7], 32, v10
+; SDAG-NEXT: v_add_i32_e64 v2, s[6:7], 32, v20
+; SDAG-NEXT: v_add_i32_e64 v3, s[6:7], 32, v22
+; SDAG-NEXT: s_or_b64 s[6:7], vcc, s[4:5]
+; SDAG-NEXT: v_min_u32_e32 v0, v0, v9
+; SDAG-NEXT: v_min_u32_e32 v1, v1, v11
+; SDAG-NEXT: v_min_u32_e32 v2, v2, v21
+; SDAG-NEXT: v_min_u32_e32 v3, v3, v23
+; SDAG-NEXT: v_add_i32_e32 v1, vcc, 64, v1
+; SDAG-NEXT: v_addc_u32_e64 v8, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_add_i32_e32 v3, vcc, 64, v3
+; SDAG-NEXT: v_addc_u32_e64 v9, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[14:15]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v8, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v9, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; SDAG-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v8, v1, vcc
+; SDAG-NEXT: v_xor_b32_e32 v8, 0x7f, v0
+; SDAG-NEXT: v_subbrev_u32_e32 v2, vcc, 0, v24, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[8:9], v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v24, vcc
+; SDAG-NEXT: v_or_b32_e32 v8, v8, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; SDAG-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v9, v1, v3
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; SDAG-NEXT: v_cndmask_b32_e32 v10, v11, v10, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; SDAG-NEXT: v_and_b32_e32 v8, 1, v10
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v8
+; SDAG-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v7, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v6, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, v5, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v11, v4, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB1_12
+; SDAG-NEXT: ; %bb.7: ; %udiv-bb1
+; SDAG-NEXT: v_add_i32_e32 v8, vcc, 1, v0
+; SDAG-NEXT: v_sub_i32_e64 v9, s[4:5], 63, v0
+; SDAG-NEXT: v_mov_b32_e32 v20, 0
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: v_addc_u32_e32 v11, vcc, 0, v1, vcc
+; SDAG-NEXT: v_lshl_b64 v[9:10], v[4:5], v9
+; SDAG-NEXT: v_addc_u32_e32 v24, vcc, 0, v2, vcc
+; SDAG-NEXT: v_addc_u32_e32 v25, vcc, 0, v3, vcc
+; SDAG-NEXT: v_or_b32_e32 v1, v8, v24
+; SDAG-NEXT: v_sub_i32_e32 v3, vcc, 0x7f, v0
+; SDAG-NEXT: v_or_b32_e32 v2, v11, v25
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[6:7], v3
+; SDAG-NEXT: v_sub_i32_e32 v0, vcc, 64, v3
+; SDAG-NEXT: v_lshl_b64 v[26:27], v[4:5], v3
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[1:2]
+; SDAG-NEXT: v_lshr_b64 v[0:1], v[4:5], v0
+; SDAG-NEXT: v_or_b32_e32 v1, v23, v1
+; SDAG-NEXT: v_or_b32_e32 v0, v22, v0
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v3
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v10, v1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v9, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v1, 0, v27, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, v26, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v3
+; SDAG-NEXT: v_cndmask_b32_e64 v3, v2, v7, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v9, v6, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v9, 0
+; SDAG-NEXT: v_mov_b32_e32 v10, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB1_11
+; SDAG-NEXT: ; %bb.8: ; %udiv-preheader
+; SDAG-NEXT: v_lshr_b64 v[20:21], v[4:5], v8
+; SDAG-NEXT: v_sub_i32_e32 v27, vcc, 64, v8
+; SDAG-NEXT: v_subrev_i32_e32 v28, vcc, 64, v8
+; SDAG-NEXT: v_lshr_b64 v[29:30], v[6:7], v8
+; SDAG-NEXT: v_add_i32_e32 v26, vcc, -1, v12
+; SDAG-NEXT: s_mov_b64 s[10:11], 0
+; SDAG-NEXT: v_mov_b32_e32 v22, 0
+; SDAG-NEXT: v_mov_b32_e32 v23, 0
+; SDAG-NEXT: v_mov_b32_e32 v9, 0
+; SDAG-NEXT: v_mov_b32_e32 v10, 0
+; SDAG-NEXT: v_lshl_b64 v[31:32], v[6:7], v27
+; SDAG-NEXT: v_lshr_b64 v[6:7], v[6:7], v28
+; SDAG-NEXT: v_addc_u32_e32 v27, vcc, -1, v13, vcc
+; SDAG-NEXT: v_or_b32_e32 v21, v21, v32
+; SDAG-NEXT: v_or_b32_e32 v20, v20, v31
+; SDAG-NEXT: v_addc_u32_e32 v28, vcc, -1, v14, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v8
+; SDAG-NEXT: v_cndmask_b32_e64 v21, v7, v21, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v6, v20, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v7, 0, v30, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v6, 0, v29, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v29, vcc, -1, v15, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
+; SDAG-NEXT: v_cndmask_b32_e32 v5, v21, v5, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v4, v20, v4, vcc
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: .LBB1_9: ; %udiv-do-while
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v20, 31, v5
+; SDAG-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v30, 31, v3
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v31, 31, v1
+; SDAG-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
+; SDAG-NEXT: v_or_b32_e32 v6, v6, v20
+; SDAG-NEXT: v_or_b32_e32 v4, v4, v30
+; SDAG-NEXT: v_or_b32_e32 v2, v2, v31
+; SDAG-NEXT: v_or_b32_e32 v3, v10, v3
+; SDAG-NEXT: v_or_b32_e32 v1, v23, v1
+; SDAG-NEXT: v_or_b32_e32 v2, v9, v2
+; SDAG-NEXT: v_sub_i32_e32 v20, vcc, v26, v4
+; SDAG-NEXT: v_subb_u32_e32 v20, vcc, v27, v5, vcc
+; SDAG-NEXT: v_subb_u32_e32 v20, vcc, v28, v6, vcc
+; SDAG-NEXT: v_subb_u32_e32 v20, vcc, v29, v7, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v23, 31, v20
+; SDAG-NEXT: v_and_b32_e32 v20, 1, v23
+; SDAG-NEXT: v_and_b32_e32 v30, v23, v15
+; SDAG-NEXT: v_and_b32_e32 v31, v23, v14
+; SDAG-NEXT: v_and_b32_e32 v32, v23, v13
+; SDAG-NEXT: v_and_b32_e32 v23, v23, v12
+; SDAG-NEXT: v_sub_i32_e32 v4, vcc, v4, v23
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v5, v32, vcc
+; SDAG-NEXT: v_subb_u32_e32 v6, vcc, v6, v31, vcc
+; SDAG-NEXT: v_subb_u32_e32 v7, vcc, v7, v30, vcc
+; SDAG-NEXT: v_add_i32_e32 v8, vcc, -1, v8
+; SDAG-NEXT: v_addc_u32_e32 v11, vcc, -1, v11, vcc
+; SDAG-NEXT: v_addc_u32_e32 v24, vcc, -1, v24, vcc
+; SDAG-NEXT: v_addc_u32_e32 v25, vcc, -1, v25, vcc
+; SDAG-NEXT: v_or_b32_e32 v31, v11, v25
+; SDAG-NEXT: v_or_b32_e32 v30, v8, v24
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[30:31]
+; SDAG-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; SDAG-NEXT: v_or_b32_e32 v0, v22, v0
+; SDAG-NEXT: v_mov_b32_e32 v23, v21
+; SDAG-NEXT: v_mov_b32_e32 v22, v20
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; SDAG-NEXT: s_cbranch_execnz .LBB1_9
+; SDAG-NEXT: ; %bb.10: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: .LBB1_11: ; %Flow11
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v4, 31, v1
+; SDAG-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
+; SDAG-NEXT: v_or_b32_e32 v2, v2, v4
+; SDAG-NEXT: v_or_b32_e32 v8, v10, v3
+; SDAG-NEXT: v_or_b32_e32 v10, v21, v1
+; SDAG-NEXT: v_or_b32_e32 v9, v9, v2
+; SDAG-NEXT: v_or_b32_e32 v11, v20, v0
+; SDAG-NEXT: .LBB1_12: ; %Flow12
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, v19
+; SDAG-NEXT: v_mov_b32_e32 v1, v18
+; SDAG-NEXT: v_mov_b32_e32 v2, v17
+; SDAG-NEXT: v_mov_b32_e32 v3, v16
+; SDAG-NEXT: v_mov_b32_e32 v4, v11
+; SDAG-NEXT: v_mov_b32_e32 v5, v10
+; SDAG-NEXT: v_mov_b32_e32 v6, v9
+; SDAG-NEXT: v_mov_b32_e32 v7, v8
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: v_udiv_v2i128_vv:
+; GISEL: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_mov_b32_e32 v16, v2
+; GISEL-NEXT: v_mov_b32_e32 v17, v3
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_or_b32_e32 v2, v8, v10
+; GISEL-NEXT: v_or_b32_e32 v3, v9, v11
+; GISEL-NEXT: v_or_b32_e32 v18, v0, v16
+; GISEL-NEXT: v_or_b32_e32 v19, v1, v17
+; GISEL-NEXT: v_ffbh_u32_e32 v20, v9
+; GISEL-NEXT: v_ffbh_u32_e32 v21, v8
+; GISEL-NEXT: v_ffbh_u32_e32 v22, v11
+; GISEL-NEXT: v_ffbh_u32_e32 v23, v10
+; GISEL-NEXT: v_ffbh_u32_e32 v26, v1
+; GISEL-NEXT: v_ffbh_u32_e32 v27, v0
+; GISEL-NEXT: v_ffbh_u32_e32 v28, v17
+; GISEL-NEXT: v_ffbh_u32_e32 v29, v16
+; GISEL-NEXT: v_mov_b32_e32 v24, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v25, 0
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[18:19]
+; GISEL-NEXT: v_add_i32_e64 v2, s[6:7], 32, v21
+; GISEL-NEXT: v_add_i32_e64 v3, s[6:7], 32, v23
+; GISEL-NEXT: v_add_i32_e64 v18, s[6:7], 32, v27
+; GISEL-NEXT: v_add_i32_e64 v19, s[6:7], 32, v29
+; GISEL-NEXT: v_min_u32_e32 v2, v20, v2
+; GISEL-NEXT: v_min_u32_e32 v3, v22, v3
+; GISEL-NEXT: v_min_u32_e32 v18, v26, v18
+; GISEL-NEXT: v_min_u32_e32 v19, v28, v19
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v26, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e32 v2, vcc, 64, v2
+; GISEL-NEXT: v_add_i32_e32 v18, vcc, 64, v18
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v19, v18, vcc
+; GISEL-NEXT: v_sub_i32_e32 v20, vcc, v2, v3
+; GISEL-NEXT: v_subb_u32_e64 v21, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v22, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v23, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[20:21], v[24:25]
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v2, 0x7f, v20
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[22:23]
+; GISEL-NEXT: v_cndmask_b32_e64 v19, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v22
+; GISEL-NEXT: v_or_b32_e32 v3, v21, v23
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[22:23]
+; GISEL-NEXT: v_cndmask_b32_e32 v18, v19, v18, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v3, v26, v18
+; GISEL-NEXT: v_and_b32_e32 v18, 1, v3
+; GISEL-NEXT: v_or_b32_e32 v2, v3, v2
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GISEL-NEXT: v_cndmask_b32_e64 v18, v0, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v24, 1, v2
+; GISEL-NEXT: v_cndmask_b32_e64 v19, v1, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v2, v16, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v3, v17, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v24
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB1_6
+; GISEL-NEXT: ; %bb.1: ; %udiv-bb15
+; GISEL-NEXT: v_add_i32_e32 v26, vcc, 1, v20
+; GISEL-NEXT: v_addc_u32_e64 v27, s[4:5], 0, v21, vcc
+; GISEL-NEXT: v_sub_i32_e32 v30, vcc, 0x7f, v20
+; GISEL-NEXT: v_addc_u32_e64 v28, vcc, 0, v22, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v29, vcc, 0, v23, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v22, s[4:5], 64, v30
+; GISEL-NEXT: v_sub_i32_e64 v20, s[4:5], 64, v30
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[0:1], v30
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[16:17], v30
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[20:21], v[0:1], v20
+; GISEL-NEXT: v_lshl_b64 v[24:25], v[0:1], v22
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v30
+; GISEL-NEXT: v_cndmask_b32_e32 v22, 0, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v23, 0, v3, vcc
+; GISEL-NEXT: v_or_b32_e32 v2, v20, v18
+; GISEL-NEXT: v_or_b32_e32 v3, v21, v19
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v24, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v25, v3, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v21, s11
+; GISEL-NEXT: v_mov_b32_e32 v20, s10
+; GISEL-NEXT: v_mov_b32_e32 v19, s9
+; GISEL-NEXT: v_mov_b32_e32 v18, s8
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[8:9], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB1_5
+; GISEL-NEXT: ; %bb.2: ; %udiv-preheader4
+; GISEL-NEXT: v_subrev_i32_e32 v32, vcc, 64, v26
+; GISEL-NEXT: v_sub_i32_e32 v24, vcc, 64, v26
+; GISEL-NEXT: v_lshr_b64 v[18:19], v[16:17], v26
+; GISEL-NEXT: v_lshr_b64 v[20:21], v[0:1], v26
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_add_i32_e32 v30, vcc, -1, v8
+; GISEL-NEXT: v_addc_u32_e32 v31, vcc, -1, v9, vcc
+; GISEL-NEXT: v_lshl_b64 v[24:25], v[16:17], v24
+; GISEL-NEXT: v_lshr_b64 v[16:17], v[16:17], v32
+; GISEL-NEXT: v_addc_u32_e32 v32, vcc, -1, v10, vcc
+; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v11, vcc
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_or_b32_e32 v20, v20, v24
+; GISEL-NEXT: v_or_b32_e32 v21, v21, v25
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v20, v16, v20, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v21, v17, v21, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v16, 0, v18, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v17, 0, v19, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v24, v20, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v25, v21, v1, vcc
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_mov_b32_e32 v21, s7
+; GISEL-NEXT: v_mov_b32_e32 v20, s6
+; GISEL-NEXT: v_mov_b32_e32 v19, s5
+; GISEL-NEXT: v_mov_b32_e32 v18, s4
+; GISEL-NEXT: .LBB1_3: ; %udiv-do-while3
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshrrev_b32_e32 v34, 31, v23
+; GISEL-NEXT: v_lshl_b64 v[20:21], v[22:23], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v0, 31, v25
+; GISEL-NEXT: v_lshl_b64 v[24:25], v[24:25], 1
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v35, 31, v3
+; GISEL-NEXT: v_add_i32_e32 v26, vcc, -1, v26
+; GISEL-NEXT: v_addc_u32_e32 v27, vcc, -1, v27, vcc
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
+; GISEL-NEXT: v_or_b32_e32 v22, v18, v20
+; GISEL-NEXT: v_or_b32_e32 v23, v19, v21
+; GISEL-NEXT: v_or_b32_e32 v16, v16, v0
+; GISEL-NEXT: v_or_b32_e32 v20, v24, v35
+; GISEL-NEXT: v_addc_u32_e32 v28, vcc, -1, v28, vcc
+; GISEL-NEXT: v_addc_u32_e32 v29, vcc, -1, v29, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v30, v20
+; GISEL-NEXT: v_subb_u32_e32 v0, vcc, v31, v25, vcc
+; GISEL-NEXT: v_or_b32_e32 v18, v26, v28
+; GISEL-NEXT: v_or_b32_e32 v19, v27, v29
+; GISEL-NEXT: v_subb_u32_e32 v0, vcc, v32, v16, vcc
+; GISEL-NEXT: v_subb_u32_e32 v0, vcc, v33, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v0
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v18, v0, v8
+; GISEL-NEXT: v_and_b32_e32 v19, v0, v9
+; GISEL-NEXT: v_and_b32_e32 v21, v0, v10
+; GISEL-NEXT: v_and_b32_e32 v35, v0, v11
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_sub_i32_e32 v24, vcc, v20, v18
+; GISEL-NEXT: v_subb_u32_e32 v25, vcc, v25, v19, vcc
+; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v16, v21, vcc
+; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v17, v35, vcc
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v34
+; GISEL-NEXT: v_mov_b32_e32 v19, v1
+; GISEL-NEXT: v_mov_b32_e32 v18, v0
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execnz .LBB1_3
+; GISEL-NEXT: ; %bb.4: ; %Flow13
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB1_5: ; %Flow14
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_b64 v[0:1], v[22:23], 1
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v8, 31, v23
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v8
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v0
+; GISEL-NEXT: v_or_b32_e32 v19, v19, v1
+; GISEL-NEXT: .LBB1_6: ; %Flow16
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_or_b32_e32 v0, v12, v14
+; GISEL-NEXT: v_or_b32_e32 v1, v13, v15
+; GISEL-NEXT: v_or_b32_e32 v8, v4, v6
+; GISEL-NEXT: v_or_b32_e32 v9, v5, v7
+; GISEL-NEXT: v_ffbh_u32_e32 v16, v13
+; GISEL-NEXT: v_ffbh_u32_e32 v17, v12
+; GISEL-NEXT: v_ffbh_u32_e32 v20, v15
+; GISEL-NEXT: v_ffbh_u32_e32 v21, v14
+; GISEL-NEXT: v_ffbh_u32_e32 v22, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v23, v4
+; GISEL-NEXT: v_ffbh_u32_e32 v24, v7
+; GISEL-NEXT: v_ffbh_u32_e32 v25, v6
+; GISEL-NEXT: v_mov_b32_e32 v10, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v11, 0
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[8:9]
+; GISEL-NEXT: v_add_i32_e64 v0, s[6:7], 32, v17
+; GISEL-NEXT: v_add_i32_e64 v1, s[6:7], 32, v21
+; GISEL-NEXT: v_add_i32_e64 v8, s[6:7], 32, v23
+; GISEL-NEXT: v_add_i32_e64 v9, s[6:7], 32, v25
+; GISEL-NEXT: v_min_u32_e32 v0, v16, v0
+; GISEL-NEXT: v_min_u32_e32 v1, v20, v1
+; GISEL-NEXT: v_min_u32_e32 v8, v22, v8
+; GISEL-NEXT: v_min_u32_e32 v9, v24, v9
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v20, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e32 v0, vcc, 64, v0
+; GISEL-NEXT: v_add_i32_e32 v8, vcc, 64, v8
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v9, v8, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v16, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v17, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[10:11]
+; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v8, 0x7f, v0
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[16:17]
+; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v8, v8, v16
+; GISEL-NEXT: v_or_b32_e32 v9, v1, v17
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; GISEL-NEXT: v_cndmask_b32_e32 v10, v11, v10, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v9, v20, v10
+; GISEL-NEXT: v_and_b32_e32 v10, 1, v9
+; GISEL-NEXT: v_or_b32_e32 v8, v9, v8
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GISEL-NEXT: v_cndmask_b32_e64 v10, v4, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v20, 1, v8
+; GISEL-NEXT: v_cndmask_b32_e64 v11, v5, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v8, v6, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v9, v7, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v20
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB1_12
+; GISEL-NEXT: ; %bb.7: ; %udiv-bb1
+; GISEL-NEXT: v_add_i32_e32 v8, vcc, 1, v0
+; GISEL-NEXT: v_addc_u32_e64 v11, s[4:5], 0, v1, vcc
+; GISEL-NEXT: v_sub_i32_e32 v26, vcc, 0x7f, v0
+; GISEL-NEXT: v_addc_u32_e64 v24, vcc, 0, v16, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v25, vcc, 0, v17, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v9, s[4:5], 64, v26
+; GISEL-NEXT: v_sub_i32_e64 v10, s[4:5], 64, v26
+; GISEL-NEXT: v_lshl_b64 v[0:1], v[4:5], v26
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[6:7], v26
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[20:21], v[4:5], v10
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[4:5], v9
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v9, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v1, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v20, v16
+; GISEL-NEXT: v_or_b32_e32 v1, v21, v17
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v22, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v23, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v23, s11
+; GISEL-NEXT: v_mov_b32_e32 v22, s10
+; GISEL-NEXT: v_mov_b32_e32 v21, s9
+; GISEL-NEXT: v_mov_b32_e32 v20, s8
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[8:9], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB1_11
+; GISEL-NEXT: ; %bb.8: ; %udiv-preheader
+; GISEL-NEXT: v_subrev_i32_e32 v28, vcc, 64, v8
+; GISEL-NEXT: v_sub_i32_e32 v22, vcc, 64, v8
+; GISEL-NEXT: v_lshr_b64 v[16:17], v[6:7], v8
+; GISEL-NEXT: v_lshr_b64 v[20:21], v[4:5], v8
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_add_i32_e32 v26, vcc, -1, v12
+; GISEL-NEXT: v_addc_u32_e32 v27, vcc, -1, v13, vcc
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[6:7], v22
+; GISEL-NEXT: v_lshr_b64 v[6:7], v[6:7], v28
+; GISEL-NEXT: v_addc_u32_e32 v28, vcc, -1, v14, vcc
+; GISEL-NEXT: v_addc_u32_e32 v29, vcc, -1, v15, vcc
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_or_b32_e32 v20, v20, v22
+; GISEL-NEXT: v_or_b32_e32 v21, v21, v23
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v8
+; GISEL-NEXT: v_cndmask_b32_e32 v6, v6, v20, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v7, v7, v21, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v16, 0, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v17, 0, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
+; GISEL-NEXT: v_cndmask_b32_e32 v6, v6, v4, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v7, v7, v5, vcc
+; GISEL-NEXT: v_mov_b32_e32 v5, 0
+; GISEL-NEXT: v_mov_b32_e32 v23, s7
+; GISEL-NEXT: v_mov_b32_e32 v22, s6
+; GISEL-NEXT: v_mov_b32_e32 v21, s5
+; GISEL-NEXT: v_mov_b32_e32 v20, s4
+; GISEL-NEXT: .LBB1_9: ; %udiv-do-while
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[6:7], 1
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v4, 31, v7
+; GISEL-NEXT: v_lshrrev_b32_e32 v30, 31, v1
+; GISEL-NEXT: v_lshl_b64 v[6:7], v[9:10], 1
+; GISEL-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v9, 31, v10
+; GISEL-NEXT: v_add_i32_e32 v8, vcc, -1, v8
+; GISEL-NEXT: v_addc_u32_e32 v11, vcc, -1, v11, vcc
+; GISEL-NEXT: v_or_b32_e32 v16, v16, v4
+; GISEL-NEXT: v_or_b32_e32 v22, v22, v30
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v9
+; GISEL-NEXT: v_or_b32_e32 v9, v20, v6
+; GISEL-NEXT: v_or_b32_e32 v10, v21, v7
+; GISEL-NEXT: v_addc_u32_e32 v24, vcc, -1, v24, vcc
+; GISEL-NEXT: v_addc_u32_e32 v25, vcc, -1, v25, vcc
+; GISEL-NEXT: v_sub_i32_e32 v4, vcc, v26, v22
+; GISEL-NEXT: v_subb_u32_e32 v4, vcc, v27, v23, vcc
+; GISEL-NEXT: v_or_b32_e32 v6, v8, v24
+; GISEL-NEXT: v_or_b32_e32 v7, v11, v25
+; GISEL-NEXT: v_subb_u32_e32 v4, vcc, v28, v16, vcc
+; GISEL-NEXT: v_subb_u32_e32 v4, vcc, v29, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GISEL-NEXT: v_ashrrev_i32_e32 v6, 31, v4
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v4, 1, v6
+; GISEL-NEXT: v_and_b32_e32 v7, v6, v12
+; GISEL-NEXT: v_and_b32_e32 v30, v6, v13
+; GISEL-NEXT: v_and_b32_e32 v31, v6, v14
+; GISEL-NEXT: v_and_b32_e32 v32, v6, v15
+; GISEL-NEXT: v_mov_b32_e32 v21, v5
+; GISEL-NEXT: v_mov_b32_e32 v20, v4
+; GISEL-NEXT: v_sub_i32_e32 v6, vcc, v22, v7
+; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v23, v30, vcc
+; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v16, v31, vcc
+; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v17, v32, vcc
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execnz .LBB1_9
+; GISEL-NEXT: ; %bb.10: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB1_11: ; %Flow11
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_b64 v[4:5], v[9:10], 1
+; GISEL-NEXT: v_lshl_b64 v[8:9], v[0:1], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v0, 31, v10
+; GISEL-NEXT: v_or_b32_e32 v8, v8, v0
+; GISEL-NEXT: v_or_b32_e32 v10, v20, v4
+; GISEL-NEXT: v_or_b32_e32 v11, v21, v5
+; GISEL-NEXT: .LBB1_12: ; %Flow12
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: v_mov_b32_e32 v0, v18
+; GISEL-NEXT: v_mov_b32_e32 v1, v19
+; GISEL-NEXT: v_mov_b32_e32 v4, v10
+; GISEL-NEXT: v_mov_b32_e32 v5, v11
+; GISEL-NEXT: v_mov_b32_e32 v6, v8
+; GISEL-NEXT: v_mov_b32_e32 v7, v9
+; GISEL-NEXT: s_setpc_b64 s[30:31]
%shl = udiv <2 x i128> %lhs, %rhs
ret <2 x i128> %shl
}
define <2 x i128> @v_srem_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
+; SDAG-LABEL: v_srem_v2i128_vv:
+; SDAG: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
+; SDAG-NEXT: v_ashrrev_i32_e32 v28, 31, v3
+; SDAG-NEXT: v_ashrrev_i32_e32 v16, 31, v11
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: s_mov_b64 s[10:11], 0x7f
+; SDAG-NEXT: v_mov_b32_e32 v29, v28
+; SDAG-NEXT: v_xor_b32_e32 v18, v3, v28
+; SDAG-NEXT: v_xor_b32_e32 v19, v2, v28
+; SDAG-NEXT: v_xor_b32_e32 v1, v1, v28
+; SDAG-NEXT: v_xor_b32_e32 v0, v0, v28
+; SDAG-NEXT: v_xor_b32_e32 v11, v11, v16
+; SDAG-NEXT: v_xor_b32_e32 v10, v10, v16
+; SDAG-NEXT: v_xor_b32_e32 v20, v9, v16
+; SDAG-NEXT: v_xor_b32_e32 v9, v8, v16
+; SDAG-NEXT: v_sub_i32_e32 v2, vcc, v0, v28
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v1, v28, vcc
+; SDAG-NEXT: v_ffbh_u32_e32 v1, v2
+; SDAG-NEXT: v_subb_u32_e32 v0, vcc, v19, v28, vcc
+; SDAG-NEXT: v_add_i32_e64 v19, s[4:5], 32, v1
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v3
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v18, v28, vcc
+; SDAG-NEXT: v_or_b32_e32 v8, v2, v0
+; SDAG-NEXT: v_ffbh_u32_e32 v18, v0
+; SDAG-NEXT: v_min_u32_e32 v19, v19, v21
+; SDAG-NEXT: v_sub_i32_e32 v31, vcc, v9, v16
+; SDAG-NEXT: v_or_b32_e32 v9, v3, v1
+; SDAG-NEXT: v_add_i32_e64 v18, s[4:5], 32, v18
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v1
+; SDAG-NEXT: v_add_i32_e64 v19, s[4:5], 64, v19
+; SDAG-NEXT: v_addc_u32_e64 v22, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_subb_u32_e32 v30, vcc, v20, v16, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[8:9]
+; SDAG-NEXT: v_ffbh_u32_e32 v9, v31
+; SDAG-NEXT: v_min_u32_e32 v18, v18, v21
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v22, 0, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v8, vcc, v10, v16, vcc
+; SDAG-NEXT: v_add_i32_e64 v21, s[8:9], 32, v9
+; SDAG-NEXT: v_ffbh_u32_e32 v22, v30
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v19, v18, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v9, vcc, v11, v16, vcc
+; SDAG-NEXT: v_or_b32_e32 v10, v31, v8
+; SDAG-NEXT: v_ffbh_u32_e32 v16, v8
+; SDAG-NEXT: v_min_u32_e32 v19, v21, v22
+; SDAG-NEXT: v_or_b32_e32 v11, v30, v9
+; SDAG-NEXT: v_add_i32_e32 v16, vcc, 32, v16
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v9
+; SDAG-NEXT: v_add_i32_e32 v19, vcc, 64, v19
+; SDAG-NEXT: v_addc_u32_e64 v22, s[6:7], 0, 0, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; SDAG-NEXT: v_min_u32_e32 v10, v16, v21
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[8:9]
+; SDAG-NEXT: v_cndmask_b32_e64 v11, v22, 0, s[6:7]
+; SDAG-NEXT: s_or_b64 s[8:9], vcc, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, v19, v10, s[6:7]
+; SDAG-NEXT: v_sub_i32_e32 v10, vcc, v10, v18
+; SDAG-NEXT: v_subb_u32_e32 v11, vcc, v11, v20, vcc
+; SDAG-NEXT: v_xor_b32_e32 v16, 0x7f, v10
+; SDAG-NEXT: v_subbrev_u32_e32 v18, vcc, 0, v17, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[10:11], v[10:11]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v19, vcc, 0, v17, vcc
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v18
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_cndmask_b32_e64 v21, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v11, v19
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_cndmask_b32_e32 v20, v21, v20, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[16:17]
+; SDAG-NEXT: v_and_b32_e32 v16, 1, v20
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v16
+; SDAG-NEXT: s_or_b64 s[4:5], s[8:9], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v35, v1, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v32, v0, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v27, v3, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v33, v2, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB2_6
+; SDAG-NEXT: ; %bb.1: ; %udiv-bb15
+; SDAG-NEXT: v_add_i32_e32 v32, vcc, 1, v10
+; SDAG-NEXT: v_sub_i32_e64 v20, s[4:5], 63, v10
+; SDAG-NEXT: v_mov_b32_e32 v16, 0
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, 0, v11, vcc
+; SDAG-NEXT: v_lshl_b64 v[20:21], v[2:3], v20
+; SDAG-NEXT: v_addc_u32_e32 v34, vcc, 0, v18, vcc
+; SDAG-NEXT: v_addc_u32_e32 v35, vcc, 0, v19, vcc
+; SDAG-NEXT: v_or_b32_e32 v18, v32, v34
+; SDAG-NEXT: v_sub_i32_e32 v24, vcc, 0x7f, v10
+; SDAG-NEXT: v_or_b32_e32 v19, v33, v35
+; SDAG-NEXT: v_lshl_b64 v[10:11], v[0:1], v24
+; SDAG-NEXT: v_sub_i32_e32 v25, vcc, 64, v24
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[2:3], v24
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_lshr_b64 v[18:19], v[2:3], v25
+; SDAG-NEXT: v_or_b32_e32 v11, v11, v19
+; SDAG-NEXT: v_or_b32_e32 v10, v10, v18
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v24
+; SDAG-NEXT: v_cndmask_b32_e64 v11, v21, v11, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, v20, v10, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v21, 0, v23, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, 0, v22, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v24
+; SDAG-NEXT: v_cndmask_b32_e64 v11, v11, v1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v10, v10, v0, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v18, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB2_5
+; SDAG-NEXT: ; %bb.2: ; %udiv-preheader4
+; SDAG-NEXT: v_lshr_b64 v[16:17], v[2:3], v32
+; SDAG-NEXT: v_sub_i32_e32 v26, vcc, 64, v32
+; SDAG-NEXT: v_subrev_i32_e32 v37, vcc, 64, v32
+; SDAG-NEXT: v_lshr_b64 v[24:25], v[0:1], v32
+; SDAG-NEXT: v_add_i32_e32 v36, vcc, -1, v31
+; SDAG-NEXT: s_mov_b64 s[10:11], 0
+; SDAG-NEXT: v_mov_b32_e32 v22, 0
+; SDAG-NEXT: v_mov_b32_e32 v23, 0
+; SDAG-NEXT: v_mov_b32_e32 v18, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: v_lshl_b64 v[26:27], v[0:1], v26
+; SDAG-NEXT: v_lshr_b64 v[48:49], v[0:1], v37
+; SDAG-NEXT: v_addc_u32_e32 v37, vcc, -1, v30, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v17, v27
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v26
+; SDAG-NEXT: v_addc_u32_e32 v38, vcc, -1, v8, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v32
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v49, v17, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v48, v16, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v27, 0, v25, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v26, 0, v24, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v39, vcc, -1, v9, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v32
+; SDAG-NEXT: v_cndmask_b32_e32 v25, v17, v3, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v24, v16, v2, vcc
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: .LBB2_3: ; %udiv-do-while3
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshl_b64 v[26:27], v[26:27], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v16, 31, v25
+; SDAG-NEXT: v_lshl_b64 v[24:25], v[24:25], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v48, 31, v11
+; SDAG-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v49, 31, v21
+; SDAG-NEXT: v_lshl_b64 v[20:21], v[20:21], 1
+; SDAG-NEXT: v_or_b32_e32 v26, v26, v16
+; SDAG-NEXT: v_or_b32_e32 v24, v24, v48
+; SDAG-NEXT: v_or_b32_e32 v10, v10, v49
+; SDAG-NEXT: v_or_b32_e32 v11, v19, v11
+; SDAG-NEXT: v_sub_i32_e32 v16, vcc, v36, v24
+; SDAG-NEXT: v_or_b32_e32 v10, v18, v10
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v37, v25, vcc
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v38, v26, vcc
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v39, v27, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v16, 31, v16
+; SDAG-NEXT: v_and_b32_e32 v48, v16, v31
+; SDAG-NEXT: v_and_b32_e32 v49, v16, v30
+; SDAG-NEXT: v_and_b32_e32 v50, v16, v8
+; SDAG-NEXT: v_and_b32_e32 v51, v16, v9
+; SDAG-NEXT: v_and_b32_e32 v16, 1, v16
+; SDAG-NEXT: v_sub_i32_e32 v24, vcc, v24, v48
+; SDAG-NEXT: v_subb_u32_e32 v25, vcc, v25, v49, vcc
+; SDAG-NEXT: v_subb_u32_e32 v26, vcc, v26, v50, vcc
+; SDAG-NEXT: v_subb_u32_e32 v27, vcc, v27, v51, vcc
+; SDAG-NEXT: v_add_i32_e32 v32, vcc, -1, v32
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, -1, v33, vcc
+; SDAG-NEXT: v_addc_u32_e32 v34, vcc, -1, v34, vcc
+; SDAG-NEXT: v_addc_u32_e32 v35, vcc, -1, v35, vcc
+; SDAG-NEXT: v_or_b32_e32 v48, v32, v34
+; SDAG-NEXT: v_or_b32_e32 v49, v33, v35
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[48:49]
+; SDAG-NEXT: v_or_b32_e32 v21, v23, v21
+; SDAG-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; SDAG-NEXT: v_or_b32_e32 v20, v22, v20
+; SDAG-NEXT: v_mov_b32_e32 v23, v17
+; SDAG-NEXT: v_mov_b32_e32 v22, v16
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; SDAG-NEXT: s_cbranch_execnz .LBB2_3
+; SDAG-NEXT: ; %bb.4: ; %Flow13
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: .LBB2_5: ; %Flow14
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v22, 31, v21
+; SDAG-NEXT: v_lshl_b64 v[20:21], v[20:21], 1
+; SDAG-NEXT: v_or_b32_e32 v10, v10, v22
+; SDAG-NEXT: v_or_b32_e32 v35, v19, v11
+; SDAG-NEXT: v_or_b32_e32 v27, v17, v21
+; SDAG-NEXT: v_or_b32_e32 v32, v18, v10
+; SDAG-NEXT: v_or_b32_e32 v33, v16, v20
+; SDAG-NEXT: .LBB2_6: ; %Flow16
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_ashrrev_i32_e32 v26, 31, v7
+; SDAG-NEXT: v_ashrrev_i32_e32 v16, 31, v15
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: s_mov_b64 s[10:11], 0x7f
+; SDAG-NEXT: v_mov_b32_e32 v34, v26
+; SDAG-NEXT: v_xor_b32_e32 v10, v7, v26
+; SDAG-NEXT: v_xor_b32_e32 v11, v6, v26
+; SDAG-NEXT: v_xor_b32_e32 v5, v5, v26
+; SDAG-NEXT: v_xor_b32_e32 v4, v4, v26
+; SDAG-NEXT: v_xor_b32_e32 v15, v15, v16
+; SDAG-NEXT: v_xor_b32_e32 v14, v14, v16
+; SDAG-NEXT: v_xor_b32_e32 v13, v13, v16
+; SDAG-NEXT: v_xor_b32_e32 v12, v12, v16
+; SDAG-NEXT: v_sub_i32_e32 v6, vcc, v4, v26
+; SDAG-NEXT: v_subb_u32_e32 v7, vcc, v5, v26, vcc
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v6
+; SDAG-NEXT: v_subb_u32_e32 v4, vcc, v11, v26, vcc
+; SDAG-NEXT: v_add_i32_e64 v11, s[4:5], 32, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v18, v7
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v10, v26, vcc
+; SDAG-NEXT: v_or_b32_e32 v10, v6, v4
+; SDAG-NEXT: v_ffbh_u32_e32 v19, v4
+; SDAG-NEXT: v_min_u32_e32 v18, v11, v18
+; SDAG-NEXT: v_sub_i32_e32 v37, vcc, v12, v16
+; SDAG-NEXT: v_or_b32_e32 v11, v7, v5
+; SDAG-NEXT: v_add_i32_e64 v12, s[4:5], 32, v19
+; SDAG-NEXT: v_ffbh_u32_e32 v19, v5
+; SDAG-NEXT: v_add_i32_e64 v18, s[4:5], 64, v18
+; SDAG-NEXT: v_addc_u32_e64 v20, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_subb_u32_e32 v36, vcc, v13, v16, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
+; SDAG-NEXT: v_ffbh_u32_e32 v11, v37
+; SDAG-NEXT: v_min_u32_e32 v12, v12, v19
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v19, v20, 0, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v10, vcc, v14, v16, vcc
+; SDAG-NEXT: v_add_i32_e64 v13, s[8:9], 32, v11
+; SDAG-NEXT: v_ffbh_u32_e32 v14, v36
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v18, v12, s[6:7]
+; SDAG-NEXT: v_subb_u32_e32 v11, vcc, v15, v16, vcc
+; SDAG-NEXT: v_or_b32_e32 v12, v37, v10
+; SDAG-NEXT: v_ffbh_u32_e32 v15, v10
+; SDAG-NEXT: v_min_u32_e32 v14, v13, v14
+; SDAG-NEXT: v_or_b32_e32 v13, v36, v11
+; SDAG-NEXT: v_add_i32_e32 v15, vcc, 32, v15
+; SDAG-NEXT: v_ffbh_u32_e32 v16, v11
+; SDAG-NEXT: v_add_i32_e32 v14, vcc, 64, v14
+; SDAG-NEXT: v_addc_u32_e64 v20, s[6:7], 0, 0, vcc
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[12:13]
+; SDAG-NEXT: v_min_u32_e32 v12, v15, v16
+; SDAG-NEXT: v_cmp_ne_u64_e64 s[6:7], 0, v[10:11]
+; SDAG-NEXT: v_cndmask_b32_e64 v13, v20, 0, s[6:7]
+; SDAG-NEXT: s_or_b64 s[8:9], vcc, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, v14, v12, s[6:7]
+; SDAG-NEXT: v_sub_i32_e32 v12, vcc, v12, v18
+; SDAG-NEXT: v_subb_u32_e32 v13, vcc, v13, v19, vcc
+; SDAG-NEXT: v_xor_b32_e32 v16, 0x7f, v12
+; SDAG-NEXT: v_subbrev_u32_e32 v14, vcc, 0, v17, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[10:11], v[12:13]
+; SDAG-NEXT: v_cndmask_b32_e64 v18, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v15, vcc, 0, v17, vcc
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v14
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[14:15]
+; SDAG-NEXT: v_cndmask_b32_e64 v19, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v13, v15
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; SDAG-NEXT: v_cndmask_b32_e32 v18, v19, v18, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[16:17]
+; SDAG-NEXT: v_and_b32_e32 v16, 1, v18
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v16
+; SDAG-NEXT: s_or_b64 s[4:5], s[8:9], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v19, v5, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v4, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v7, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v6, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB2_12
+; SDAG-NEXT: ; %bb.7: ; %udiv-bb1
+; SDAG-NEXT: v_add_i32_e32 v38, vcc, 1, v12
+; SDAG-NEXT: v_sub_i32_e64 v18, s[4:5], 63, v12
+; SDAG-NEXT: v_mov_b32_e32 v16, 0
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: v_addc_u32_e32 v39, vcc, 0, v13, vcc
+; SDAG-NEXT: v_lshl_b64 v[18:19], v[6:7], v18
+; SDAG-NEXT: v_addc_u32_e32 v48, vcc, 0, v14, vcc
+; SDAG-NEXT: v_addc_u32_e32 v49, vcc, 0, v15, vcc
+; SDAG-NEXT: v_or_b32_e32 v13, v38, v48
+; SDAG-NEXT: v_sub_i32_e32 v15, vcc, 0x7f, v12
+; SDAG-NEXT: v_or_b32_e32 v14, v39, v49
+; SDAG-NEXT: v_lshl_b64 v[20:21], v[4:5], v15
+; SDAG-NEXT: v_sub_i32_e32 v12, vcc, 64, v15
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[6:7], v15
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[13:14]
+; SDAG-NEXT: v_lshr_b64 v[12:13], v[6:7], v12
+; SDAG-NEXT: v_or_b32_e32 v13, v21, v13
+; SDAG-NEXT: v_or_b32_e32 v12, v20, v12
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v15
+; SDAG-NEXT: v_cndmask_b32_e64 v14, v19, v13, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v18, v12, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v13, 0, v23, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, v22, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v15
+; SDAG-NEXT: v_cndmask_b32_e64 v15, v14, v5, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v14, v18, v4, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v18, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB2_11
+; SDAG-NEXT: ; %bb.8: ; %udiv-preheader
+; SDAG-NEXT: v_lshr_b64 v[16:17], v[6:7], v38
+; SDAG-NEXT: v_sub_i32_e32 v24, vcc, 64, v38
+; SDAG-NEXT: v_subrev_i32_e32 v51, vcc, 64, v38
+; SDAG-NEXT: v_lshr_b64 v[22:23], v[4:5], v38
+; SDAG-NEXT: v_add_i32_e32 v50, vcc, -1, v37
+; SDAG-NEXT: s_mov_b64 s[10:11], 0
+; SDAG-NEXT: v_mov_b32_e32 v20, 0
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: v_mov_b32_e32 v18, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: v_lshl_b64 v[24:25], v[4:5], v24
+; SDAG-NEXT: v_lshr_b64 v[53:54], v[4:5], v51
+; SDAG-NEXT: v_addc_u32_e32 v51, vcc, -1, v36, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v17, v25
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v24
+; SDAG-NEXT: v_addc_u32_e32 v52, vcc, -1, v10, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v38
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v54, v17, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v53, v16, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v25, 0, v23, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v24, 0, v22, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v53, vcc, -1, v11, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v38
+; SDAG-NEXT: v_cndmask_b32_e32 v23, v17, v7, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v22, v16, v6, vcc
+; SDAG-NEXT: v_mov_b32_e32 v17, 0
+; SDAG-NEXT: .LBB2_9: ; %udiv-do-while
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshl_b64 v[24:25], v[24:25], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v16, 31, v23
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[22:23], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v54, 31, v15
+; SDAG-NEXT: v_lshl_b64 v[14:15], v[14:15], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v55, 31, v13
+; SDAG-NEXT: v_lshl_b64 v[12:13], v[12:13], 1
+; SDAG-NEXT: v_or_b32_e32 v24, v24, v16
+; SDAG-NEXT: v_or_b32_e32 v22, v22, v54
+; SDAG-NEXT: v_or_b32_e32 v14, v14, v55
+; SDAG-NEXT: v_or_b32_e32 v15, v19, v15
+; SDAG-NEXT: v_or_b32_e32 v13, v21, v13
+; SDAG-NEXT: v_or_b32_e32 v14, v18, v14
+; SDAG-NEXT: v_sub_i32_e32 v16, vcc, v50, v22
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v51, v23, vcc
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v52, v24, vcc
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v53, v25, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v21, 31, v16
+; SDAG-NEXT: v_and_b32_e32 v16, 1, v21
+; SDAG-NEXT: v_and_b32_e32 v54, v21, v11
+; SDAG-NEXT: v_and_b32_e32 v55, v21, v10
+; SDAG-NEXT: v_and_b32_e32 v40, v21, v36
+; SDAG-NEXT: v_and_b32_e32 v21, v21, v37
+; SDAG-NEXT: v_sub_i32_e32 v22, vcc, v22, v21
+; SDAG-NEXT: v_subb_u32_e32 v23, vcc, v23, v40, vcc
+; SDAG-NEXT: v_subb_u32_e32 v24, vcc, v24, v55, vcc
+; SDAG-NEXT: v_subb_u32_e32 v25, vcc, v25, v54, vcc
+; SDAG-NEXT: v_add_i32_e32 v38, vcc, -1, v38
+; SDAG-NEXT: v_addc_u32_e32 v39, vcc, -1, v39, vcc
+; SDAG-NEXT: v_addc_u32_e32 v48, vcc, -1, v48, vcc
+; SDAG-NEXT: v_addc_u32_e32 v49, vcc, -1, v49, vcc
+; SDAG-NEXT: v_or_b32_e32 v55, v39, v49
+; SDAG-NEXT: v_or_b32_e32 v54, v38, v48
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[54:55]
+; SDAG-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; SDAG-NEXT: v_or_b32_e32 v12, v20, v12
+; SDAG-NEXT: v_mov_b32_e32 v21, v17
+; SDAG-NEXT: v_mov_b32_e32 v20, v16
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; SDAG-NEXT: s_cbranch_execnz .LBB2_9
+; SDAG-NEXT: ; %bb.10: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: .LBB2_11: ; %Flow11
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_lshl_b64 v[14:15], v[14:15], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v20, 31, v13
+; SDAG-NEXT: v_lshl_b64 v[12:13], v[12:13], 1
+; SDAG-NEXT: v_or_b32_e32 v14, v14, v20
+; SDAG-NEXT: v_or_b32_e32 v19, v19, v15
+; SDAG-NEXT: v_or_b32_e32 v17, v17, v13
+; SDAG-NEXT: v_or_b32_e32 v18, v18, v14
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v12
+; SDAG-NEXT: .LBB2_12: ; %Flow12
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mul_lo_u32 v14, v33, v9
+; SDAG-NEXT: v_mad_u64_u32 v[12:13], s[4:5], v33, v8, 0
+; SDAG-NEXT: v_mul_lo_u32 v24, v27, v8
+; SDAG-NEXT: v_mul_lo_u32 v25, v35, v31
+; SDAG-NEXT: v_mul_lo_u32 v35, v32, v30
+; SDAG-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v31, v33, 0
+; SDAG-NEXT: v_mov_b32_e32 v15, 0
+; SDAG-NEXT: v_mul_lo_u32 v38, v16, v11
+; SDAG-NEXT: v_mad_u64_u32 v[20:21], s[4:5], v16, v10, 0
+; SDAG-NEXT: v_mul_lo_u32 v39, v17, v10
+; SDAG-NEXT: v_mul_lo_u32 v19, v19, v37
+; SDAG-NEXT: v_mul_lo_u32 v48, v18, v36
+; SDAG-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v37, v16, 0
+; SDAG-NEXT: v_add_i32_e32 v13, vcc, v13, v14
+; SDAG-NEXT: v_mov_b32_e32 v14, v9
+; SDAG-NEXT: v_mad_u64_u32 v[22:23], s[4:5], v30, v33, v[14:15]
+; SDAG-NEXT: v_sub_i32_e32 v2, vcc, v2, v8
+; SDAG-NEXT: v_add_i32_e64 v14, s[4:5], v21, v38
+; SDAG-NEXT: v_add_i32_e64 v13, s[4:5], v13, v24
+; SDAG-NEXT: v_mov_b32_e32 v24, v23
+; SDAG-NEXT: v_mov_b32_e32 v23, v15
+; SDAG-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v31, v27, v[22:23]
+; SDAG-NEXT: v_xor_b32_e32 v33, v2, v28
+; SDAG-NEXT: v_add_i32_e64 v21, s[4:5], v14, v39
+; SDAG-NEXT: v_mov_b32_e32 v14, v11
+; SDAG-NEXT: v_mad_u64_u32 v[22:23], s[4:5], v36, v16, v[14:15]
+; SDAG-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v32, v31, v[12:13]
+; SDAG-NEXT: v_mov_b32_e32 v2, v9
+; SDAG-NEXT: v_add_i32_e64 v13, s[4:5], v24, v2
+; SDAG-NEXT: v_addc_u32_e64 v14, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v2, v8
+; SDAG-NEXT: v_subb_u32_e32 v16, vcc, v3, v2, vcc
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v18, v37, v[20:21]
+; SDAG-NEXT: v_mov_b32_e32 v18, v23
+; SDAG-NEXT: v_mov_b32_e32 v23, v15
+; SDAG-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v37, v17, v[22:23]
+; SDAG-NEXT: v_add_i32_e64 v20, s[4:5], v25, v12
+; SDAG-NEXT: v_mad_u64_u32 v[12:13], s[4:5], v30, v27, v[13:14]
+; SDAG-NEXT: v_xor_b32_e32 v16, v16, v29
+; SDAG-NEXT: v_add_i32_e64 v3, s[4:5], v19, v3
+; SDAG-NEXT: v_add_i32_e64 v14, s[4:5], v18, v9
+; SDAG-NEXT: v_addc_u32_e64 v15, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v18, v8
+; SDAG-NEXT: v_add_i32_e64 v19, s[4:5], v35, v20
+; SDAG-NEXT: v_add_i32_e64 v3, s[4:5], v48, v3
+; SDAG-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v36, v17, v[14:15]
+; SDAG-NEXT: v_add_i32_e64 v11, s[4:5], v12, v11
+; SDAG-NEXT: v_addc_u32_e64 v12, s[4:5], v13, v19, s[4:5]
+; SDAG-NEXT: v_subb_u32_e32 v0, vcc, v0, v11, vcc
+; SDAG-NEXT: v_add_i32_e64 v8, s[4:5], v8, v2
+; SDAG-NEXT: v_addc_u32_e64 v9, s[4:5], v9, v3, s[4:5]
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; SDAG-NEXT: v_xor_b32_e32 v2, v0, v28
+; SDAG-NEXT: v_xor_b32_e32 v3, v1, v29
+; SDAG-NEXT: v_sub_i32_e32 v0, vcc, v33, v28
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v16, v29, vcc
+; SDAG-NEXT: v_subb_u32_e32 v2, vcc, v2, v28, vcc
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v3, v29, vcc
+; SDAG-NEXT: v_sub_i32_e32 v6, vcc, v6, v10
+; SDAG-NEXT: v_subb_u32_e32 v7, vcc, v7, v18, vcc
+; SDAG-NEXT: v_xor_b32_e32 v6, v6, v26
+; SDAG-NEXT: v_subb_u32_e32 v4, vcc, v4, v8, vcc
+; SDAG-NEXT: v_xor_b32_e32 v7, v7, v34
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v5, v9, vcc
+; SDAG-NEXT: v_xor_b32_e32 v8, v4, v26
+; SDAG-NEXT: v_xor_b32_e32 v9, v5, v34
+; SDAG-NEXT: v_sub_i32_e32 v4, vcc, v6, v26
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v7, v34, vcc
+; SDAG-NEXT: v_subb_u32_e32 v6, vcc, v8, v26, vcc
+; SDAG-NEXT: v_subb_u32_e32 v7, vcc, v9, v34, vcc
+; SDAG-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
+; SDAG-NEXT: s_waitcnt vmcnt(0)
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: v_srem_v2i128_vv:
+; GISEL: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_ashrrev_i32_e32 v28, 31, v3
+; GISEL-NEXT: v_ashrrev_i32_e32 v20, 31, v11
+; GISEL-NEXT: v_mov_b32_e32 v18, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v19, 0
+; GISEL-NEXT: v_xor_b32_e32 v0, v0, v28
+; GISEL-NEXT: v_xor_b32_e32 v1, v1, v28
+; GISEL-NEXT: v_xor_b32_e32 v2, v2, v28
+; GISEL-NEXT: v_xor_b32_e32 v3, v3, v28
+; GISEL-NEXT: v_xor_b32_e32 v8, v8, v20
+; GISEL-NEXT: v_xor_b32_e32 v9, v9, v20
+; GISEL-NEXT: v_xor_b32_e32 v10, v10, v20
+; GISEL-NEXT: v_xor_b32_e32 v11, v11, v20
+; GISEL-NEXT: v_sub_i32_e32 v16, vcc, v0, v28
+; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v1, v28, vcc
+; GISEL-NEXT: v_sub_i32_e64 v30, s[4:5], v8, v20
+; GISEL-NEXT: v_subb_u32_e64 v29, s[4:5], v9, v20, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v8, vcc, v2, v28, vcc
+; GISEL-NEXT: v_subb_u32_e32 v9, vcc, v3, v28, vcc
+; GISEL-NEXT: v_subb_u32_e64 v10, vcc, v10, v20, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v11, vcc, v11, v20, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v20, v29
+; GISEL-NEXT: v_ffbh_u32_e32 v21, v30
+; GISEL-NEXT: v_ffbh_u32_e32 v22, v17
+; GISEL-NEXT: v_ffbh_u32_e32 v23, v16
+; GISEL-NEXT: v_or_b32_e32 v0, v30, v10
+; GISEL-NEXT: v_or_b32_e32 v1, v29, v11
+; GISEL-NEXT: v_or_b32_e32 v2, v16, v8
+; GISEL-NEXT: v_or_b32_e32 v3, v17, v9
+; GISEL-NEXT: v_add_i32_e32 v21, vcc, 32, v21
+; GISEL-NEXT: v_ffbh_u32_e32 v24, v11
+; GISEL-NEXT: v_ffbh_u32_e32 v25, v10
+; GISEL-NEXT: v_add_i32_e32 v23, vcc, 32, v23
+; GISEL-NEXT: v_ffbh_u32_e32 v26, v9
+; GISEL-NEXT: v_ffbh_u32_e32 v27, v8
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[2:3]
+; GISEL-NEXT: v_min_u32_e32 v0, v20, v21
+; GISEL-NEXT: v_add_i32_e64 v1, s[6:7], 32, v25
+; GISEL-NEXT: v_min_u32_e32 v2, v22, v23
+; GISEL-NEXT: v_add_i32_e64 v3, s[6:7], 32, v27
+; GISEL-NEXT: v_add_i32_e64 v0, s[6:7], 64, v0
+; GISEL-NEXT: v_min_u32_e32 v1, v24, v1
+; GISEL-NEXT: v_add_i32_e64 v2, s[6:7], 64, v2
+; GISEL-NEXT: v_min_u32_e32 v3, v26, v3
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v20, 0, 1, s[4:5]
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v2, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[18:19]
+; GISEL-NEXT: v_cndmask_b32_e64 v21, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v18, 0x7f, v0
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e64 v22, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v2
+; GISEL-NEXT: v_or_b32_e32 v19, v1, v3
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e32 v21, v22, v21, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v19, v20, v21
+; GISEL-NEXT: v_and_b32_e32 v20, 1, v19
+; GISEL-NEXT: v_or_b32_e32 v18, v19, v18
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v20
+; GISEL-NEXT: v_cndmask_b32_e64 v31, v16, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v20, 1, v18
+; GISEL-NEXT: v_cndmask_b32_e64 v32, v17, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v18, v8, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v19, v9, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v20
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB2_6
+; GISEL-NEXT: ; %bb.1: ; %udiv-bb15
+; GISEL-NEXT: v_add_i32_e32 v31, vcc, 1, v0
+; GISEL-NEXT: v_addc_u32_e64 v32, s[4:5], 0, v1, vcc
+; GISEL-NEXT: v_sub_i32_e32 v24, vcc, 0x7f, v0
+; GISEL-NEXT: v_addc_u32_e64 v33, vcc, 0, v2, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v34, vcc, 0, v3, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v20, s[4:5], 64, v24
+; GISEL-NEXT: v_sub_i32_e64 v18, s[4:5], 64, v24
+; GISEL-NEXT: v_lshl_b64 v[0:1], v[16:17], v24
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[8:9], v24
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[18:19], v[16:17], v18
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[16:17], v20
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v24
+; GISEL-NEXT: v_cndmask_b32_e32 v20, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v21, 0, v1, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v18, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v19, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v22, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v23, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v24
+; GISEL-NEXT: v_cndmask_b32_e32 v18, v0, v8, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v19, v1, v9, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v0, s8
+; GISEL-NEXT: v_mov_b32_e32 v1, s9
+; GISEL-NEXT: v_mov_b32_e32 v2, s10
+; GISEL-NEXT: v_mov_b32_e32 v3, s11
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[8:9], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB2_5
+; GISEL-NEXT: ; %bb.2: ; %udiv-preheader4
+; GISEL-NEXT: v_subrev_i32_e32 v24, vcc, 64, v31
+; GISEL-NEXT: v_sub_i32_e32 v22, vcc, 64, v31
+; GISEL-NEXT: v_lshr_b64 v[0:1], v[8:9], v31
+; GISEL-NEXT: v_lshr_b64 v[2:3], v[16:17], v31
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_add_i32_e32 v35, vcc, -1, v30
+; GISEL-NEXT: v_addc_u32_e32 v36, vcc, -1, v29, vcc
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[8:9], v22
+; GISEL-NEXT: v_lshr_b64 v[24:25], v[8:9], v24
+; GISEL-NEXT: v_addc_u32_e32 v37, vcc, -1, v10, vcc
+; GISEL-NEXT: v_addc_u32_e32 v38, vcc, -1, v11, vcc
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v22
+; GISEL-NEXT: v_or_b32_e32 v3, v3, v23
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v31
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v24, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v25, v3, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v26, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v27, 0, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v31
+; GISEL-NEXT: v_cndmask_b32_e32 v24, v2, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v25, v3, v17, vcc
+; GISEL-NEXT: v_mov_b32_e32 v23, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GISEL-NEXT: .LBB2_3: ; %udiv-do-while3
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[20:21], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v22, 31, v21
+; GISEL-NEXT: v_lshl_b64 v[48:49], v[24:25], 1
+; GISEL-NEXT: v_lshl_b64 v[26:27], v[26:27], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v24, 31, v25
+; GISEL-NEXT: v_lshrrev_b32_e32 v25, 31, v19
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[18:19], 1
+; GISEL-NEXT: v_add_i32_e32 v31, vcc, -1, v31
+; GISEL-NEXT: v_addc_u32_e32 v32, vcc, -1, v32, vcc
+; GISEL-NEXT: v_or_b32_e32 v20, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v21, v1, v3
+; GISEL-NEXT: v_or_b32_e32 v2, v26, v24
+; GISEL-NEXT: v_or_b32_e32 v3, v48, v25
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v22
+; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v33, vcc
+; GISEL-NEXT: v_addc_u32_e32 v34, vcc, -1, v34, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v35, v3
+; GISEL-NEXT: v_subb_u32_e32 v0, vcc, v36, v49, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v31, v33
+; GISEL-NEXT: v_or_b32_e32 v1, v32, v34
+; GISEL-NEXT: v_subb_u32_e32 v22, vcc, v37, v2, vcc
+; GISEL-NEXT: v_subb_u32_e32 v22, vcc, v38, v27, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v22
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v22, 1, v0
+; GISEL-NEXT: v_and_b32_e32 v1, v0, v30
+; GISEL-NEXT: v_and_b32_e32 v25, v0, v29
+; GISEL-NEXT: v_and_b32_e32 v26, v0, v10
+; GISEL-NEXT: v_and_b32_e32 v0, v0, v11
+; GISEL-NEXT: v_sub_i32_e32 v24, vcc, v3, v1
+; GISEL-NEXT: v_subb_u32_e32 v25, vcc, v49, v25, vcc
+; GISEL-NEXT: v_subb_u32_e32 v26, vcc, v2, v26, vcc
+; GISEL-NEXT: v_subb_u32_e32 v27, vcc, v27, v0, vcc
+; GISEL-NEXT: v_mov_b32_e32 v0, v22
+; GISEL-NEXT: v_mov_b32_e32 v1, v23
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execnz .LBB2_3
+; GISEL-NEXT: ; %bb.4: ; %Flow13
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB2_5: ; %Flow14
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[20:21], 1
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[18:19], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v20, 31, v21
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v20
+; GISEL-NEXT: v_or_b32_e32 v31, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v32, v1, v3
+; GISEL-NEXT: .LBB2_6: ; %Flow16
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_ashrrev_i32_e32 v33, 31, v7
+; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v15
+; GISEL-NEXT: v_mov_b32_e32 v2, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GISEL-NEXT: v_xor_b32_e32 v1, v4, v33
+; GISEL-NEXT: v_xor_b32_e32 v4, v5, v33
+; GISEL-NEXT: v_xor_b32_e32 v5, v6, v33
+; GISEL-NEXT: v_xor_b32_e32 v7, v7, v33
+; GISEL-NEXT: v_xor_b32_e32 v6, v12, v0
+; GISEL-NEXT: v_xor_b32_e32 v20, v13, v0
+; GISEL-NEXT: v_xor_b32_e32 v14, v14, v0
+; GISEL-NEXT: v_xor_b32_e32 v15, v15, v0
+; GISEL-NEXT: v_sub_i32_e32 v12, vcc, v1, v33
+; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v4, v33, vcc
+; GISEL-NEXT: v_sub_i32_e64 v35, s[4:5], v6, v0
+; GISEL-NEXT: v_subb_u32_e64 v34, s[4:5], v20, v0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v6, vcc, v5, v33, vcc
+; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v7, v33, vcc
+; GISEL-NEXT: v_subb_u32_e64 v4, vcc, v14, v0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v15, v0, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v20, v34
+; GISEL-NEXT: v_ffbh_u32_e32 v21, v35
+; GISEL-NEXT: v_ffbh_u32_e32 v22, v13
+; GISEL-NEXT: v_ffbh_u32_e32 v23, v12
+; GISEL-NEXT: v_or_b32_e32 v0, v35, v4
+; GISEL-NEXT: v_or_b32_e32 v1, v34, v5
+; GISEL-NEXT: v_or_b32_e32 v14, v12, v6
+; GISEL-NEXT: v_or_b32_e32 v15, v13, v7
+; GISEL-NEXT: v_add_i32_e32 v21, vcc, 32, v21
+; GISEL-NEXT: v_ffbh_u32_e32 v24, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v25, v4
+; GISEL-NEXT: v_add_i32_e32 v23, vcc, 32, v23
+; GISEL-NEXT: v_ffbh_u32_e32 v26, v7
+; GISEL-NEXT: v_ffbh_u32_e32 v27, v6
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[14:15]
+; GISEL-NEXT: v_min_u32_e32 v0, v20, v21
+; GISEL-NEXT: v_add_i32_e64 v1, s[6:7], 32, v25
+; GISEL-NEXT: v_min_u32_e32 v14, v22, v23
+; GISEL-NEXT: v_add_i32_e64 v15, s[6:7], 32, v27
+; GISEL-NEXT: v_add_i32_e64 v0, s[6:7], 64, v0
+; GISEL-NEXT: v_min_u32_e32 v1, v24, v1
+; GISEL-NEXT: v_add_i32_e64 v14, s[6:7], 64, v14
+; GISEL-NEXT: v_min_u32_e32 v15, v26, v15
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v20, 0, 1, s[4:5]
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v15, v14, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v14, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v15, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e64 v21, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v2, 0x7f, v0
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[14:15]
+; GISEL-NEXT: v_cndmask_b32_e64 v22, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v14
+; GISEL-NEXT: v_or_b32_e32 v3, v1, v15
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; GISEL-NEXT: v_cndmask_b32_e32 v21, v22, v21, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v3, v20, v21
+; GISEL-NEXT: v_and_b32_e32 v20, 1, v3
+; GISEL-NEXT: v_or_b32_e32 v2, v3, v2
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v20
+; GISEL-NEXT: v_cndmask_b32_e64 v20, v12, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v22, 1, v2
+; GISEL-NEXT: v_cndmask_b32_e64 v21, v13, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v2, v6, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v3, v7, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB2_12
+; GISEL-NEXT: ; %bb.7: ; %udiv-bb1
+; GISEL-NEXT: v_add_i32_e32 v36, vcc, 1, v0
+; GISEL-NEXT: v_addc_u32_e64 v37, s[4:5], 0, v1, vcc
+; GISEL-NEXT: v_sub_i32_e32 v24, vcc, 0x7f, v0
+; GISEL-NEXT: v_addc_u32_e64 v38, vcc, 0, v14, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v39, vcc, 0, v15, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v20, s[4:5], 64, v24
+; GISEL-NEXT: v_sub_i32_e64 v14, s[4:5], 64, v24
+; GISEL-NEXT: v_lshl_b64 v[0:1], v[12:13], v24
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[6:7], v24
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[14:15], v[12:13], v14
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[12:13], v20
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v24
+; GISEL-NEXT: v_cndmask_b32_e32 v20, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v21, 0, v1, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v14, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v15, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v22, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v23, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v24
+; GISEL-NEXT: v_cndmask_b32_e32 v14, v0, v6, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v15, v1, v7, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v0, s8
+; GISEL-NEXT: v_mov_b32_e32 v1, s9
+; GISEL-NEXT: v_mov_b32_e32 v2, s10
+; GISEL-NEXT: v_mov_b32_e32 v3, s11
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[8:9], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB2_11
+; GISEL-NEXT: ; %bb.8: ; %udiv-preheader
+; GISEL-NEXT: v_subrev_i32_e32 v24, vcc, 64, v36
+; GISEL-NEXT: v_sub_i32_e32 v22, vcc, 64, v36
+; GISEL-NEXT: v_lshr_b64 v[0:1], v[6:7], v36
+; GISEL-NEXT: v_lshr_b64 v[2:3], v[12:13], v36
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_add_i32_e32 v48, vcc, -1, v35
+; GISEL-NEXT: v_addc_u32_e32 v49, vcc, -1, v34, vcc
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[6:7], v22
+; GISEL-NEXT: v_lshr_b64 v[24:25], v[6:7], v24
+; GISEL-NEXT: v_addc_u32_e32 v50, vcc, -1, v4, vcc
+; GISEL-NEXT: v_addc_u32_e32 v51, vcc, -1, v5, vcc
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v22
+; GISEL-NEXT: v_or_b32_e32 v3, v3, v23
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v36
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v24, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v25, v3, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v26, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v27, 0, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v36
+; GISEL-NEXT: v_cndmask_b32_e32 v24, v2, v12, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v25, v3, v13, vcc
+; GISEL-NEXT: v_mov_b32_e32 v23, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GISEL-NEXT: .LBB2_9: ; %udiv-do-while
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[20:21], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v22, 31, v21
+; GISEL-NEXT: v_lshl_b64 v[52:53], v[24:25], 1
+; GISEL-NEXT: v_lshl_b64 v[26:27], v[26:27], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v24, 31, v25
+; GISEL-NEXT: v_lshrrev_b32_e32 v25, 31, v15
+; GISEL-NEXT: v_lshl_b64 v[14:15], v[14:15], 1
+; GISEL-NEXT: v_add_i32_e32 v36, vcc, -1, v36
+; GISEL-NEXT: v_addc_u32_e32 v37, vcc, -1, v37, vcc
+; GISEL-NEXT: v_or_b32_e32 v20, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v21, v1, v3
+; GISEL-NEXT: v_or_b32_e32 v2, v26, v24
+; GISEL-NEXT: v_or_b32_e32 v3, v52, v25
+; GISEL-NEXT: v_or_b32_e32 v14, v14, v22
+; GISEL-NEXT: v_addc_u32_e32 v38, vcc, -1, v38, vcc
+; GISEL-NEXT: v_addc_u32_e32 v39, vcc, -1, v39, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v48, v3
+; GISEL-NEXT: v_subb_u32_e32 v0, vcc, v49, v53, vcc
+; GISEL-NEXT: v_or_b32_e32 v0, v36, v38
+; GISEL-NEXT: v_or_b32_e32 v1, v37, v39
+; GISEL-NEXT: v_subb_u32_e32 v22, vcc, v50, v2, vcc
+; GISEL-NEXT: v_subb_u32_e32 v22, vcc, v51, v27, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v22
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v22, 1, v0
+; GISEL-NEXT: v_and_b32_e32 v1, v0, v35
+; GISEL-NEXT: v_and_b32_e32 v25, v0, v34
+; GISEL-NEXT: v_and_b32_e32 v26, v0, v4
+; GISEL-NEXT: v_and_b32_e32 v52, v0, v5
+; GISEL-NEXT: v_sub_i32_e32 v24, vcc, v3, v1
+; GISEL-NEXT: v_subb_u32_e32 v25, vcc, v53, v25, vcc
+; GISEL-NEXT: v_mov_b32_e32 v0, v22
+; GISEL-NEXT: v_mov_b32_e32 v1, v23
+; GISEL-NEXT: v_subb_u32_e32 v26, vcc, v2, v26, vcc
+; GISEL-NEXT: v_subb_u32_e32 v27, vcc, v27, v52, vcc
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execnz .LBB2_9
+; GISEL-NEXT: ; %bb.10: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB2_11: ; %Flow11
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[20:21], 1
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[14:15], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v14, 31, v21
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v14
+; GISEL-NEXT: v_or_b32_e32 v20, v0, v22
+; GISEL-NEXT: v_or_b32_e32 v21, v1, v23
+; GISEL-NEXT: .LBB2_12: ; %Flow12
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v30, v31, 0
+; GISEL-NEXT: v_mad_u64_u32 v[14:15], s[4:5], v30, v18, 0
+; GISEL-NEXT: v_mul_lo_u32 v24, v30, v19
+; GISEL-NEXT: v_mul_lo_u32 v25, v29, v18
+; GISEL-NEXT: v_mad_u64_u32 v[18:19], s[4:5], v35, v20, 0
+; GISEL-NEXT: v_mad_u64_u32 v[22:23], s[4:5], v35, v2, 0
+; GISEL-NEXT: v_mul_lo_u32 v26, v35, v3
+; GISEL-NEXT: v_mul_lo_u32 v27, v34, v2
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v29, v32, v[14:15]
+; GISEL-NEXT: v_mad_u64_u32 v[14:15], s[4:5], v34, v21, v[22:23]
+; GISEL-NEXT: v_mov_b32_e32 v22, v19
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v10, v31, v[2:3]
+; GISEL-NEXT: v_mad_u64_u32 v[14:15], s[4:5], v4, v20, v[14:15]
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v30, v32, v[1:2]
+; GISEL-NEXT: v_mov_b32_e32 v23, v14
+; GISEL-NEXT: v_mad_u64_u32 v[22:23], s[4:5], v35, v21, v[22:23]
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v29, v31, v[1:2]
+; GISEL-NEXT: v_addc_u32_e64 v3, s[6:7], v3, v24, s[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[22:23], s[6:7], v34, v20, v[22:23]
+; GISEL-NEXT: v_addc_u32_e64 v14, s[6:7], v15, v26, s[6:7]
+; GISEL-NEXT: v_addc_u32_e32 v3, vcc, v3, v25, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v16, v0
+; GISEL-NEXT: v_subb_u32_e32 v1, vcc, v17, v1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v15, v0, v28
+; GISEL-NEXT: v_addc_u32_e64 v0, s[4:5], v14, v27, s[4:5]
+; GISEL-NEXT: v_sub_i32_e64 v12, s[4:5], v12, v18
+; GISEL-NEXT: v_subb_u32_e64 v14, s[4:5], v13, v22, s[4:5]
+; GISEL-NEXT: v_xor_b32_e32 v16, v12, v33
+; GISEL-NEXT: v_mad_u64_u32 v[12:13], s[6:7], v10, v32, v[3:4]
+; GISEL-NEXT: v_xor_b32_e32 v1, v1, v28
+; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v4, v21, v[0:1]
+; GISEL-NEXT: v_xor_b32_e32 v14, v14, v33
+; GISEL-NEXT: v_mad_u64_u32 v[10:11], s[6:7], v11, v31, v[12:13]
+; GISEL-NEXT: v_sub_i32_e64 v0, s[6:7], v15, v28
+; GISEL-NEXT: v_subb_u32_e64 v1, s[6:7], v1, v28, s[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[8:9], v5, v20, v[3:4]
+; GISEL-NEXT: v_sub_i32_e64 v4, s[8:9], v16, v33
+; GISEL-NEXT: v_subb_u32_e64 v5, s[8:9], v14, v33, s[8:9]
+; GISEL-NEXT: v_subb_u32_e32 v2, vcc, v8, v2, vcc
+; GISEL-NEXT: v_subb_u32_e32 v8, vcc, v9, v10, vcc
+; GISEL-NEXT: v_xor_b32_e32 v2, v2, v28
+; GISEL-NEXT: v_subb_u32_e64 v6, vcc, v6, v23, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v7, v3, vcc
+; GISEL-NEXT: v_xor_b32_e32 v6, v6, v33
+; GISEL-NEXT: v_xor_b32_e32 v7, v8, v28
+; GISEL-NEXT: v_xor_b32_e32 v8, v3, v33
+; GISEL-NEXT: v_subb_u32_e64 v2, vcc, v2, v28, s[6:7]
+; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v7, v28, vcc
+; GISEL-NEXT: v_subb_u32_e64 v6, vcc, v6, v33, s[8:9]
+; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v8, v33, vcc
+; GISEL-NEXT: s_setpc_b64 s[30:31]
%shl = srem <2 x i128> %lhs, %rhs
ret <2 x i128> %shl
}
define <2 x i128> @v_urem_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
+; SDAG-LABEL: v_urem_v2i128_vv:
+; SDAG: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_or_b32_e32 v17, v9, v11
+; SDAG-NEXT: v_or_b32_e32 v16, v8, v10
+; SDAG-NEXT: v_or_b32_e32 v19, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v18, v0, v2
+; SDAG-NEXT: v_ffbh_u32_e32 v20, v10
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v11
+; SDAG-NEXT: v_ffbh_u32_e32 v22, v8
+; SDAG-NEXT: v_ffbh_u32_e32 v23, v9
+; SDAG-NEXT: v_ffbh_u32_e32 v24, v2
+; SDAG-NEXT: v_ffbh_u32_e32 v25, v3
+; SDAG-NEXT: v_ffbh_u32_e32 v26, v0
+; SDAG-NEXT: v_ffbh_u32_e32 v27, v1
+; SDAG-NEXT: v_mov_b32_e32 v28, 0
+; SDAG-NEXT: s_mov_b64 s[8:9], 0x7f
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[18:19]
+; SDAG-NEXT: v_add_i32_e64 v16, s[6:7], 32, v20
+; SDAG-NEXT: v_add_i32_e64 v17, s[6:7], 32, v22
+; SDAG-NEXT: v_add_i32_e64 v18, s[6:7], 32, v24
+; SDAG-NEXT: v_add_i32_e64 v19, s[6:7], 32, v26
+; SDAG-NEXT: s_or_b64 s[6:7], vcc, s[4:5]
+; SDAG-NEXT: v_min_u32_e32 v16, v16, v21
+; SDAG-NEXT: v_min_u32_e32 v17, v17, v23
+; SDAG-NEXT: v_min_u32_e32 v18, v18, v25
+; SDAG-NEXT: v_min_u32_e32 v19, v19, v27
+; SDAG-NEXT: v_add_i32_e32 v17, vcc, 64, v17
+; SDAG-NEXT: v_addc_u32_e64 v20, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_add_i32_e32 v19, vcc, 64, v19
+; SDAG-NEXT: v_addc_u32_e64 v21, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v20, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v16, v17, v16, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v21, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v18, v19, v18, vcc
+; SDAG-NEXT: v_sub_i32_e32 v16, vcc, v16, v18
+; SDAG-NEXT: v_subb_u32_e32 v17, vcc, v20, v17, vcc
+; SDAG-NEXT: v_xor_b32_e32 v18, 0x7f, v16
+; SDAG-NEXT: v_subbrev_u32_e32 v20, vcc, 0, v28, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[8:9], v[16:17]
+; SDAG-NEXT: v_cndmask_b32_e64 v22, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v21, vcc, 0, v28, vcc
+; SDAG-NEXT: v_or_b32_e32 v18, v18, v20
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[20:21]
+; SDAG-NEXT: v_cndmask_b32_e64 v23, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v19, v17, v21
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[20:21]
+; SDAG-NEXT: v_cndmask_b32_e32 v22, v23, v22, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_and_b32_e32 v18, 1, v22
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v18
+; SDAG-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v33, v3, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v31, v2, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v30, v1, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v32, v0, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB3_6
+; SDAG-NEXT: ; %bb.1: ; %udiv-bb15
+; SDAG-NEXT: v_add_i32_e32 v30, vcc, 1, v16
+; SDAG-NEXT: v_sub_i32_e64 v22, s[4:5], 63, v16
+; SDAG-NEXT: v_mov_b32_e32 v18, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: v_addc_u32_e32 v31, vcc, 0, v17, vcc
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[0:1], v22
+; SDAG-NEXT: v_addc_u32_e32 v32, vcc, 0, v20, vcc
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, 0, v21, vcc
+; SDAG-NEXT: v_or_b32_e32 v20, v30, v32
+; SDAG-NEXT: v_sub_i32_e32 v26, vcc, 0x7f, v16
+; SDAG-NEXT: v_or_b32_e32 v21, v31, v33
+; SDAG-NEXT: v_lshl_b64 v[16:17], v[2:3], v26
+; SDAG-NEXT: v_sub_i32_e32 v27, vcc, 64, v26
+; SDAG-NEXT: v_lshl_b64 v[24:25], v[0:1], v26
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[20:21]
+; SDAG-NEXT: v_lshr_b64 v[20:21], v[0:1], v27
+; SDAG-NEXT: v_or_b32_e32 v17, v17, v21
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v20
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v26
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v23, v17, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v22, v16, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v23, 0, v25, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v22, 0, v24, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v26
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v17, v3, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, v16, v2, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v20, 0
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB3_5
+; SDAG-NEXT: ; %bb.2: ; %udiv-preheader4
+; SDAG-NEXT: v_lshr_b64 v[18:19], v[0:1], v30
+; SDAG-NEXT: v_sub_i32_e32 v28, vcc, 64, v30
+; SDAG-NEXT: v_subrev_i32_e32 v35, vcc, 64, v30
+; SDAG-NEXT: v_lshr_b64 v[26:27], v[2:3], v30
+; SDAG-NEXT: v_add_i32_e32 v34, vcc, -1, v8
+; SDAG-NEXT: s_mov_b64 s[10:11], 0
+; SDAG-NEXT: v_mov_b32_e32 v24, 0
+; SDAG-NEXT: v_mov_b32_e32 v25, 0
+; SDAG-NEXT: v_mov_b32_e32 v20, 0
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: v_lshl_b64 v[28:29], v[2:3], v28
+; SDAG-NEXT: v_lshr_b64 v[37:38], v[2:3], v35
+; SDAG-NEXT: v_addc_u32_e32 v35, vcc, -1, v9, vcc
+; SDAG-NEXT: v_or_b32_e32 v19, v19, v29
+; SDAG-NEXT: v_or_b32_e32 v18, v18, v28
+; SDAG-NEXT: v_addc_u32_e32 v36, vcc, -1, v10, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v30
+; SDAG-NEXT: v_cndmask_b32_e64 v19, v38, v19, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v37, v18, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v29, 0, v27, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v28, 0, v26, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v37, vcc, -1, v11, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30
+; SDAG-NEXT: v_cndmask_b32_e32 v27, v19, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v26, v18, v0, vcc
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: .LBB3_3: ; %udiv-do-while3
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshl_b64 v[28:29], v[28:29], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v18, 31, v27
+; SDAG-NEXT: v_lshl_b64 v[26:27], v[26:27], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v38, 31, v17
+; SDAG-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v39, 31, v23
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[22:23], 1
+; SDAG-NEXT: v_or_b32_e32 v28, v28, v18
+; SDAG-NEXT: v_or_b32_e32 v26, v26, v38
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v39
+; SDAG-NEXT: v_or_b32_e32 v17, v21, v17
+; SDAG-NEXT: v_sub_i32_e32 v18, vcc, v34, v26
+; SDAG-NEXT: v_or_b32_e32 v16, v20, v16
+; SDAG-NEXT: v_subb_u32_e32 v18, vcc, v35, v27, vcc
+; SDAG-NEXT: v_subb_u32_e32 v18, vcc, v36, v28, vcc
+; SDAG-NEXT: v_subb_u32_e32 v18, vcc, v37, v29, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v38, 31, v18
+; SDAG-NEXT: v_and_b32_e32 v39, v38, v8
+; SDAG-NEXT: v_and_b32_e32 v48, v38, v9
+; SDAG-NEXT: v_and_b32_e32 v49, v38, v10
+; SDAG-NEXT: v_and_b32_e32 v18, 1, v38
+; SDAG-NEXT: v_and_b32_e32 v38, v38, v11
+; SDAG-NEXT: v_sub_i32_e32 v26, vcc, v26, v39
+; SDAG-NEXT: v_subb_u32_e32 v27, vcc, v27, v48, vcc
+; SDAG-NEXT: v_subb_u32_e32 v28, vcc, v28, v49, vcc
+; SDAG-NEXT: v_subb_u32_e32 v29, vcc, v29, v38, vcc
+; SDAG-NEXT: v_add_i32_e32 v30, vcc, -1, v30
+; SDAG-NEXT: v_addc_u32_e32 v31, vcc, -1, v31, vcc
+; SDAG-NEXT: v_addc_u32_e32 v32, vcc, -1, v32, vcc
+; SDAG-NEXT: v_addc_u32_e32 v33, vcc, -1, v33, vcc
+; SDAG-NEXT: v_or_b32_e32 v38, v30, v32
+; SDAG-NEXT: v_or_b32_e32 v39, v31, v33
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[38:39]
+; SDAG-NEXT: v_or_b32_e32 v23, v25, v23
+; SDAG-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; SDAG-NEXT: v_or_b32_e32 v22, v24, v22
+; SDAG-NEXT: v_mov_b32_e32 v25, v19
+; SDAG-NEXT: v_mov_b32_e32 v24, v18
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; SDAG-NEXT: s_cbranch_execnz .LBB3_3
+; SDAG-NEXT: ; %bb.4: ; %Flow13
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: .LBB3_5: ; %Flow14
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v24, 31, v23
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[22:23], 1
+; SDAG-NEXT: v_or_b32_e32 v16, v16, v24
+; SDAG-NEXT: v_or_b32_e32 v33, v21, v17
+; SDAG-NEXT: v_or_b32_e32 v30, v19, v23
+; SDAG-NEXT: v_or_b32_e32 v31, v20, v16
+; SDAG-NEXT: v_or_b32_e32 v32, v18, v22
+; SDAG-NEXT: .LBB3_6: ; %Flow16
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_or_b32_e32 v17, v13, v15
+; SDAG-NEXT: v_or_b32_e32 v16, v12, v14
+; SDAG-NEXT: v_or_b32_e32 v19, v5, v7
+; SDAG-NEXT: v_or_b32_e32 v18, v4, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v20, v14
+; SDAG-NEXT: v_ffbh_u32_e32 v21, v15
+; SDAG-NEXT: v_ffbh_u32_e32 v22, v12
+; SDAG-NEXT: v_ffbh_u32_e32 v23, v13
+; SDAG-NEXT: v_ffbh_u32_e32 v24, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v25, v7
+; SDAG-NEXT: v_ffbh_u32_e32 v26, v4
+; SDAG-NEXT: v_ffbh_u32_e32 v27, v5
+; SDAG-NEXT: v_mov_b32_e32 v28, 0
+; SDAG-NEXT: s_mov_b64 s[8:9], 0x7f
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[18:19]
+; SDAG-NEXT: v_add_i32_e64 v16, s[6:7], 32, v20
+; SDAG-NEXT: v_add_i32_e64 v17, s[6:7], 32, v22
+; SDAG-NEXT: v_add_i32_e64 v18, s[6:7], 32, v24
+; SDAG-NEXT: v_add_i32_e64 v19, s[6:7], 32, v26
+; SDAG-NEXT: s_or_b64 s[6:7], vcc, s[4:5]
+; SDAG-NEXT: v_min_u32_e32 v16, v16, v21
+; SDAG-NEXT: v_min_u32_e32 v17, v17, v23
+; SDAG-NEXT: v_min_u32_e32 v18, v18, v25
+; SDAG-NEXT: v_min_u32_e32 v19, v19, v27
+; SDAG-NEXT: v_add_i32_e32 v17, vcc, 64, v17
+; SDAG-NEXT: v_addc_u32_e64 v20, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_add_i32_e32 v19, vcc, 64, v19
+; SDAG-NEXT: v_addc_u32_e64 v21, s[4:5], 0, 0, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[14:15]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v20, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v16, v17, v16, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v17, v21, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v18, v19, v18, vcc
+; SDAG-NEXT: v_sub_i32_e32 v16, vcc, v16, v18
+; SDAG-NEXT: v_subb_u32_e32 v17, vcc, v20, v17, vcc
+; SDAG-NEXT: v_xor_b32_e32 v20, 0x7f, v16
+; SDAG-NEXT: v_subbrev_u32_e32 v18, vcc, 0, v28, vcc
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[8:9], v[16:17]
+; SDAG-NEXT: v_cndmask_b32_e64 v22, 0, 1, s[4:5]
+; SDAG-NEXT: v_subbrev_u32_e32 v19, vcc, 0, v28, vcc
+; SDAG-NEXT: v_or_b32_e32 v20, v20, v18
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_cndmask_b32_e64 v23, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v21, v17, v19
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; SDAG-NEXT: v_cndmask_b32_e32 v22, v23, v22, vcc
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[20:21]
+; SDAG-NEXT: v_and_b32_e32 v20, 1, v22
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v20
+; SDAG-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v23, v7, 0, s[4:5]
+; SDAG-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; SDAG-NEXT: v_cndmask_b32_e64 v22, v6, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v21, v5, 0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v4, 0, s[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB3_12
+; SDAG-NEXT: ; %bb.7: ; %udiv-bb1
+; SDAG-NEXT: v_add_i32_e32 v34, vcc, 1, v16
+; SDAG-NEXT: v_sub_i32_e64 v22, s[4:5], 63, v16
+; SDAG-NEXT: v_mov_b32_e32 v20, 0
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: v_addc_u32_e32 v35, vcc, 0, v17, vcc
+; SDAG-NEXT: v_lshl_b64 v[22:23], v[4:5], v22
+; SDAG-NEXT: v_addc_u32_e32 v36, vcc, 0, v18, vcc
+; SDAG-NEXT: v_addc_u32_e32 v37, vcc, 0, v19, vcc
+; SDAG-NEXT: v_or_b32_e32 v17, v34, v36
+; SDAG-NEXT: v_sub_i32_e32 v19, vcc, 0x7f, v16
+; SDAG-NEXT: v_or_b32_e32 v18, v35, v37
+; SDAG-NEXT: v_lshl_b64 v[24:25], v[6:7], v19
+; SDAG-NEXT: v_sub_i32_e32 v16, vcc, 64, v19
+; SDAG-NEXT: v_lshl_b64 v[26:27], v[4:5], v19
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[17:18]
+; SDAG-NEXT: v_lshr_b64 v[16:17], v[4:5], v16
+; SDAG-NEXT: v_or_b32_e32 v17, v25, v17
+; SDAG-NEXT: v_or_b32_e32 v16, v24, v16
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v19
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v23, v17, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v22, v22, v16, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v17, 0, v27, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v16, 0, v26, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v19
+; SDAG-NEXT: v_cndmask_b32_e64 v19, v18, v7, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v18, v22, v6, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v22, 0
+; SDAG-NEXT: v_mov_b32_e32 v23, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB3_11
+; SDAG-NEXT: ; %bb.8: ; %udiv-preheader
+; SDAG-NEXT: v_lshr_b64 v[20:21], v[4:5], v34
+; SDAG-NEXT: v_sub_i32_e32 v28, vcc, 64, v34
+; SDAG-NEXT: v_subrev_i32_e32 v39, vcc, 64, v34
+; SDAG-NEXT: v_lshr_b64 v[26:27], v[6:7], v34
+; SDAG-NEXT: v_add_i32_e32 v38, vcc, -1, v12
+; SDAG-NEXT: s_mov_b64 s[10:11], 0
+; SDAG-NEXT: v_mov_b32_e32 v24, 0
+; SDAG-NEXT: v_mov_b32_e32 v25, 0
+; SDAG-NEXT: v_mov_b32_e32 v22, 0
+; SDAG-NEXT: v_mov_b32_e32 v23, 0
+; SDAG-NEXT: v_lshl_b64 v[28:29], v[6:7], v28
+; SDAG-NEXT: v_lshr_b64 v[49:50], v[6:7], v39
+; SDAG-NEXT: v_addc_u32_e32 v39, vcc, -1, v13, vcc
+; SDAG-NEXT: v_or_b32_e32 v21, v21, v29
+; SDAG-NEXT: v_or_b32_e32 v20, v20, v28
+; SDAG-NEXT: v_addc_u32_e32 v48, vcc, -1, v14, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v34
+; SDAG-NEXT: v_cndmask_b32_e64 v21, v50, v21, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v20, v49, v20, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v29, 0, v27, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v28, 0, v26, s[4:5]
+; SDAG-NEXT: v_addc_u32_e32 v49, vcc, -1, v15, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v34
+; SDAG-NEXT: v_cndmask_b32_e32 v27, v21, v5, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v26, v20, v4, vcc
+; SDAG-NEXT: v_mov_b32_e32 v21, 0
+; SDAG-NEXT: .LBB3_9: ; %udiv-do-while
+; SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; SDAG-NEXT: v_lshl_b64 v[28:29], v[28:29], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v20, 31, v27
+; SDAG-NEXT: v_lshl_b64 v[26:27], v[26:27], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v50, 31, v19
+; SDAG-NEXT: v_lshl_b64 v[18:19], v[18:19], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v51, 31, v17
+; SDAG-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; SDAG-NEXT: v_or_b32_e32 v28, v28, v20
+; SDAG-NEXT: v_or_b32_e32 v26, v26, v50
+; SDAG-NEXT: v_or_b32_e32 v18, v18, v51
+; SDAG-NEXT: v_or_b32_e32 v19, v23, v19
+; SDAG-NEXT: v_or_b32_e32 v17, v25, v17
+; SDAG-NEXT: v_or_b32_e32 v18, v22, v18
+; SDAG-NEXT: v_sub_i32_e32 v20, vcc, v38, v26
+; SDAG-NEXT: v_subb_u32_e32 v20, vcc, v39, v27, vcc
+; SDAG-NEXT: v_subb_u32_e32 v20, vcc, v48, v28, vcc
+; SDAG-NEXT: v_subb_u32_e32 v20, vcc, v49, v29, vcc
+; SDAG-NEXT: v_ashrrev_i32_e32 v25, 31, v20
+; SDAG-NEXT: v_and_b32_e32 v20, 1, v25
+; SDAG-NEXT: v_and_b32_e32 v50, v25, v15
+; SDAG-NEXT: v_and_b32_e32 v51, v25, v14
+; SDAG-NEXT: v_and_b32_e32 v52, v25, v13
+; SDAG-NEXT: v_and_b32_e32 v25, v25, v12
+; SDAG-NEXT: v_sub_i32_e32 v26, vcc, v26, v25
+; SDAG-NEXT: v_subb_u32_e32 v27, vcc, v27, v52, vcc
+; SDAG-NEXT: v_subb_u32_e32 v28, vcc, v28, v51, vcc
+; SDAG-NEXT: v_subb_u32_e32 v29, vcc, v29, v50, vcc
+; SDAG-NEXT: v_add_i32_e32 v34, vcc, -1, v34
+; SDAG-NEXT: v_addc_u32_e32 v35, vcc, -1, v35, vcc
+; SDAG-NEXT: v_addc_u32_e32 v36, vcc, -1, v36, vcc
+; SDAG-NEXT: v_addc_u32_e32 v37, vcc, -1, v37, vcc
+; SDAG-NEXT: v_or_b32_e32 v51, v35, v37
+; SDAG-NEXT: v_or_b32_e32 v50, v34, v36
+; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[50:51]
+; SDAG-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; SDAG-NEXT: v_or_b32_e32 v16, v24, v16
+; SDAG-NEXT: v_mov_b32_e32 v25, v21
+; SDAG-NEXT: v_mov_b32_e32 v24, v20
+; SDAG-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; SDAG-NEXT: s_cbranch_execnz .LBB3_9
+; SDAG-NEXT: ; %bb.10: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[10:11]
+; SDAG-NEXT: .LBB3_11: ; %Flow11
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_lshl_b64 v[18:19], v[18:19], 1
+; SDAG-NEXT: v_lshrrev_b32_e32 v24, 31, v17
+; SDAG-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
+; SDAG-NEXT: v_or_b32_e32 v18, v18, v24
+; SDAG-NEXT: v_or_b32_e32 v23, v23, v19
+; SDAG-NEXT: v_or_b32_e32 v21, v21, v17
+; SDAG-NEXT: v_or_b32_e32 v22, v22, v18
+; SDAG-NEXT: v_or_b32_e32 v20, v20, v16
+; SDAG-NEXT: .LBB3_12: ; %Flow12
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mul_lo_u32 v18, v32, v11
+; SDAG-NEXT: v_mad_u64_u32 v[16:17], s[4:5], v32, v10, 0
+; SDAG-NEXT: v_mul_lo_u32 v28, v30, v10
+; SDAG-NEXT: v_mul_lo_u32 v29, v33, v8
+; SDAG-NEXT: v_mul_lo_u32 v33, v31, v9
+; SDAG-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v8, v32, 0
+; SDAG-NEXT: v_mov_b32_e32 v19, 0
+; SDAG-NEXT: v_mul_lo_u32 v34, v20, v15
+; SDAG-NEXT: v_mad_u64_u32 v[24:25], s[4:5], v20, v14, 0
+; SDAG-NEXT: v_mul_lo_u32 v35, v21, v14
+; SDAG-NEXT: v_mul_lo_u32 v23, v23, v12
+; SDAG-NEXT: v_mul_lo_u32 v36, v22, v13
+; SDAG-NEXT: v_mad_u64_u32 v[14:15], s[4:5], v12, v20, 0
+; SDAG-NEXT: v_add_i32_e32 v17, vcc, v17, v18
+; SDAG-NEXT: v_mov_b32_e32 v18, v11
+; SDAG-NEXT: v_mad_u64_u32 v[26:27], s[4:5], v9, v32, v[18:19]
+; SDAG-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
+; SDAG-NEXT: v_add_i32_e64 v18, s[4:5], v25, v34
+; SDAG-NEXT: v_add_i32_e64 v17, s[4:5], v17, v28
+; SDAG-NEXT: v_mov_b32_e32 v28, v27
+; SDAG-NEXT: v_mov_b32_e32 v27, v19
+; SDAG-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v8, v30, v[26:27]
+; SDAG-NEXT: v_add_i32_e64 v25, s[4:5], v18, v35
+; SDAG-NEXT: v_mov_b32_e32 v18, v15
+; SDAG-NEXT: v_mad_u64_u32 v[26:27], s[4:5], v13, v20, v[18:19]
+; SDAG-NEXT: v_mad_u64_u32 v[15:16], s[4:5], v31, v8, v[16:17]
+; SDAG-NEXT: v_mov_b32_e32 v8, v11
+; SDAG-NEXT: v_add_i32_e64 v17, s[4:5], v28, v8
+; SDAG-NEXT: v_addc_u32_e64 v18, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v8, v10
+; SDAG-NEXT: v_subb_u32_e32 v1, vcc, v1, v8, vcc
+; SDAG-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v22, v12, v[24:25]
+; SDAG-NEXT: v_mov_b32_e32 v22, v27
+; SDAG-NEXT: v_mov_b32_e32 v27, v19
+; SDAG-NEXT: v_mad_u64_u32 v[19:20], s[4:5], v12, v21, v[26:27]
+; SDAG-NEXT: v_add_i32_e64 v16, s[4:5], v29, v16
+; SDAG-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v9, v30, v[17:18]
+; SDAG-NEXT: v_add_i32_e64 v17, s[4:5], v23, v11
+; SDAG-NEXT: v_mov_b32_e32 v11, v20
+; SDAG-NEXT: v_add_i32_e64 v11, s[4:5], v22, v11
+; SDAG-NEXT: v_addc_u32_e64 v12, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_add_i32_e64 v16, s[4:5], v33, v16
+; SDAG-NEXT: v_add_i32_e64 v17, s[4:5], v36, v17
+; SDAG-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v13, v21, v[11:12]
+; SDAG-NEXT: v_add_i32_e64 v8, s[4:5], v8, v15
+; SDAG-NEXT: v_addc_u32_e64 v9, s[4:5], v9, v16, s[4:5]
+; SDAG-NEXT: v_subb_u32_e32 v2, vcc, v2, v8, vcc
+; SDAG-NEXT: v_subb_u32_e32 v3, vcc, v3, v9, vcc
+; SDAG-NEXT: v_add_i32_e32 v8, vcc, v11, v10
+; SDAG-NEXT: v_addc_u32_e32 v9, vcc, v12, v17, vcc
+; SDAG-NEXT: v_mov_b32_e32 v10, v19
+; SDAG-NEXT: v_sub_i32_e32 v4, vcc, v4, v14
+; SDAG-NEXT: v_subb_u32_e32 v5, vcc, v5, v10, vcc
+; SDAG-NEXT: v_subb_u32_e32 v6, vcc, v6, v8, vcc
+; SDAG-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: v_urem_v2i128_vv:
+; GISEL: ; %bb.0: ; %_udiv-special-cases_udiv-special-cases
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_or_b32_e32 v16, v8, v10
+; GISEL-NEXT: v_or_b32_e32 v17, v9, v11
+; GISEL-NEXT: v_or_b32_e32 v18, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v19, v1, v3
+; GISEL-NEXT: v_ffbh_u32_e32 v22, v9
+; GISEL-NEXT: v_ffbh_u32_e32 v23, v8
+; GISEL-NEXT: v_ffbh_u32_e32 v24, v11
+; GISEL-NEXT: v_ffbh_u32_e32 v25, v10
+; GISEL-NEXT: v_ffbh_u32_e32 v26, v1
+; GISEL-NEXT: v_ffbh_u32_e32 v27, v0
+; GISEL-NEXT: v_ffbh_u32_e32 v28, v3
+; GISEL-NEXT: v_ffbh_u32_e32 v29, v2
+; GISEL-NEXT: v_mov_b32_e32 v20, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v21, 0
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[18:19]
+; GISEL-NEXT: v_add_i32_e64 v16, s[6:7], 32, v23
+; GISEL-NEXT: v_add_i32_e64 v17, s[6:7], 32, v25
+; GISEL-NEXT: v_add_i32_e64 v18, s[6:7], 32, v27
+; GISEL-NEXT: v_add_i32_e64 v19, s[6:7], 32, v29
+; GISEL-NEXT: v_min_u32_e32 v16, v22, v16
+; GISEL-NEXT: v_min_u32_e32 v17, v24, v17
+; GISEL-NEXT: v_min_u32_e32 v18, v26, v18
+; GISEL-NEXT: v_min_u32_e32 v19, v28, v19
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v22, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e32 v16, vcc, 64, v16
+; GISEL-NEXT: v_add_i32_e32 v18, vcc, 64, v18
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GISEL-NEXT: v_cndmask_b32_e32 v16, v17, v16, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_cndmask_b32_e32 v17, v19, v18, vcc
+; GISEL-NEXT: v_sub_i32_e32 v16, vcc, v16, v17
+; GISEL-NEXT: v_subb_u32_e64 v17, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v18, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v19, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[16:17], v[20:21]
+; GISEL-NEXT: v_cndmask_b32_e64 v23, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v20, 0x7f, v16
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[18:19]
+; GISEL-NEXT: v_cndmask_b32_e64 v24, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v20, v20, v18
+; GISEL-NEXT: v_or_b32_e32 v21, v17, v19
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; GISEL-NEXT: v_cndmask_b32_e32 v23, v24, v23, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[20:21]
+; GISEL-NEXT: v_cndmask_b32_e64 v20, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v21, v22, v23
+; GISEL-NEXT: v_and_b32_e32 v22, 1, v21
+; GISEL-NEXT: v_or_b32_e32 v20, v21, v20
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
+; GISEL-NEXT: v_cndmask_b32_e64 v32, v0, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v22, 1, v20
+; GISEL-NEXT: v_cndmask_b32_e64 v33, v1, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v20, v2, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v21, v3, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB3_6
+; GISEL-NEXT: ; %bb.1: ; %udiv-bb15
+; GISEL-NEXT: v_add_i32_e32 v30, vcc, 1, v16
+; GISEL-NEXT: v_addc_u32_e64 v31, s[4:5], 0, v17, vcc
+; GISEL-NEXT: v_sub_i32_e32 v26, vcc, 0x7f, v16
+; GISEL-NEXT: v_addc_u32_e64 v32, vcc, 0, v18, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v33, vcc, 0, v19, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v22, s[4:5], 64, v26
+; GISEL-NEXT: v_sub_i32_e64 v20, s[4:5], 64, v26
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[0:1], v26
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[2:3], v26
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[20:21], v[0:1], v20
+; GISEL-NEXT: v_lshl_b64 v[24:25], v[0:1], v22
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v22, 0, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v23, 0, v17, vcc
+; GISEL-NEXT: v_or_b32_e32 v16, v20, v18
+; GISEL-NEXT: v_or_b32_e32 v17, v21, v19
+; GISEL-NEXT: v_cndmask_b32_e32 v16, v24, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v17, v25, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v26
+; GISEL-NEXT: v_cndmask_b32_e32 v20, v16, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v21, v17, v3, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v19, s11
+; GISEL-NEXT: v_mov_b32_e32 v18, s10
+; GISEL-NEXT: v_mov_b32_e32 v17, s9
+; GISEL-NEXT: v_mov_b32_e32 v16, s8
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[8:9], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB3_5
+; GISEL-NEXT: ; %bb.2: ; %udiv-preheader4
+; GISEL-NEXT: v_subrev_i32_e32 v26, vcc, 64, v30
+; GISEL-NEXT: v_sub_i32_e32 v24, vcc, 64, v30
+; GISEL-NEXT: v_lshr_b64 v[16:17], v[2:3], v30
+; GISEL-NEXT: v_lshr_b64 v[18:19], v[0:1], v30
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_add_i32_e32 v34, vcc, -1, v8
+; GISEL-NEXT: v_addc_u32_e32 v35, vcc, -1, v9, vcc
+; GISEL-NEXT: v_lshl_b64 v[24:25], v[2:3], v24
+; GISEL-NEXT: v_lshr_b64 v[26:27], v[2:3], v26
+; GISEL-NEXT: v_addc_u32_e32 v36, vcc, -1, v10, vcc
+; GISEL-NEXT: v_addc_u32_e32 v37, vcc, -1, v11, vcc
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v24
+; GISEL-NEXT: v_or_b32_e32 v19, v19, v25
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v30
+; GISEL-NEXT: v_cndmask_b32_e32 v18, v26, v18, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v19, v27, v19, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v28, 0, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v29, 0, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30
+; GISEL-NEXT: v_cndmask_b32_e32 v26, v18, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v27, v19, v1, vcc
+; GISEL-NEXT: v_mov_b32_e32 v25, 0
+; GISEL-NEXT: v_mov_b32_e32 v19, s7
+; GISEL-NEXT: v_mov_b32_e32 v18, s6
+; GISEL-NEXT: v_mov_b32_e32 v17, s5
+; GISEL-NEXT: v_mov_b32_e32 v16, s4
+; GISEL-NEXT: .LBB3_3: ; %udiv-do-while3
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[22:23], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v24, 31, v23
+; GISEL-NEXT: v_lshl_b64 v[38:39], v[26:27], 1
+; GISEL-NEXT: v_lshl_b64 v[28:29], v[28:29], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v26, 31, v27
+; GISEL-NEXT: v_lshrrev_b32_e32 v27, 31, v21
+; GISEL-NEXT: v_lshl_b64 v[20:21], v[20:21], 1
+; GISEL-NEXT: v_add_i32_e32 v30, vcc, -1, v30
+; GISEL-NEXT: v_addc_u32_e32 v31, vcc, -1, v31, vcc
+; GISEL-NEXT: v_or_b32_e32 v22, v16, v18
+; GISEL-NEXT: v_or_b32_e32 v23, v17, v19
+; GISEL-NEXT: v_or_b32_e32 v18, v28, v26
+; GISEL-NEXT: v_or_b32_e32 v19, v38, v27
+; GISEL-NEXT: v_or_b32_e32 v20, v20, v24
+; GISEL-NEXT: v_addc_u32_e32 v32, vcc, -1, v32, vcc
+; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v33, vcc
+; GISEL-NEXT: v_sub_i32_e32 v16, vcc, v34, v19
+; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v35, v39, vcc
+; GISEL-NEXT: v_or_b32_e32 v16, v30, v32
+; GISEL-NEXT: v_or_b32_e32 v17, v31, v33
+; GISEL-NEXT: v_subb_u32_e32 v24, vcc, v36, v18, vcc
+; GISEL-NEXT: v_subb_u32_e32 v24, vcc, v37, v29, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; GISEL-NEXT: v_ashrrev_i32_e32 v16, 31, v24
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v24, 1, v16
+; GISEL-NEXT: v_and_b32_e32 v17, v16, v8
+; GISEL-NEXT: v_and_b32_e32 v27, v16, v9
+; GISEL-NEXT: v_and_b32_e32 v28, v16, v10
+; GISEL-NEXT: v_and_b32_e32 v16, v16, v11
+; GISEL-NEXT: v_sub_i32_e32 v26, vcc, v19, v17
+; GISEL-NEXT: v_subb_u32_e32 v27, vcc, v39, v27, vcc
+; GISEL-NEXT: v_subb_u32_e32 v28, vcc, v18, v28, vcc
+; GISEL-NEXT: v_subb_u32_e32 v29, vcc, v29, v16, vcc
+; GISEL-NEXT: v_mov_b32_e32 v16, v24
+; GISEL-NEXT: v_mov_b32_e32 v17, v25
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execnz .LBB3_3
+; GISEL-NEXT: ; %bb.4: ; %Flow13
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB3_5: ; %Flow14
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[22:23], 1
+; GISEL-NEXT: v_lshl_b64 v[20:21], v[20:21], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v22, 31, v23
+; GISEL-NEXT: v_or_b32_e32 v20, v20, v22
+; GISEL-NEXT: v_or_b32_e32 v32, v16, v18
+; GISEL-NEXT: v_or_b32_e32 v33, v17, v19
+; GISEL-NEXT: .LBB3_6: ; %Flow16
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_mov_b64 s[8:9], 0
+; GISEL-NEXT: v_or_b32_e32 v16, v12, v14
+; GISEL-NEXT: v_or_b32_e32 v17, v13, v15
+; GISEL-NEXT: v_or_b32_e32 v18, v4, v6
+; GISEL-NEXT: v_or_b32_e32 v19, v5, v7
+; GISEL-NEXT: v_ffbh_u32_e32 v22, v13
+; GISEL-NEXT: v_ffbh_u32_e32 v23, v12
+; GISEL-NEXT: v_ffbh_u32_e32 v26, v15
+; GISEL-NEXT: v_ffbh_u32_e32 v27, v14
+; GISEL-NEXT: v_ffbh_u32_e32 v28, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v29, v4
+; GISEL-NEXT: v_ffbh_u32_e32 v30, v7
+; GISEL-NEXT: v_ffbh_u32_e32 v31, v6
+; GISEL-NEXT: v_mov_b32_e32 v24, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v25, 0
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[18:19]
+; GISEL-NEXT: v_add_i32_e64 v16, s[6:7], 32, v23
+; GISEL-NEXT: v_add_i32_e64 v17, s[6:7], 32, v27
+; GISEL-NEXT: v_add_i32_e64 v18, s[6:7], 32, v29
+; GISEL-NEXT: v_add_i32_e64 v19, s[6:7], 32, v31
+; GISEL-NEXT: v_min_u32_e32 v16, v22, v16
+; GISEL-NEXT: v_min_u32_e32 v17, v26, v17
+; GISEL-NEXT: v_min_u32_e32 v18, v28, v18
+; GISEL-NEXT: v_min_u32_e32 v19, v30, v19
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v26, 0, 1, s[4:5]
+; GISEL-NEXT: v_add_i32_e32 v16, vcc, 64, v16
+; GISEL-NEXT: v_add_i32_e32 v18, vcc, 64, v18
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; GISEL-NEXT: v_cndmask_b32_e32 v16, v17, v16, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GISEL-NEXT: v_cndmask_b32_e32 v17, v19, v18, vcc
+; GISEL-NEXT: v_sub_i32_e32 v16, vcc, v16, v17
+; GISEL-NEXT: v_subb_u32_e64 v17, s[4:5], 0, 0, vcc
+; GISEL-NEXT: v_subb_u32_e64 v22, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_subb_u32_e64 v23, s[4:5], 0, 0, s[4:5]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[16:17], v[24:25]
+; GISEL-NEXT: v_cndmask_b32_e64 v24, 0, 1, vcc
+; GISEL-NEXT: v_xor_b32_e32 v18, 0x7f, v16
+; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[22:23]
+; GISEL-NEXT: v_cndmask_b32_e64 v25, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v22
+; GISEL-NEXT: v_or_b32_e32 v19, v17, v23
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[22:23]
+; GISEL-NEXT: v_cndmask_b32_e32 v24, v25, v24, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; GISEL-NEXT: v_cndmask_b32_e64 v18, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v19, v26, v24
+; GISEL-NEXT: v_and_b32_e32 v24, 1, v19
+; GISEL-NEXT: v_or_b32_e32 v18, v19, v18
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v24
+; GISEL-NEXT: v_cndmask_b32_e64 v24, v4, 0, vcc
+; GISEL-NEXT: v_and_b32_e32 v26, 1, v18
+; GISEL-NEXT: v_cndmask_b32_e64 v25, v5, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v18, v6, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v19, v7, 0, vcc
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB3_12
+; GISEL-NEXT: ; %bb.7: ; %udiv-bb1
+; GISEL-NEXT: v_add_i32_e32 v34, vcc, 1, v16
+; GISEL-NEXT: v_addc_u32_e64 v35, s[4:5], 0, v17, vcc
+; GISEL-NEXT: v_sub_i32_e32 v28, vcc, 0x7f, v16
+; GISEL-NEXT: v_addc_u32_e64 v36, vcc, 0, v22, s[4:5]
+; GISEL-NEXT: v_addc_u32_e32 v37, vcc, 0, v23, vcc
+; GISEL-NEXT: v_subrev_i32_e64 v24, s[4:5], 64, v28
+; GISEL-NEXT: v_sub_i32_e64 v22, s[4:5], 64, v28
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[4:5], v28
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[6:7], v28
+; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-NEXT: v_lshr_b64 v[22:23], v[4:5], v22
+; GISEL-NEXT: v_lshl_b64 v[26:27], v[4:5], v24
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v28
+; GISEL-NEXT: v_cndmask_b32_e32 v24, 0, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v25, 0, v17, vcc
+; GISEL-NEXT: v_or_b32_e32 v16, v22, v18
+; GISEL-NEXT: v_or_b32_e32 v17, v23, v19
+; GISEL-NEXT: v_cndmask_b32_e32 v16, v26, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v17, v27, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v28
+; GISEL-NEXT: v_cndmask_b32_e32 v22, v16, v6, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v23, v17, v7, vcc
+; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v19, s11
+; GISEL-NEXT: v_mov_b32_e32 v18, s10
+; GISEL-NEXT: v_mov_b32_e32 v17, s9
+; GISEL-NEXT: v_mov_b32_e32 v16, s8
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GISEL-NEXT: s_xor_b64 s[8:9], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB3_11
+; GISEL-NEXT: ; %bb.8: ; %udiv-preheader
+; GISEL-NEXT: v_subrev_i32_e32 v28, vcc, 64, v34
+; GISEL-NEXT: v_sub_i32_e32 v26, vcc, 64, v34
+; GISEL-NEXT: v_lshr_b64 v[16:17], v[6:7], v34
+; GISEL-NEXT: v_lshr_b64 v[18:19], v[4:5], v34
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_add_i32_e32 v38, vcc, -1, v12
+; GISEL-NEXT: v_addc_u32_e32 v39, vcc, -1, v13, vcc
+; GISEL-NEXT: v_lshl_b64 v[26:27], v[6:7], v26
+; GISEL-NEXT: v_lshr_b64 v[28:29], v[6:7], v28
+; GISEL-NEXT: v_addc_u32_e32 v48, vcc, -1, v14, vcc
+; GISEL-NEXT: v_addc_u32_e32 v49, vcc, -1, v15, vcc
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v26
+; GISEL-NEXT: v_or_b32_e32 v19, v19, v27
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v34
+; GISEL-NEXT: v_cndmask_b32_e32 v18, v28, v18, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v19, v29, v19, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v30, 0, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v31, 0, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v34
+; GISEL-NEXT: v_cndmask_b32_e32 v28, v18, v4, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v29, v19, v5, vcc
+; GISEL-NEXT: v_mov_b32_e32 v27, 0
+; GISEL-NEXT: v_mov_b32_e32 v19, s7
+; GISEL-NEXT: v_mov_b32_e32 v18, s6
+; GISEL-NEXT: v_mov_b32_e32 v17, s5
+; GISEL-NEXT: v_mov_b32_e32 v16, s4
+; GISEL-NEXT: .LBB3_9: ; %udiv-do-while
+; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[24:25], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v26, 31, v25
+; GISEL-NEXT: v_lshl_b64 v[50:51], v[28:29], 1
+; GISEL-NEXT: v_lshl_b64 v[30:31], v[30:31], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v28, 31, v29
+; GISEL-NEXT: v_lshrrev_b32_e32 v29, 31, v23
+; GISEL-NEXT: v_lshl_b64 v[22:23], v[22:23], 1
+; GISEL-NEXT: v_add_i32_e32 v34, vcc, -1, v34
+; GISEL-NEXT: v_addc_u32_e32 v35, vcc, -1, v35, vcc
+; GISEL-NEXT: v_or_b32_e32 v24, v16, v18
+; GISEL-NEXT: v_or_b32_e32 v25, v17, v19
+; GISEL-NEXT: v_or_b32_e32 v18, v30, v28
+; GISEL-NEXT: v_or_b32_e32 v19, v50, v29
+; GISEL-NEXT: v_or_b32_e32 v22, v22, v26
+; GISEL-NEXT: v_addc_u32_e32 v36, vcc, -1, v36, vcc
+; GISEL-NEXT: v_addc_u32_e32 v37, vcc, -1, v37, vcc
+; GISEL-NEXT: v_sub_i32_e32 v16, vcc, v38, v19
+; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v39, v51, vcc
+; GISEL-NEXT: v_or_b32_e32 v16, v34, v36
+; GISEL-NEXT: v_or_b32_e32 v17, v35, v37
+; GISEL-NEXT: v_subb_u32_e32 v26, vcc, v48, v18, vcc
+; GISEL-NEXT: v_subb_u32_e32 v26, vcc, v49, v31, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; GISEL-NEXT: v_ashrrev_i32_e32 v16, 31, v26
+; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v26, 1, v16
+; GISEL-NEXT: v_and_b32_e32 v17, v16, v12
+; GISEL-NEXT: v_and_b32_e32 v29, v16, v13
+; GISEL-NEXT: v_and_b32_e32 v30, v16, v14
+; GISEL-NEXT: v_and_b32_e32 v50, v16, v15
+; GISEL-NEXT: v_sub_i32_e32 v28, vcc, v19, v17
+; GISEL-NEXT: v_subb_u32_e32 v29, vcc, v51, v29, vcc
+; GISEL-NEXT: v_mov_b32_e32 v16, v26
+; GISEL-NEXT: v_mov_b32_e32 v17, v27
+; GISEL-NEXT: v_subb_u32_e32 v30, vcc, v18, v30, vcc
+; GISEL-NEXT: v_subb_u32_e32 v31, vcc, v31, v50, vcc
+; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execnz .LBB3_9
+; GISEL-NEXT: ; %bb.10: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB3_11: ; %Flow11
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_b64 v[26:27], v[24:25], 1
+; GISEL-NEXT: v_lshl_b64 v[18:19], v[22:23], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v22, 31, v25
+; GISEL-NEXT: v_or_b32_e32 v18, v18, v22
+; GISEL-NEXT: v_or_b32_e32 v24, v16, v26
+; GISEL-NEXT: v_or_b32_e32 v25, v17, v27
+; GISEL-NEXT: .LBB3_12: ; %Flow12
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: v_mad_u64_u32 v[16:17], s[4:5], v8, v32, 0
+; GISEL-NEXT: v_mad_u64_u32 v[22:23], s[4:5], v8, v20, 0
+; GISEL-NEXT: v_mul_lo_u32 v28, v8, v21
+; GISEL-NEXT: v_mul_lo_u32 v29, v9, v20
+; GISEL-NEXT: v_mad_u64_u32 v[20:21], s[4:5], v12, v24, 0
+; GISEL-NEXT: v_mad_u64_u32 v[26:27], s[4:5], v12, v18, 0
+; GISEL-NEXT: v_mul_lo_u32 v30, v12, v19
+; GISEL-NEXT: v_mul_lo_u32 v31, v13, v18
+; GISEL-NEXT: v_mad_u64_u32 v[18:19], s[4:5], v9, v33, v[22:23]
+; GISEL-NEXT: v_mad_u64_u32 v[22:23], s[4:5], v13, v25, v[26:27]
+; GISEL-NEXT: v_mad_u64_u32 v[18:19], s[4:5], v10, v32, v[18:19]
+; GISEL-NEXT: v_mad_u64_u32 v[22:23], s[4:5], v14, v24, v[22:23]
+; GISEL-NEXT: v_mad_u64_u32 v[17:18], vcc, v8, v33, v[17:18]
+; GISEL-NEXT: v_mad_u64_u32 v[21:22], s[4:5], v12, v25, v[21:22]
+; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[6:7], v9, v32, v[17:18]
+; GISEL-NEXT: v_addc_u32_e64 v17, s[6:7], v19, v28, s[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[12:13], s[6:7], v13, v24, v[21:22]
+; GISEL-NEXT: v_addc_u32_e64 v18, s[6:7], v23, v30, s[6:7]
+; GISEL-NEXT: v_addc_u32_e32 v17, vcc, v17, v29, vcc
+; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v16
+; GISEL-NEXT: v_subb_u32_e32 v1, vcc, v1, v8, vcc
+; GISEL-NEXT: v_addc_u32_e64 v8, s[4:5], v18, v31, s[4:5]
+; GISEL-NEXT: v_sub_i32_e64 v4, s[4:5], v4, v20
+; GISEL-NEXT: v_subb_u32_e64 v5, s[4:5], v5, v12, s[4:5]
+; GISEL-NEXT: v_mad_u64_u32 v[16:17], s[6:7], v10, v33, v[17:18]
+; GISEL-NEXT: v_mad_u64_u32 v[18:19], s[6:7], v14, v25, v[8:9]
+; GISEL-NEXT: v_mad_u64_u32 v[10:11], s[6:7], v11, v32, v[16:17]
+; GISEL-NEXT: v_mad_u64_u32 v[11:12], s[6:7], v15, v24, v[18:19]
+; GISEL-NEXT: v_subb_u32_e32 v2, vcc, v2, v9, vcc
+; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v3, v10, vcc
+; GISEL-NEXT: v_subb_u32_e64 v6, vcc, v6, v13, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v7, v11, vcc
+; GISEL-NEXT: s_setpc_b64 s[30:31]
%shl = urem <2 x i128> %lhs, %rhs
ret <2 x i128> %shl
}
diff --git a/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll b/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
index 0069370..05558c5 100644
--- a/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
+++ b/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
@@ -42,6 +42,6 @@ attributes #0 = { "amdgpu-no-dispatch-id" }
;.
; AKF_GCN: attributes #[[ATTR0]] = { "amdgpu-calls" "amdgpu-no-dispatch-id" "amdgpu-stack-objects" }
;.
-; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_GCN: attributes #[[ATTR1]] = { "amdgpu-no-dispatch-id" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll b/llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll
index 4ed1b8a..e198197 100644
--- a/llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll
@@ -471,25 +471,15 @@ define amdgpu_kernel void @test_fold_canonicalize_minnum_value_from_load_f32_iee
ret void
}
-; GCN-LABEL: test_fold_canonicalize_minnum_value_from_load_f32_nnan_ieee_mode:
-; VI-FLUSH: v_mul_f32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}
-; GCN-DENORM-NOT: v_max
-; GCN-DENORM-NOT: v_mul
-
-; GCN: v_min_f32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
-; GCN-DENORM-NOT: v_max
-; GCN-DENORM-NOT: v_mul
-
-; GFX9: {{flat|global}}_store_dword
-define amdgpu_kernel void @test_fold_canonicalize_minnum_value_from_load_f32_nnan_ieee_mode(ptr addrspace(1) %arg) #1 {
- %id = tail call i32 @llvm.amdgcn.workitem.id.x()
- %gep = getelementptr inbounds float, ptr addrspace(1) %arg, i32 %id
- %load = load float, ptr addrspace(1) %gep, align 4
- %v = tail call float @llvm.minnum.f32(float %load, float 0.0)
- %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
- store float %canonicalized, ptr addrspace(1) %gep, align 4
- ret void
-}
+; define amdgpu_kernel void @test_fold_canonicalize_minnum_value_from_load_f32_nnan_ieee_mode(ptr addrspace(1) %arg) #1 {
+; %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+; %gep = getelementptr inbounds float, ptr addrspace(1) %arg, i32 %id
+; %load = load float, ptr addrspace(1) %gep, align 4
+; %v = tail call float @llvm.minnum.f32(float %load, float 0.0)
+; %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+; store float %canonicalized, ptr addrspace(1) %gep, align 4
+; ret void
+; }
; GCN-LABEL: test_fold_canonicalize_minnum_value_f32:
; GCN: v_min_f32_e32 [[V:v[0-9]+]], 0, v{{[0-9]+}}
@@ -523,32 +513,15 @@ define amdgpu_kernel void @test_fold_canonicalize_sNaN_value_f32(ptr addrspace(1
ret void
}
-; GCN-LABEL: test_fold_canonicalize_denorm_value_f32:
-; GCN: {{flat|global}}_load_dword [[VAL:v[0-9]+]]
-
-; GFX9-DENORM: v_max_f32_e32 [[QUIET:v[0-9]+]], [[VAL]], [[VAL]]
-; GFX9-DENORM: v_min_f32_e32 [[RESULT:v[0-9]+]], 0x7fffff, [[QUIET]]
-
-; GFX9-FLUSH: v_max_f32_e32 [[QUIET:v[0-9]+]], [[VAL]], [[VAL]]
-; GFX9-FLUSH: v_min_f32_e32 [[RESULT:v[0-9]+]], 0, [[QUIET]]
-
-; VI-FLUSH: v_mul_f32_e32 [[QUIET_V0:v[0-9]+]], 1.0, [[VAL]]
-; VI-FLUSH: v_min_f32_e32 [[RESULT:v[0-9]+]], 0, [[QUIET_V0]]
-
-; VI-DENORM: v_min_f32_e32 [[RESULT:v[0-9]+]], 0x7fffff, [[VAL]]
-
-; GCN-NOT: v_mul
-; GCN-NOT: v_max
-; GCN: {{flat|global}}_store_dword v{{.+}}, [[RESULT]]
-define amdgpu_kernel void @test_fold_canonicalize_denorm_value_f32(ptr addrspace(1) %arg) {
- %id = tail call i32 @llvm.amdgcn.workitem.id.x()
- %gep = getelementptr inbounds float, ptr addrspace(1) %arg, i32 %id
- %load = load float, ptr addrspace(1) %gep, align 4
- %v = tail call float @llvm.minnum.f32(float %load, float bitcast (i32 8388607 to float))
- %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
- store float %canonicalized, ptr addrspace(1) %gep, align 4
- ret void
-}
+; define amdgpu_kernel void @test_fold_canonicalize_denorm_value_f32(ptr addrspace(1) %arg) {
+; %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+; %gep = getelementptr inbounds float, ptr addrspace(1) %arg, i32 %id
+; %load = load float, ptr addrspace(1) %gep, align 4
+; %v = tail call float @llvm.minnum.f32(float %load, float bitcast (i32 8388607 to float))
+; %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+; store float %canonicalized, ptr addrspace(1) %gep, align 4
+; ret void
+; }
; GCN-LABEL: test_fold_canonicalize_maxnum_value_from_load_f32_ieee_mode:
; GCN: {{flat|global}}_load_dword [[VAL:v[0-9]+]]
@@ -674,10 +647,9 @@ define amdgpu_kernel void @test_fold_canonicalize_load_nnan_value_f64(ptr addrsp
}
; GCN-LABEL: {{^}}test_fold_canonicalize_load_nnan_value_f16
-; GCN: {{flat|global}}_load_ushort [[V:v[0-9]+]],
-; GCN-NOT: v_mul
-; GCN-NOT: v_max
-; GCN: {{flat|global}}_store_short v{{.+}}, [[V]]
+; GCN: {{flat|global}}_load_ushort [[V1:v[0-9]+]],
+; GCN: v_max_f16_e32 [[V2:v[0-9]+]], [[V1]], [[V1]]
+; GCN: {{flat|global}}_store_short v{{.+}}, [[V2]]
define amdgpu_kernel void @test_fold_canonicalize_load_nnan_value_f16(ptr addrspace(1) %arg, ptr addrspace(1) %out) #1 {
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds half, ptr addrspace(1) %arg, i32 %id
@@ -807,18 +779,13 @@ define half @v_test_canonicalize_extract_element_v2f16(<2 x half> %vec) {
ret half %canonicalized
}
-; GCN-LABEL: {{^}}v_test_canonicalize_insertelement_v2f16:
-; GFX9: v_mul_f16_e32
-; GFX9: v_pk_mul_f16
-; GFX9-NOT: v_max
-; GFX9-NOT: v_pk_max
-define <2 x half> @v_test_canonicalize_insertelement_v2f16(<2 x half> %vec, half %val, i32 %idx) {
- %vec.op = fmul <2 x half> %vec, <half 4.0, half 4.0>
- %ins.op = fmul half %val, 8.0
- %ins = insertelement <2 x half> %vec.op, half %ins.op, i32 %idx
- %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %ins)
- ret <2 x half> %canonicalized
-}
+; define <2 x half> @v_test_canonicalize_insertelement_v2f16(<2 x half> %vec, half %val, i32 %idx) {
+; %vec.op = fmul <2 x half> %vec, <half 4.0, half 4.0>
+; %ins.op = fmul half %val, 8.0
+; %ins = insertelement <2 x half> %vec.op, half %ins.op, i32 %idx
+; %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %ins)
+; ret <2 x half> %canonicalized
+; }
; GCN-LABEL: {{^}}v_test_canonicalize_insertelement_noncanon_vec_v2f16:
; GFX9: v_mul_f16
@@ -842,15 +809,11 @@ define <2 x half> @v_test_canonicalize_insertelement_noncanon_insval_v2f16(<2 x
ret <2 x half> %canonicalized
}
-; GCN-LABEL: {{^}}v_test_canonicalize_cvt_pkrtz:
-; GCN: s_waitcnt
-; GCN-NEXT: v_cvt_pkrtz_f16_f32 v0, v0, v1
-; GCN-NEXT: s_setpc_b64
-define <2 x half> @v_test_canonicalize_cvt_pkrtz(float %a, float %b) {
- %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %a, float %b)
- %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %cvt)
- ret <2 x half> %canonicalized
-}
+; define <2 x half> @v_test_canonicalize_cvt_pkrtz(float %a, float %b) {
+; %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %a, float %b)
+; %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %cvt)
+; ret <2 x half> %canonicalized
+; }
; GCN-LABEL: {{^}}v_test_canonicalize_cubeid:
; GCN: s_waitcnt
diff --git a/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll b/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
index 27462130..581b7b4 100644
--- a/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
@@ -94,7 +94,6 @@ define amdgpu_kernel void @v_test_canonicalize_var_f16(ptr addrspace(1) %out) #1
; CI-NEXT: buffer_load_ushort v0, off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
@@ -147,7 +146,6 @@ define amdgpu_kernel void @s_test_canonicalize_var_f16(ptr addrspace(1) %out, i1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e32 v0, s2
; CI-NEXT: s_mov_b32 s2, -1
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
@@ -170,6 +168,35 @@ define amdgpu_kernel void @s_test_canonicalize_var_f16(ptr addrspace(1) %out, i1
ret void
}
+define half @s_test_canonicalize_arg(half %x) #1 {
+; VI-LABEL: s_test_canonicalize_arg:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_max_f16_e32 v0, v0, v0
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_test_canonicalize_arg:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; CI-LABEL: s_test_canonicalize_arg:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; CI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_test_canonicalize_arg:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %canonicalized = call half @llvm.canonicalize.f16(half %x)
+ ret half %canonicalized
+}
+
define <2 x half> @v_test_canonicalize_build_vector_v2f16(half %lo, half %hi) #1 {
; VI-LABEL: v_test_canonicalize_build_vector_v2f16:
; VI: ; %bb.0:
@@ -242,7 +269,6 @@ define amdgpu_kernel void @v_test_canonicalize_fabs_var_f16(ptr addrspace(1) %ou
; CI-NEXT: buffer_load_ushort v0, off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e64 v0, |v0|
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
@@ -299,7 +325,6 @@ define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_f16(ptr addrspace(1
; CI-NEXT: buffer_load_ushort v0, off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e64 v0, -|v0|
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
@@ -357,7 +382,6 @@ define amdgpu_kernel void @v_test_canonicalize_fneg_var_f16(ptr addrspace(1) %ou
; CI-NEXT: buffer_load_ushort v0, off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e64 v0, -v0
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
@@ -414,7 +438,6 @@ define amdgpu_kernel void @v_test_no_denormals_canonicalize_fneg_var_f16(ptr add
; CI-NEXT: buffer_load_ushort v0, off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e64 v0, -v0
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
@@ -471,7 +494,6 @@ define amdgpu_kernel void @v_test_no_denormals_canonicalize_fneg_fabs_var_f16(pt
; CI-NEXT: buffer_load_ushort v0, off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e64 v0, -|v0|
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
@@ -1246,9 +1268,7 @@ define amdgpu_kernel void @v_test_canonicalize_var_v2f16(ptr addrspace(1) %out)
; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; CI-NEXT: v_or_b32_e32 v0, v0, v1
@@ -1323,9 +1343,7 @@ define amdgpu_kernel void @v_test_canonicalize_fabs_var_v2f16(ptr addrspace(1) %
; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; CI-NEXT: v_cvt_f32_f16_e64 v1, |v1|
; CI-NEXT: v_cvt_f32_f16_e64 v0, |v0|
-; CI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; CI-NEXT: v_or_b32_e32 v0, v0, v1
@@ -1404,9 +1422,7 @@ define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_v2f16(ptr addrspace
; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; CI-NEXT: v_or_b32_e32 v0, v0, v1
@@ -1485,9 +1501,7 @@ define amdgpu_kernel void @v_test_canonicalize_fneg_var_v2f16(ptr addrspace(1) %
; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; CI-NEXT: v_or_b32_e32 v0, v0, v1
@@ -1551,9 +1565,7 @@ define amdgpu_kernel void @s_test_canonicalize_var_v2f16(ptr addrspace(1) %out,
; CI-NEXT: v_cvt_f32_f16_e32 v1, s2
; CI-NEXT: s_mov_b32 s3, 0xf000
; CI-NEXT: s_mov_b32 s2, -1
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; CI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; CI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -2424,7 +2436,6 @@ define <2 x half> @v_test_canonicalize_reg_undef_v2f16(half %val) #1 {
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_mov_b32_e32 v1, 0x7fc00000
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_canonicalize_reg_undef_v2f16:
@@ -2456,8 +2467,7 @@ define <2 x half> @v_test_canonicalize_undef_reg_v2f16(half %val) #1 {
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v1, 1.0, v0
+; CI-NEXT: v_cvt_f32_f16_e32 v1, v0
; CI-NEXT: v_mov_b32_e32 v0, 0x7fc00000
; CI-NEXT: s_setpc_b64 s[30:31]
;
@@ -2738,7 +2748,6 @@ define <4 x half> @v_test_canonicalize_reg_undef_undef_undef_v4f16(half %val) #1
; CI-NEXT: v_mov_b32_e32 v2, 0x7fc00000
; CI-NEXT: v_mov_b32_e32 v3, 0x7fc00000
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; CI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_canonicalize_reg_undef_undef_undef_v4f16:
@@ -2782,8 +2791,6 @@ define <4 x half> @v_test_canonicalize_reg_reg_undef_undef_v4f16(half %val0, hal
; CI-NEXT: v_mov_b32_e32 v3, 0x7fc00000
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; CI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; CI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_canonicalize_reg_reg_undef_undef_v4f16:
@@ -2826,13 +2833,10 @@ define <4 x half> @v_test_canonicalize_reg_undef_reg_reg_v4f16(half %val0, half
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v2
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; CI-NEXT: v_cvt_f32_f16_e32 v3, v2
-; CI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; CI-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; CI-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v1
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
; CI-NEXT: v_mov_b32_e32 v1, 0x7fc00000
; CI-NEXT: s_setpc_b64 s[30:31]
;
@@ -2878,18 +2882,18 @@ define <6 x half> @v_test_canonicalize_var_v6f16(<6 x half> %val) #1 {
; CI-LABEL: v_test_canonicalize_var_v6f16:
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
; CI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_canonicalize_var_v6f16:
@@ -2933,22 +2937,22 @@ define <8 x half> @v_test_canonicalize_var_v8f16(<8 x half> %val) #1 {
; CI-LABEL: v_test_canonicalize_var_v8f16:
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
-; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
; CI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_canonicalize_var_v8f16:
@@ -3001,30 +3005,30 @@ define <12 x half> @v_test_canonicalize_var_v12f16(<12 x half> %val) #1 {
; CI-LABEL: v_test_canonicalize_var_v12f16:
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; CI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; CI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
-; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
-; CI-NEXT: v_cvt_f16_f32_e32 v9, v9
-; CI-NEXT: v_cvt_f16_f32_e32 v10, v10
-; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; CI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; CI-NEXT: v_cvt_f32_f16_e32 v8, v8
+; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
-; CI-NEXT: v_cvt_f32_f16_e32 v8, v8
-; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
-; CI-NEXT: v_cvt_f32_f16_e32 v10, v10
-; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
; CI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_canonicalize_var_v12f16:
@@ -3087,38 +3091,38 @@ define <16 x half> @v_test_canonicalize_var_v16f16(<16 x half> %val) #1 {
; CI-LABEL: v_test_canonicalize_var_v16f16:
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; CI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; CI-NEXT: v_cvt_f16_f32_e32 v12, v12
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; CI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; CI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
-; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
-; CI-NEXT: v_cvt_f16_f32_e32 v9, v9
-; CI-NEXT: v_cvt_f16_f32_e32 v10, v10
-; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; CI-NEXT: v_cvt_f16_f32_e32 v12, v12
-; CI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
-; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; CI-NEXT: v_cvt_f32_f16_e32 v14, v14
+; CI-NEXT: v_cvt_f32_f16_e32 v13, v13
+; CI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; CI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; CI-NEXT: v_cvt_f32_f16_e32 v8, v8
+; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
-; CI-NEXT: v_cvt_f32_f16_e32 v8, v8
-; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
-; CI-NEXT: v_cvt_f32_f16_e32 v10, v10
-; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
-; CI-NEXT: v_cvt_f32_f16_e32 v12, v12
-; CI-NEXT: v_cvt_f32_f16_e32 v13, v13
-; CI-NEXT: v_cvt_f32_f16_e32 v14, v14
-; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
; CI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_canonicalize_var_v16f16:
@@ -3216,68 +3220,68 @@ define <32 x half> @v_test_canonicalize_var_v32f16(<32 x half> %val) #1 {
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CI-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; CI-NEXT: v_cvt_f16_f32_e32 v30, v30
+; CI-NEXT: v_cvt_f16_f32_e32 v29, v29
+; CI-NEXT: v_cvt_f16_f32_e32 v28, v28
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
+; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; CI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; CI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; CI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; CI-NEXT: v_cvt_f16_f32_e32 v19, v19
+; CI-NEXT: v_cvt_f16_f32_e32 v18, v18
+; CI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v16, v16
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; CI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; CI-NEXT: v_cvt_f16_f32_e32 v12, v12
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; CI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; CI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
-; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
-; CI-NEXT: v_cvt_f16_f32_e32 v9, v9
-; CI-NEXT: v_cvt_f16_f32_e32 v10, v10
-; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; CI-NEXT: v_cvt_f16_f32_e32 v12, v12
-; CI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
-; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
-; CI-NEXT: v_cvt_f16_f32_e32 v16, v16
-; CI-NEXT: v_cvt_f16_f32_e32 v17, v17
-; CI-NEXT: v_cvt_f16_f32_e32 v18, v18
-; CI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; CI-NEXT: v_cvt_f16_f32_e32 v20, v20
-; CI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
-; CI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
-; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
-; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
-; CI-NEXT: v_cvt_f16_f32_e32 v28, v28
-; CI-NEXT: v_cvt_f16_f32_e32 v29, v29
-; CI-NEXT: v_cvt_f16_f32_e32 v30, v30
+; CI-NEXT: v_cvt_f32_f16_e32 v30, v30
+; CI-NEXT: v_cvt_f32_f16_e32 v29, v29
+; CI-NEXT: v_cvt_f32_f16_e32 v28, v28
+; CI-NEXT: v_cvt_f32_f16_e32 v27, v27
+; CI-NEXT: v_cvt_f32_f16_e32 v26, v26
+; CI-NEXT: v_cvt_f32_f16_e32 v25, v25
+; CI-NEXT: v_cvt_f32_f16_e32 v24, v24
+; CI-NEXT: v_cvt_f32_f16_e32 v23, v23
+; CI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; CI-NEXT: v_cvt_f32_f16_e32 v21, v21
+; CI-NEXT: v_cvt_f32_f16_e32 v20, v20
+; CI-NEXT: v_cvt_f32_f16_e32 v19, v19
+; CI-NEXT: v_cvt_f32_f16_e32 v18, v18
+; CI-NEXT: v_cvt_f32_f16_e32 v17, v17
+; CI-NEXT: v_cvt_f32_f16_e32 v16, v16
+; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; CI-NEXT: v_cvt_f32_f16_e32 v14, v14
+; CI-NEXT: v_cvt_f32_f16_e32 v13, v13
+; CI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; CI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; CI-NEXT: v_cvt_f32_f16_e32 v8, v8
+; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
-; CI-NEXT: v_cvt_f32_f16_e32 v8, v8
-; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
-; CI-NEXT: v_cvt_f32_f16_e32 v10, v10
-; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
-; CI-NEXT: v_cvt_f32_f16_e32 v12, v12
-; CI-NEXT: v_cvt_f32_f16_e32 v13, v13
-; CI-NEXT: v_cvt_f32_f16_e32 v14, v14
-; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
-; CI-NEXT: v_cvt_f32_f16_e32 v16, v16
-; CI-NEXT: v_cvt_f32_f16_e32 v17, v17
-; CI-NEXT: v_cvt_f32_f16_e32 v18, v18
-; CI-NEXT: v_cvt_f32_f16_e32 v19, v19
-; CI-NEXT: v_cvt_f32_f16_e32 v20, v20
-; CI-NEXT: v_cvt_f32_f16_e32 v21, v21
-; CI-NEXT: v_cvt_f32_f16_e32 v22, v22
-; CI-NEXT: v_cvt_f32_f16_e32 v23, v23
-; CI-NEXT: v_cvt_f32_f16_e32 v24, v24
-; CI-NEXT: v_cvt_f32_f16_e32 v25, v25
-; CI-NEXT: v_cvt_f32_f16_e32 v26, v26
-; CI-NEXT: v_cvt_f32_f16_e32 v27, v27
-; CI-NEXT: v_cvt_f32_f16_e32 v28, v28
-; CI-NEXT: v_cvt_f32_f16_e32 v29, v29
-; CI-NEXT: v_cvt_f32_f16_e32 v30, v30
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f16_f32_e32 v31, v31
; CI-NEXT: v_cvt_f32_f16_e32 v31, v31
@@ -3456,228 +3460,354 @@ define <64 x half> @v_test_canonicalize_var_v64f16(<64 x half> %val) #1 {
; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:104
+; CI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; CI-NEXT: v_or_b32_e32 v1, v1, v2
; CI-NEXT: v_cvt_f16_f32_e32 v2, v4
; CI-NEXT: v_cvt_f16_f32_e32 v4, v5
; CI-NEXT: v_cvt_f16_f32_e32 v5, v7
; CI-NEXT: v_cvt_f16_f32_e32 v7, v9
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; CI-NEXT: v_or_b32_e32 v2, v3, v2
; CI-NEXT: v_cvt_f16_f32_e32 v3, v6
; CI-NEXT: v_cvt_f16_f32_e32 v6, v10
; CI-NEXT: v_cvt_f16_f32_e32 v9, v13
-; CI-NEXT: v_cvt_f16_f32_e32 v10, v18
+; CI-NEXT: v_cvt_f16_f32_e32 v10, v16
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; CI-NEXT: v_cvt_f16_f32_e32 v13, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; CI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; CI-NEXT: v_cvt_f32_f16_e32 v13, v13
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_or_b32_e32 v3, v4, v3
; CI-NEXT: v_cvt_f16_f32_e32 v4, v8
; CI-NEXT: v_cvt_f16_f32_e32 v8, v14
-; CI-NEXT: v_cvt_f16_f32_e32 v13, v21
-; CI-NEXT: v_cvt_f16_f32_e32 v14, v26
+; CI-NEXT: buffer_load_dword v14, off, s[0:3], s32
+; CI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4
+; CI-NEXT: v_cvt_f16_f32_e32 v17, v23
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; CI-NEXT: v_cvt_f32_f16_e32 v8, v8
+; CI-NEXT: v_cvt_f32_f16_e32 v17, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
; CI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; CI-NEXT: v_or_b32_e32 v4, v5, v4
; CI-NEXT: v_lshlrev_b32_e32 v5, 16, v6
; CI-NEXT: v_cvt_f16_f32_e32 v6, v12
; CI-NEXT: v_or_b32_e32 v5, v7, v5
; CI-NEXT: v_cvt_f16_f32_e32 v7, v11
-; CI-NEXT: v_cvt_f16_f32_e32 v11, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v15
+; CI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v21
+; CI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; CI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; CI-NEXT: v_cvt_f16_f32_e32 v7, v7
; CI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; CI-NEXT: v_cvt_f16_f32_e32 v12, v22
; CI-NEXT: v_or_b32_e32 v6, v7, v6
; CI-NEXT: v_lshlrev_b32_e32 v7, 16, v8
-; CI-NEXT: v_cvt_f16_f32_e32 v8, v16
+; CI-NEXT: v_cvt_f16_f32_e32 v8, v19
; CI-NEXT: v_or_b32_e32 v7, v9, v7
-; CI-NEXT: v_cvt_f16_f32_e32 v9, v15
-; CI-NEXT: v_cvt_f16_f32_e32 v15, v25
+; CI-NEXT: v_cvt_f16_f32_e32 v9, v20
+; CI-NEXT: v_cvt_f32_f16_e32 v12, v8
+; CI-NEXT: v_cvt_f32_f16_e32 v8, v10
+; CI-NEXT: v_cvt_f32_f16_e32 v10, v11
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v18
+; CI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:124
+; CI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:112
+; CI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:116
+; CI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; CI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
; CI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
-; CI-NEXT: v_cvt_f16_f32_e32 v25, v29
-; CI-NEXT: v_or_b32_e32 v8, v9, v8
+; CI-NEXT: v_or_b32_e32 v8, v10, v8
+; CI-NEXT: v_cvt_f16_f32_e32 v10, v11
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v13
+; CI-NEXT: v_cvt_f16_f32_e32 v13, v9
+; CI-NEXT: v_cvt_f16_f32_e32 v12, v12
; CI-NEXT: v_lshlrev_b32_e32 v9, 16, v10
-; CI-NEXT: v_cvt_f16_f32_e32 v10, v20
; CI-NEXT: v_or_b32_e32 v9, v11, v9
-; CI-NEXT: v_cvt_f16_f32_e32 v11, v19
-; CI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:4
-; CI-NEXT: buffer_load_dword v17, off, s[0:3], s32
-; CI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:12
-; CI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:8
-; CI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
-; CI-NEXT: v_or_b32_e32 v10, v11, v10
-; CI-NEXT: v_lshlrev_b32_e32 v11, 16, v12
-; CI-NEXT: v_cvt_f16_f32_e32 v12, v24
+; CI-NEXT: v_lshlrev_b32_e32 v10, 16, v13
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v25
+; CI-NEXT: v_cvt_f16_f32_e32 v13, v22
+; CI-NEXT: v_or_b32_e32 v10, v12, v10
+; CI-NEXT: v_cvt_f16_f32_e32 v12, v26
+; CI-NEXT: v_cvt_f32_f16_e32 v16, v11
+; CI-NEXT: v_cvt_f32_f16_e32 v11, v13
+; CI-NEXT: v_cvt_f32_f16_e32 v13, v15
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v24
+; CI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; CI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v30
+; CI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; CI-NEXT: v_or_b32_e32 v11, v13, v11
-; CI-NEXT: v_cvt_f16_f32_e32 v13, v23
-; CI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:20
-; CI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:16
-; CI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:28
-; CI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:24
-; CI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
-; CI-NEXT: v_cvt_f16_f32_e32 v24, v30
-; CI-NEXT: v_or_b32_e32 v12, v13, v12
-; CI-NEXT: v_lshlrev_b32_e32 v13, 16, v14
-; CI-NEXT: v_or_b32_e32 v13, v15, v13
-; CI-NEXT: v_cvt_f16_f32_e32 v14, v28
+; CI-NEXT: v_cvt_f16_f32_e32 v13, v15
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v17, v12
+; CI-NEXT: v_cvt_f16_f32_e32 v25, v29
+; CI-NEXT: v_lshlrev_b32_e32 v12, 16, v13
+; CI-NEXT: v_or_b32_e32 v12, v15, v12
+; CI-NEXT: s_waitcnt vmcnt(6)
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v31
+; CI-NEXT: v_lshlrev_b32_e32 v13, 16, v17
+; CI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:128
+; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:132
+; CI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:120
+; CI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; CI-NEXT: v_cvt_f32_f16_e32 v23, v15
; CI-NEXT: v_cvt_f16_f32_e32 v15, v27
-; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:36
-; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:32
-; CI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:44
-; CI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:40
+; CI-NEXT: v_cvt_f32_f16_e32 v25, v25
+; CI-NEXT: s_waitcnt vmcnt(7)
+; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; CI-NEXT: s_waitcnt vmcnt(6)
+; CI-NEXT: v_cvt_f16_f32_e32 v21, v33
+; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; CI-NEXT: v_cvt_f32_f16_e32 v24, v14
+; CI-NEXT: v_cvt_f16_f32_e32 v14, v28
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; CI-NEXT: v_cvt_f32_f16_e32 v21, v21
+; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
+; CI-NEXT: v_cvt_f32_f16_e32 v14, v14
+; CI-NEXT: v_cvt_f16_f32_e32 v16, v16
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; CI-NEXT: v_cvt_f16_f32_e32 v28, v23
+; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; CI-NEXT: v_or_b32_e32 v13, v16, v13
+; CI-NEXT: v_cvt_f16_f32_e32 v16, v32
+; CI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:12
; CI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; CI-NEXT: v_or_b32_e32 v14, v15, v14
-; CI-NEXT: v_lshlrev_b32_e32 v15, 16, v24
+; CI-NEXT: v_lshlrev_b32_e32 v15, 16, v22
; CI-NEXT: v_or_b32_e32 v15, v25, v15
-; CI-NEXT: s_waitcnt vmcnt(11)
-; CI-NEXT: v_cvt_f16_f32_e32 v16, v16
-; CI-NEXT: s_waitcnt vmcnt(10)
-; CI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v25, v21
+; CI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:96
+; CI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:100
+; CI-NEXT: v_cvt_f32_f16_e32 v16, v16
+; CI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:64
+; CI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v16
+; CI-NEXT: v_or_b32_e32 v16, v24, v25
+; CI-NEXT: v_lshlrev_b32_e32 v24, 16, v27
+; CI-NEXT: v_or_b32_e32 v25, v28, v24
; CI-NEXT: s_waitcnt vmcnt(9)
; CI-NEXT: v_cvt_f16_f32_e32 v18, v18
; CI-NEXT: s_waitcnt vmcnt(8)
; CI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; CI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
-; CI-NEXT: v_or_b32_e32 v16, v17, v16
-; CI-NEXT: v_lshlrev_b32_e32 v17, 16, v18
-; CI-NEXT: v_or_b32_e32 v17, v19, v17
; CI-NEXT: s_waitcnt vmcnt(7)
-; CI-NEXT: v_cvt_f16_f32_e32 v18, v20
+; CI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; CI-NEXT: v_cvt_f32_f16_e32 v18, v18
+; CI-NEXT: v_cvt_f32_f16_e32 v19, v19
+; CI-NEXT: v_cvt_f32_f16_e32 v20, v20
+; CI-NEXT: v_cvt_f16_f32_e32 v18, v18
+; CI-NEXT: v_cvt_f16_f32_e32 v19, v19
+; CI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; CI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; CI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; CI-NEXT: v_or_b32_e32 v20, v19, v20
+; CI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:20
+; CI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:8
+; CI-NEXT: s_waitcnt vmcnt(8)
+; CI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; CI-NEXT: s_waitcnt vmcnt(7)
+; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
; CI-NEXT: s_waitcnt vmcnt(6)
-; CI-NEXT: v_cvt_f16_f32_e32 v19, v21
-; CI-NEXT: s_waitcnt vmcnt(5)
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v34
+; CI-NEXT: v_cvt_f32_f16_e32 v17, v17
+; CI-NEXT: v_cvt_f32_f16_e32 v26, v26
+; CI-NEXT: v_cvt_f32_f16_e32 v27, v27
+; CI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
+; CI-NEXT: v_or_b32_e32 v17, v17, v26
+; CI-NEXT: v_add_i32_e32 v26, vcc, 0x7c, v0
+; CI-NEXT: v_or_b32_e32 v18, v27, v18
+; CI-NEXT: buffer_store_dword v17, v26, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x78, v0
+; CI-NEXT: buffer_store_dword v18, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x74, v0
+; CI-NEXT: buffer_store_dword v20, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x70, v0
+; CI-NEXT: buffer_store_dword v25, v17, s[0:3], 0 offen
+; CI-NEXT: s_waitcnt vmcnt(8)
+; CI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; CI-NEXT: s_waitcnt vmcnt(7)
; CI-NEXT: v_cvt_f16_f32_e32 v20, v22
-; CI-NEXT: s_waitcnt vmcnt(4)
-; CI-NEXT: v_cvt_f16_f32_e32 v21, v23
-; CI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
-; CI-NEXT: v_or_b32_e32 v18, v19, v18
-; CI-NEXT: v_lshlrev_b32_e32 v19, 16, v20
-; CI-NEXT: v_or_b32_e32 v19, v21, v19
-; CI-NEXT: s_waitcnt vmcnt(3)
-; CI-NEXT: v_cvt_f16_f32_e32 v20, v26
-; CI-NEXT: s_waitcnt vmcnt(2)
-; CI-NEXT: v_cvt_f16_f32_e32 v21, v27
-; CI-NEXT: s_waitcnt vmcnt(1)
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v28
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_cvt_f16_f32_e32 v27, v29
+; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:88
+; CI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:92
+; CI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:80
+; CI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:84
+; CI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:72
+; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:76
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v23
+; CI-NEXT: v_cvt_f32_f16_e32 v21, v21
+; CI-NEXT: v_cvt_f32_f16_e32 v20, v20
+; CI-NEXT: s_waitcnt vmcnt(12)
+; CI-NEXT: v_cvt_f16_f32_e32 v29, v29
+; CI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; CI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; CI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; CI-NEXT: v_cvt_f32_f16_e32 v29, v29
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
; CI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
; CI-NEXT: v_or_b32_e32 v20, v21, v20
-; CI-NEXT: v_lshlrev_b32_e32 v21, 16, v26
-; CI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:52
-; CI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:48
-; CI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:60
-; CI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:56
-; CI-NEXT: v_or_b32_e32 v21, v27, v21
-; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:132
-; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:128
-; CI-NEXT: s_waitcnt vmcnt(5)
-; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
-; CI-NEXT: s_waitcnt vmcnt(4)
-; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
-; CI-NEXT: s_waitcnt vmcnt(3)
+; CI-NEXT: v_add_i32_e32 v21, vcc, 0x6c, v0
+; CI-NEXT: buffer_store_dword v20, v21, s[0:3], 0 offen
+; CI-NEXT: v_lshlrev_b32_e32 v20, 16, v22
+; CI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:24
+; CI-NEXT: v_cvt_f16_f32_e32 v29, v29
+; CI-NEXT: s_waitcnt vmcnt(13)
+; CI-NEXT: v_cvt_f16_f32_e32 v19, v19
+; CI-NEXT: s_waitcnt vmcnt(12)
+; CI-NEXT: v_cvt_f16_f32_e32 v23, v24
+; CI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:28
+; CI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:16
+; CI-NEXT: v_cvt_f32_f16_e32 v19, v19
+; CI-NEXT: v_cvt_f32_f16_e32 v23, v23
+; CI-NEXT: v_cvt_f16_f32_e32 v19, v19
; CI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; CI-NEXT: s_waitcnt vmcnt(2)
-; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
-; CI-NEXT: s_waitcnt vmcnt(1)
+; CI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; CI-NEXT: v_or_b32_e32 v20, v23, v20
+; CI-NEXT: s_waitcnt vmcnt(9)
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: s_waitcnt vmcnt(8)
+; CI-NEXT: v_cvt_f16_f32_e32 v23, v28
+; CI-NEXT: s_waitcnt vmcnt(7)
+; CI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; CI-NEXT: s_waitcnt vmcnt(6)
+; CI-NEXT: v_cvt_f16_f32_e32 v18, v18
+; CI-NEXT: v_cvt_f32_f16_e32 v27, v27
+; CI-NEXT: v_cvt_f32_f16_e32 v23, v23
+; CI-NEXT: s_waitcnt vmcnt(4)
; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
-; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
-; CI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
-; CI-NEXT: v_or_b32_e32 v24, v25, v24
-; CI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
-; CI-NEXT: v_or_b32_e32 v26, v27, v26
-; CI-NEXT: v_add_i32_e32 v27, vcc, 0x7c, v0
-; CI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen
-; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:124
-; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:120
+; CI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; CI-NEXT: v_cvt_f32_f16_e32 v18, v18
+; CI-NEXT: v_cvt_f32_f16_e32 v26, v26
+; CI-NEXT: v_cvt_f32_f16_e32 v17, v17
; CI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; CI-NEXT: v_or_b32_e32 v22, v22, v23
-; CI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88
-; CI-NEXT: s_waitcnt vmcnt(2)
+; CI-NEXT: v_or_b32_e32 v23, v27, v23
+; CI-NEXT: v_add_i32_e32 v27, vcc, 0x68, v0
+; CI-NEXT: buffer_store_dword v23, v27, s[0:3], 0 offen
+; CI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:32
+; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:36
+; CI-NEXT: v_cvt_f32_f16_e32 v25, v25
+; CI-NEXT: v_cvt_f16_f32_e32 v18, v18
; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
-; CI-NEXT: s_waitcnt vmcnt(1)
-; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
+; CI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
; CI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
-; CI-NEXT: v_or_b32_e32 v26, v27, v26
-; CI-NEXT: v_add_i32_e32 v27, vcc, 0x78, v0
-; CI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen
-; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:116
-; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:112
-; CI-NEXT: s_waitcnt vmcnt(3)
+; CI-NEXT: v_or_b32_e32 v17, v17, v18
+; CI-NEXT: v_add_i32_e32 v18, vcc, 0x64, v0
+; CI-NEXT: v_or_b32_e32 v25, v25, v26
+; CI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x60, v0
+; CI-NEXT: buffer_store_dword v25, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x5c, v0
+; CI-NEXT: s_waitcnt vmcnt(5)
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; CI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; CI-NEXT: v_cvt_f32_f16_e32 v24, v24
+; CI-NEXT: v_cvt_f32_f16_e32 v21, v21
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; CI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; CI-NEXT: v_or_b32_e32 v19, v24, v19
+; CI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:44
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; CI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; CI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; CI-NEXT: v_or_b32_e32 v21, v22, v21
+; CI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:40
+; CI-NEXT: s_waitcnt vmcnt(5)
+; CI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; CI-NEXT: s_waitcnt vmcnt(4)
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_cvt_f32_f16_e32 v23, v23
+; CI-NEXT: v_cvt_f32_f16_e32 v27, v27
; CI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_lshlrev_b32_e32 v27, 16, v27
; CI-NEXT: s_waitcnt vmcnt(1)
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; CI-NEXT: v_cvt_f32_f16_e32 v24, v24
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; CI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; CI-NEXT: v_cvt_f16_f32_e32 v28, v22
+; CI-NEXT: v_or_b32_e32 v22, v23, v27
+; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:52
+; CI-NEXT: v_lshlrev_b32_e32 v23, 16, v24
+; CI-NEXT: v_or_b32_e32 v23, v28, v23
+; CI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:56
+; CI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:48
+; CI-NEXT: s_waitcnt vmcnt(2)
; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
-; CI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
-; CI-NEXT: v_or_b32_e32 v26, v27, v26
-; CI-NEXT: v_add_i32_e32 v27, vcc, 0x74, v0
-; CI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen
-; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:108
-; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:104
; CI-NEXT: s_waitcnt vmcnt(1)
-; CI-NEXT: v_cvt_f16_f32_e32 v25, v26
+; CI-NEXT: v_cvt_f16_f32_e32 v28, v28
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v27
-; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:92
-; CI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
-; CI-NEXT: v_or_b32_e32 v25, v26, v25
-; CI-NEXT: v_add_i32_e32 v26, vcc, 0x70, v0
-; CI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen
-; CI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:100
-; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:96
-; CI-NEXT: s_waitcnt vmcnt(3)
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; CI-NEXT: v_cvt_f32_f16_e32 v27, v27
+; CI-NEXT: v_cvt_f32_f16_e32 v28, v28
+; CI-NEXT: v_cvt_f32_f16_e32 v24, v24
; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_cvt_f16_f32_e32 v28, v28
+; CI-NEXT: v_cvt_f16_f32_e32 v24, v24
; CI-NEXT: v_lshlrev_b32_e32 v27, 16, v27
-; CI-NEXT: v_or_b32_e32 v23, v23, v27
-; CI-NEXT: s_waitcnt vmcnt(1)
-; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
+; CI-NEXT: v_or_b32_e32 v24, v24, v27
+; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:60
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
-; CI-NEXT: v_add_i32_e32 v27, vcc, 0x68, v0
-; CI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
-; CI-NEXT: v_or_b32_e32 v25, v26, v25
-; CI-NEXT: v_add_i32_e32 v26, vcc, 0x6c, v0
-; CI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen
-; CI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:68
-; CI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:64
-; CI-NEXT: buffer_store_dword v23, v27, s[0:3], 0 offen
-; CI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:76
-; CI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:72
-; CI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:84
-; CI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:80
-; CI-NEXT: s_waitcnt vmcnt(3)
-; CI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; CI-NEXT: v_cvt_f16_f32_e32 v25, v25
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v26
-; CI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; CI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
-; CI-NEXT: v_or_b32_e32 v25, v26, v25
-; CI-NEXT: s_waitcnt vmcnt(2)
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v27
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_cvt_f32_f16_e32 v27, v27
+; CI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; CI-NEXT: v_lshlrev_b32_e32 v27, 16, v27
+; CI-NEXT: v_or_b32_e32 v27, v28, v27
+; CI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:68
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_cvt_f16_f32_e32 v27, v29
-; CI-NEXT: v_or_b32_e32 v23, v26, v23
-; CI-NEXT: v_cvt_f16_f32_e32 v26, v28
-; CI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
-; CI-NEXT: v_or_b32_e32 v26, v27, v26
-; CI-NEXT: v_add_i32_e32 v27, vcc, 0x64, v0
-; CI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v26, vcc, 0x60, v0
-; CI-NEXT: buffer_store_dword v23, v26, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v23, vcc, 0x5c, v0
-; CI-NEXT: buffer_store_dword v25, v23, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v23, vcc, 0x58, v0
-; CI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v22, vcc, 0x54, v0
-; CI-NEXT: buffer_store_dword v24, v22, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v22, vcc, 0x50, v0
-; CI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v21, vcc, 0x4c, v0
-; CI-NEXT: buffer_store_dword v20, v21, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v20, vcc, 0x48, v0
-; CI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v19, vcc, 0x44, v0
-; CI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen
-; CI-NEXT: v_add_i32_e32 v18, vcc, 64, v0
-; CI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen
+; CI-NEXT: v_cvt_f16_f32_e32 v28, v28
+; CI-NEXT: v_cvt_f32_f16_e32 v28, v28
+; CI-NEXT: v_cvt_f16_f32_e32 v28, v28
+; CI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
+; CI-NEXT: v_or_b32_e32 v28, v29, v28
+; CI-NEXT: buffer_store_dword v28, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x58, v0
+; CI-NEXT: buffer_store_dword v27, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x54, v0
+; CI-NEXT: buffer_store_dword v24, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x50, v0
+; CI-NEXT: buffer_store_dword v23, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x4c, v0
+; CI-NEXT: buffer_store_dword v22, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x48, v0
+; CI-NEXT: buffer_store_dword v21, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 0x44, v0
+; CI-NEXT: buffer_store_dword v19, v17, s[0:3], 0 offen
+; CI-NEXT: v_add_i32_e32 v17, vcc, 64, v0
+; CI-NEXT: buffer_store_dword v20, v17, s[0:3], 0 offen
; CI-NEXT: v_add_i32_e32 v17, vcc, 60, v0
; CI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen
; CI-NEXT: v_add_i32_e32 v16, vcc, 56, v0
diff --git a/llvm/test/CodeGen/AMDGPU/fcanonicalize.ll b/llvm/test/CodeGen/AMDGPU/fcanonicalize.ll
index c1093a1..d53c041 100644
--- a/llvm/test/CodeGen/AMDGPU/fcanonicalize.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcanonicalize.ll
@@ -2389,7 +2389,6 @@ define amdgpu_kernel void @test_canonicalize_value_f16_flush(ptr addrspace(1) %a
; GFX6-NEXT: v_mov_b32_e32 v1, s3
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_cvt_f32_f16_e32 v0, v0
-; GFX6-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v0
; GFX6-NEXT: v_add_i32_e32 v0, vcc, s2, v2
; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
@@ -2471,15 +2470,13 @@ define amdgpu_kernel void @test_canonicalize_value_v2f16_flush(ptr addrspace(1)
; GFX6-NEXT: flat_load_dword v0, v[0:1]
; GFX6-NEXT: v_mov_b32_e32 v3, s3
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v0
-; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v1
; GFX6-NEXT: v_cvt_f32_f16_e32 v0, v0
-; GFX6-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
-; GFX6-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX6-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX6-NEXT: v_or_b32_e32 v4, v1, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v4, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, s2, v2
; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
; GFX6-NEXT: flat_store_dword v[0:1], v4
@@ -2724,7 +2721,6 @@ define amdgpu_kernel void @test_canonicalize_value_f16_denorm(ptr addrspace(1) %
; GFX6-NEXT: v_mov_b32_e32 v1, s3
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_cvt_f32_f16_e32 v0, v0
-; GFX6-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v0
; GFX6-NEXT: v_add_i32_e32 v0, vcc, s2, v2
; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
@@ -2807,15 +2803,13 @@ define amdgpu_kernel void @test_canonicalize_value_v2f16_denorm(ptr addrspace(1)
; GFX6-NEXT: flat_load_dword v0, v[0:1]
; GFX6-NEXT: v_mov_b32_e32 v3, s3
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v0
-; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v1
; GFX6-NEXT: v_cvt_f32_f16_e32 v0, v0
-; GFX6-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
-; GFX6-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX6-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX6-NEXT: v_or_b32_e32 v4, v1, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v4, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, s2, v2
; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
; GFX6-NEXT: flat_store_dword v[0:1], v4
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll
index 78fb89c..b32630a 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll
@@ -951,8 +951,6 @@ define half @v_fneg_minnum_f16_ieee(half %a, half %b) #0 {
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, v0, v1
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1056,7 +1054,6 @@ define half @v_fneg_posk_minnum_f16_ieee(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, -4.0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1110,7 +1107,6 @@ define half @v_fneg_negk_minnum_f16_ieee(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 4.0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1193,7 +1189,6 @@ define half @v_fneg_neg0_minnum_f16_ieee(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1222,7 +1217,6 @@ define half @v_fneg_inv2pi_minnum_f16(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0xbe230000, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1253,7 +1247,6 @@ define half @v_fneg_neg_inv2pi_minnum_f16(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0xbe230000, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1311,7 +1304,6 @@ define half @v_fneg_0_minnum_foldable_use_f16_ieee(half %a, half %b) #0 {
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, 0, v0
; SI-NEXT: v_mul_f32_e64 v0, -v0, v1
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -1346,7 +1338,6 @@ define half @v_fneg_inv2pi_minnum_foldable_use_f16(half %a, half %b) #0 {
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0xbe230000, v0
; SI-NEXT: v_mul_f32_e32 v0, v0, v1
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -1413,8 +1404,6 @@ define { half, half } @v_fneg_minnum_multi_use_minnum_f16_ieee(half %a, half %b)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e64 v1, -v1
; SI-NEXT: v_cvt_f32_f16_e64 v0, -v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, v0, v1
; SI-NEXT: v_mul_f32_e32 v1, -4.0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -1494,8 +1483,6 @@ define half @v_fneg_maxnum_f16_ieee(half %a, half %b) #0 {
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, v0, v1
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1599,7 +1586,6 @@ define half @v_fneg_posk_maxnum_f16_ieee(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, -4.0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1653,7 +1639,6 @@ define half @v_fneg_negk_maxnum_f16_ieee(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, 4.0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1736,7 +1721,6 @@ define half @v_fneg_neg0_maxnum_f16_ieee(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, 0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1792,7 +1776,6 @@ define half @v_fneg_0_maxnum_foldable_use_f16_ieee(half %a, half %b) #0 {
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0, v0
; SI-NEXT: v_mul_f32_e64 v0, -v0, v1
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -1859,8 +1842,6 @@ define { half, half } @v_fneg_maxnum_multi_use_maxnum_f16_ieee(half %a, half %b)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e64 v1, -v1
; SI-NEXT: v_cvt_f32_f16_e64 v0, -v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, v0, v1
; SI-NEXT: v_mul_f32_e32 v1, -4.0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -3980,7 +3961,8 @@ define half @v_fneg_canonicalize_f16(half %a) #0 {
; SI-LABEL: v_fneg_canonicalize_f16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: v_fneg_canonicalize_f16:
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
index 17f6761..b5440b9 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
@@ -1021,7 +1021,6 @@ define half @v_fneg_inv2pi_minnum_f16(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0xbe230000, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1043,7 +1042,6 @@ define half @v_fneg_neg_inv2pi_minnum_f16(half %a) #0 {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e64 v0, -v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0x3e230000, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
diff --git a/llvm/test/CodeGen/AMDGPU/fold-restore-undef-use.mir b/llvm/test/CodeGen/AMDGPU/fold-restore-undef-use.mir
index 3616d61..5ef8a94 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-restore-undef-use.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-restore-undef-use.mir
@@ -8,6 +8,8 @@
---
name: restore_undef_copy_use
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
machineFunctionInfo:
maxKernArgAlign: 1
isEntryFunction: true
diff --git a/llvm/test/CodeGen/AMDGPU/fp-classify.ll b/llvm/test/CodeGen/AMDGPU/fp-classify.ll
index 6fa7df9..18d2e52 100644
--- a/llvm/test/CodeGen/AMDGPU/fp-classify.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp-classify.ll
@@ -618,16 +618,16 @@ define amdgpu_kernel void @test_not_isfinite_pattern_4_wrong_ord_test(ptr addrsp
define amdgpu_kernel void @test_isinf_pattern_f16(ptr addrspace(1) nocapture %out, half %x) #0 {
; SI-LABEL: test_isinf_pattern_f16:
; SI: ; %bb.0:
-; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT: s_load_dword s0, s[0:1], 0xb
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s1, 0x7f800000
+; SI-NEXT: s_load_dword s4, s[0:1], 0xb
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e64 v0, |s0|
-; SI-NEXT: v_cmp_eq_f32_e32 vcc, s1, v0
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_and_b32 s4, s4, 0x7fff
+; SI-NEXT: s_cmpk_eq_i32 s4, 0x7c00
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isinf_pattern_f16:
@@ -667,16 +667,19 @@ define amdgpu_kernel void @test_isinf_pattern_f16(ptr addrspace(1) nocapture %ou
define amdgpu_kernel void @test_isfinite_pattern_0_f16(ptr addrspace(1) nocapture %out, half %x) #0 {
; SI-LABEL: test_isfinite_pattern_0_f16:
; SI: ; %bb.0:
-; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT: s_load_dword s0, s[0:1], 0xb
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_movk_i32 s1, 0x1f8
+; SI-NEXT: s_load_dword s4, s[0:1], 0xb
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s0
-; SI-NEXT: v_cmp_class_f32_e64 s[0:1], v0, s1
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s4
+; SI-NEXT: s_and_b32 s4, s4, 0x7fff
+; SI-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; SI-NEXT: s_cmpk_lg_i32 s4, 0x7c00
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isfinite_pattern_0_f16:
@@ -718,16 +721,19 @@ define amdgpu_kernel void @test_isfinite_pattern_0_f16(ptr addrspace(1) nocaptur
define amdgpu_kernel void @test_isfinite_pattern_4_f16(ptr addrspace(1) nocapture %out, half %x) #0 {
; SI-LABEL: test_isfinite_pattern_4_f16:
; SI: ; %bb.0:
-; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT: s_load_dword s0, s[0:1], 0xb
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_movk_i32 s1, 0x1f8
+; SI-NEXT: s_load_dword s4, s[0:1], 0xb
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s0
-; SI-NEXT: v_cmp_class_f32_e64 s[0:1], v0, s1
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s4
+; SI-NEXT: s_and_b32 s4, s4, 0x7fff
+; SI-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; SI-NEXT: s_cmpk_lt_i32 s4, 0x7c00
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isfinite_pattern_4_f16:
diff --git a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
index 767d347..a948fab 100644
--- a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
@@ -1181,18 +1181,28 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB42_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s6
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB42_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1_vol
@@ -1200,20 +1210,30 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(ptr addrspace(1) %pt
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB42_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB42_2
+; GFX90A-NEXT: .LBB42_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB42_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX940-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: buffer_wbl2 sc0 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] sc1
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc0 sc1
+; GFX940-NEXT: .LBB42_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 seq_cst
@@ -1223,26 +1243,45 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_agent:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB43_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
+; GFX90A-NEXT: .LBB43_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_agent:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB43_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX940-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB43_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -1252,18 +1291,28 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_system:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB44_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s6
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB44_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1_vol
@@ -1271,20 +1320,30 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(ptr addrspace
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB44_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB44_2
+; GFX90A-NEXT: .LBB44_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_system:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB44_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX940-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: buffer_wbl2 sc0 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] sc1
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc0 sc1
+; GFX940-NEXT: .LBB44_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") seq_cst
@@ -1294,26 +1353,45 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_flush(ptr addrspace(1) %ptr) #0 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_flush:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB45_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
+; GFX90A-NEXT: .LBB45_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_flush:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB45_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX940-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB45_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -1485,37 +1563,57 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent_safe(ptr addrspace(1) %ptr) {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_agent_safe:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB52_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s6
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB52_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB52_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB52_2
+; GFX90A-NEXT: .LBB52_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_agent_safe:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB52_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX940-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB52_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -2020,23 +2118,42 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr) #1 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB70_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: .LBB70_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB70_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: .LBB70_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
@@ -2046,23 +2163,42 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3) %ptr) #0 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB71_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: .LBB71_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB71_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: .LBB71_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
@@ -2072,46 +2208,66 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrspace(3) %ptr) #4 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX90A: ; %bb.0: ; %main_body
-; GFX90A-NEXT: s_load_dword s2, s[0:1], 0x24
-; GFX90A-NEXT: s_mov_b64 s[0:1], 0
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB72_3
+; GFX90A-NEXT: ; %bb.1:
+; GFX90A-NEXT: s_load_dword s4, s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_mov_b32_e32 v0, s2
-; GFX90A-NEXT: ds_read_b64 v[0:1], v0
-; GFX90A-NEXT: v_mov_b32_e32 v2, s2
-; GFX90A-NEXT: .LBB72_1: ; %atomicrmw.start
+; GFX90A-NEXT: v_mov_b32_e32 v0, s4
+; GFX90A-NEXT: ds_read_b64 v[2:3], v0
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
+; GFX90A-NEXT: s_mov_b64 s[0:1], 0
+; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: .LBB72_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f64 v[4:5], v[0:1], 4.0
-; GFX90A-NEXT: ds_cmpst_rtn_b64 v[4:5], v2, v[0:1], v[4:5]
+; GFX90A-NEXT: v_add_f64 v[6:7], v[2:3], v[0:1]
+; GFX90A-NEXT: ds_cmpst_rtn_b64 v[6:7], v4, v[2:3], v[6:7]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[0:1]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; GFX90A-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[6:7], v[6:7] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX90A-NEXT: s_cbranch_execnz .LBB72_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB72_2
+; GFX90A-NEXT: .LBB72_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX940: ; %bb.0: ; %main_body
-; GFX940-NEXT: s_load_dword s2, s[0:1], 0x24
-; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB72_3
+; GFX940-NEXT: ; %bb.1:
+; GFX940-NEXT: s_load_dword s4, s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_mov_b32_e32 v0, s2
-; GFX940-NEXT: ds_read_b64 v[0:1], v0
-; GFX940-NEXT: v_mov_b32_e32 v2, s2
-; GFX940-NEXT: .LBB72_1: ; %atomicrmw.start
+; GFX940-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-NEXT: ds_read_b64 v[2:3], v0
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_mov_b32_e32 v4, s4
+; GFX940-NEXT: .LBB72_2: ; %atomicrmw.start
; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_add_f64 v[4:5], v[0:1], 4.0
-; GFX940-NEXT: ds_cmpst_rtn_b64 v[4:5], v2, v[0:1], v[4:5]
+; GFX940-NEXT: v_add_f64 v[6:7], v[2:3], v[0:1]
+; GFX940-NEXT: ds_cmpst_rtn_b64 v[6:7], v4, v[2:3], v[6:7]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[0:1]
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
+; GFX940-NEXT: v_mov_b64_e32 v[2:3], v[6:7]
; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX940-NEXT: s_cbranch_execnz .LBB72_1
-; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_cbranch_execnz .LBB72_2
+; GFX940-NEXT: .LBB72_3:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
new file mode 100644
index 0000000..66bf0d5
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
@@ -0,0 +1,1502 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GISEL %s
+
+define i128 @fptosi_f64_to_i128(double %x) {
+; SDAG-LABEL: fptosi_f64_to_i128:
+; SDAG: ; %bb.0: ; %fp-to-i-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_mov_b32_e32 v5, v1
+; SDAG-NEXT: v_bfe_u32 v6, v5, 20, 11
+; SDAG-NEXT: v_mov_b32_e32 v7, 0
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x3fe
+; SDAG-NEXT: v_mov_b32_e32 v4, v0
+; SDAG-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-NEXT: v_mov_b32_e32 v3, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB0_10
+; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc
+; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc
+; SDAG-NEXT: s_movk_i32 s4, 0xff7f
+; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc
+; SDAG-NEXT: s_mov_b32 s5, -1
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[0:1]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[6:7], -1, v[2:3]
+; SDAG-NEXT: v_cmp_lt_i64_e32 vcc, -1, v[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB0_7
+; SDAG-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v9, s[4:5], -1, v0
+; SDAG-NEXT: v_addc_co_u32_e64 v10, s[4:5], 0, -1, s[4:5]
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x432
+; SDAG-NEXT: v_and_b32_e32 v0, 0xfffff, v5
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v11, -1, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v5, 0x100000, v0
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[12:13], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB0_4
+; SDAG-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; SDAG-NEXT: v_sub_u32_e32 v0, 0x473, v6
+; SDAG-NEXT: v_add_u32_e32 v2, 0xfffffb8d, v6
+; SDAG-NEXT: v_add_u32_e32 v7, 0xfffffbcd, v6
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v0, v[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[2:3], v2, v[4:5]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v7
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
+; SDAG-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v7
+; SDAG-NEXT: v_cndmask_b32_e64 v6, 0, v1, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v7, v[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, v2, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v7, 0, v1, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v12, v11, 0
+; SDAG-NEXT: v_mov_b32_e32 v3, 0
+; SDAG-NEXT: v_mul_lo_u32 v13, v8, v2
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v11, v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, v1
+; SDAG-NEXT: v_mul_lo_u32 v6, v11, v6
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v7, v11, v[2:3]
+; SDAG-NEXT: v_mul_lo_u32 v10, v10, v12
+; SDAG-NEXT: v_add3_u32 v5, v5, v6, v13
+; SDAG-NEXT: v_mov_b32_e32 v6, v2
+; SDAG-NEXT: v_mov_b32_e32 v2, v3
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v12, v8, v[1:2]
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v9, v12, v[4:5]
+; SDAG-NEXT: v_add_co_u32_e64 v5, s[4:5], v6, v2
+; SDAG-NEXT: v_addc_co_u32_e64 v6, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mul_lo_u32 v9, v9, v7
+; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v7, v8, v[5:6]
+; SDAG-NEXT: ; implicit-def: $vgpr11
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: v_add3_u32 v4, v10, v4, v9
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v5, v3
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], v6, v4, s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
+; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; SDAG-NEXT: ; implicit-def: $vgpr9
+; SDAG-NEXT: ; implicit-def: $vgpr10
+; SDAG-NEXT: .LBB0_4: ; %Flow
+; SDAG-NEXT: s_andn2_saveexec_b64 s[12:13], s[12:13]
+; SDAG-NEXT: s_cbranch_execz .LBB0_6
+; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; SDAG-NEXT: v_sub_u32_e32 v2, 0x433, v6
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v2, v[4:5]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v2
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v1, 0, v1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v6, v0, v4, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v5, v1, v5, s[6:7]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v11, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v5, v11, v[1:2]
+; SDAG-NEXT: v_mov_b32_e32 v7, v4
+; SDAG-NEXT: v_mov_b32_e32 v4, v2
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v6, v8, v[3:4]
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v7, v2
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v5, v8, v[2:3]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v9, v6, v[2:3]
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v10, v6, v[3:4]
+; SDAG-NEXT: v_mad_i32_i24 v3, v9, v5, v3
+; SDAG-NEXT: .LBB0_6: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB0_7: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
+; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
+; SDAG-NEXT: v_mov_b32_e32 v0, v2
+; SDAG-NEXT: v_mov_b32_e32 v1, v2
+; SDAG-NEXT: ; %bb.9: ; %Flow3
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB0_10: ; %fp-to-i-cleanup
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: fptosi_f64_to_i128:
+; GISEL: ; %bb.0: ; %fp-to-i-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_mov_b32_e32 v5, v1
+; GISEL-NEXT: v_mov_b32_e32 v4, v0
+; GISEL-NEXT: v_lshrrev_b32_e32 v0, 20, v5
+; GISEL-NEXT: v_and_b32_e32 v6, 0x7ff, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x3ff
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_mov_b32_e32 v7, 0
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB0_10
+; GISEL-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
+; GISEL-NEXT: v_mov_b32_e32 v2, 0xffffff80
+; GISEL-NEXT: v_addc_co_u32_e64 v1, s[6:7], 0, -1, vcc
+; GISEL-NEXT: v_mov_b32_e32 v3, -1
+; GISEL-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[0:1], v[2:3]
+; GISEL-NEXT: v_addc_co_u32_e64 v9, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_cmp_le_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cmp_lt_i64_e64 s[4:5], -1, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[14:15], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB0_7
+; GISEL-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; GISEL-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[6:7]
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v2, 1, v0
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[6:7]
+; GISEL-NEXT: v_lshlrev_b16_e32 v3, 2, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v8, 3, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v9, 4, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v10, 5, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v11, 6, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v12, 7, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v13, 8, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v14, 9, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v15, 10, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v16, 11, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v17, 12, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v18, 13, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v19, 14, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v20, 15, v0
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v2
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v3
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v3
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v8
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v8
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v9
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v9
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v10
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v10
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v11
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v11
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v12
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v12
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v13
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v13
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v14
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v14
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v15
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v15
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v16
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v16
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v17
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v17
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v18
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v18
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v19
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v19
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v20
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v20
+; GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v0
+; GISEL-NEXT: v_lshl_or_b32 v10, v0, 16, v0
+; GISEL-NEXT: v_or3_b32 v8, v1, v2, 1
+; GISEL-NEXT: v_or3_b32 v9, v0, v2, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x433
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_and_b32_e32 v2, 0xfffff, v5
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: v_or_b32_e32 v5, 0x100000, v2
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB0_4
+; GISEL-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; GISEL-NEXT: v_add_u32_e32 v6, 0xfffffbcd, v6
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v6, v[4:5]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v11, v10, 0
+; GISEL-NEXT: v_subrev_u32_e32 v7, 64, v6
+; GISEL-NEXT: v_sub_u32_e32 v2, 64, v6
+; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[4:5]
+; GISEL-NEXT: v_lshlrev_b64 v[4:5], v7, v[4:5]
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v6
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v8, v[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v11, v8, 0
+; GISEL-NEXT: v_mov_b32_e32 v2, v6
+; GISEL-NEXT: v_mul_lo_u32 v6, v11, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v11, v9, v[1:2]
+; GISEL-NEXT: v_mul_lo_u32 v4, v12, v10
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v8, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11]
+; GISEL-NEXT: v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v9, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr10
+; GISEL-NEXT: ; implicit-def: $vgpr9
+; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v8, v[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr6
+; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: .LBB0_4: ; %Flow
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[16:17]
+; GISEL-NEXT: s_cbranch_execz .LBB0_6
+; GISEL-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; GISEL-NEXT: v_sub_co_u32_e32 v6, vcc, 0x433, v6
+; GISEL-NEXT: v_subrev_u32_e32 v2, 64, v6
+; GISEL-NEXT: v_lshrrev_b64 v[0:1], v6, v[4:5]
+; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, 0
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v0, v4, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v4, v10, 0
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v1, v5, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v4, v8, 0
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v5, v9, v[2:3]
+; GISEL-NEXT: v_mul_lo_u32 v6, v5, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v4, v9, v[1:2]
+; GISEL-NEXT: v_mul_lo_u32 v4, v4, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v5, v8, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e64 v3, s[6:7], v3, v4, s[6:7]
+; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v6, vcc
+; GISEL-NEXT: .LBB0_6: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: .LBB0_7: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[14:15]
+; GISEL-NEXT: s_cbranch_execz .LBB0_9
+; GISEL-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v1, 1, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 1, v1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 2, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 3, v1
+; GISEL-NEXT: v_or_b32_e32 v2, v1, v2
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 4, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 5, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 6, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 7, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 8, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 9, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 10, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 11, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 12, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v14, 13, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v14
+; GISEL-NEXT: v_lshlrev_b32_e32 v15, 14, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v16, 15, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v13, v14
+; GISEL-NEXT: v_or3_b32 v0, v0, v15, v16
+; GISEL-NEXT: v_lshlrev_b32_e32 v17, 16, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v18, 17, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v15, v16
+; GISEL-NEXT: v_or3_b32 v0, v0, v17, v18
+; GISEL-NEXT: v_lshlrev_b32_e32 v19, 18, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v20, 19, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v17, v18
+; GISEL-NEXT: v_or3_b32 v0, v0, v19, v20
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 20, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 21, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v19, v20
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 22, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 23, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 24, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 25, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 26, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 27, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 28, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 29, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 30, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v1
+; GISEL-NEXT: v_or3_b32 v1, v2, v13, v1
+; GISEL-NEXT: v_add_u32_e32 v3, 0x80000000, v1
+; GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GISEL-NEXT: .LBB0_9: ; %Flow3
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: .LBB0_10: ; %fp-to-i-cleanup
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = fptosi double %x to i128
+ ret i128 %cvt
+}
+
+define i128 @fptoui_f64_to_i128(double %x) {
+; SDAG-LABEL: fptoui_f64_to_i128:
+; SDAG: ; %bb.0: ; %fp-to-i-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_mov_b32_e32 v5, v1
+; SDAG-NEXT: v_bfe_u32 v6, v5, 20, 11
+; SDAG-NEXT: v_mov_b32_e32 v7, 0
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x3fe
+; SDAG-NEXT: v_mov_b32_e32 v4, v0
+; SDAG-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-NEXT: v_mov_b32_e32 v3, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB1_10
+; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc
+; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc
+; SDAG-NEXT: s_movk_i32 s4, 0xff7f
+; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc
+; SDAG-NEXT: s_mov_b32 s5, -1
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[0:1]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[6:7], -1, v[2:3]
+; SDAG-NEXT: v_cmp_lt_i64_e32 vcc, -1, v[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB1_7
+; SDAG-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v9, s[4:5], -1, v0
+; SDAG-NEXT: v_addc_co_u32_e64 v10, s[4:5], 0, -1, s[4:5]
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x432
+; SDAG-NEXT: v_and_b32_e32 v0, 0xfffff, v5
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v11, -1, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v5, 0x100000, v0
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[12:13], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB1_4
+; SDAG-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; SDAG-NEXT: v_sub_u32_e32 v0, 0x473, v6
+; SDAG-NEXT: v_add_u32_e32 v2, 0xfffffb8d, v6
+; SDAG-NEXT: v_add_u32_e32 v7, 0xfffffbcd, v6
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v0, v[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[2:3], v2, v[4:5]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v7
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
+; SDAG-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v7
+; SDAG-NEXT: v_cndmask_b32_e64 v6, 0, v1, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v7, v[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, v2, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v7, 0, v1, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v12, v11, 0
+; SDAG-NEXT: v_mov_b32_e32 v3, 0
+; SDAG-NEXT: v_mul_lo_u32 v13, v8, v2
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v11, v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, v1
+; SDAG-NEXT: v_mul_lo_u32 v6, v11, v6
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v7, v11, v[2:3]
+; SDAG-NEXT: v_mul_lo_u32 v10, v10, v12
+; SDAG-NEXT: v_add3_u32 v5, v5, v6, v13
+; SDAG-NEXT: v_mov_b32_e32 v6, v2
+; SDAG-NEXT: v_mov_b32_e32 v2, v3
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v12, v8, v[1:2]
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v9, v12, v[4:5]
+; SDAG-NEXT: v_add_co_u32_e64 v5, s[4:5], v6, v2
+; SDAG-NEXT: v_addc_co_u32_e64 v6, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mul_lo_u32 v9, v9, v7
+; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v7, v8, v[5:6]
+; SDAG-NEXT: ; implicit-def: $vgpr11
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: v_add3_u32 v4, v10, v4, v9
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v5, v3
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], v6, v4, s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
+; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; SDAG-NEXT: ; implicit-def: $vgpr9
+; SDAG-NEXT: ; implicit-def: $vgpr10
+; SDAG-NEXT: .LBB1_4: ; %Flow
+; SDAG-NEXT: s_andn2_saveexec_b64 s[12:13], s[12:13]
+; SDAG-NEXT: s_cbranch_execz .LBB1_6
+; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; SDAG-NEXT: v_sub_u32_e32 v2, 0x433, v6
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v2, v[4:5]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v2
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v1, 0, v1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v6, v0, v4, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v5, v1, v5, s[6:7]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v11, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v5, v11, v[1:2]
+; SDAG-NEXT: v_mov_b32_e32 v7, v4
+; SDAG-NEXT: v_mov_b32_e32 v4, v2
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v6, v8, v[3:4]
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v7, v2
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v5, v8, v[2:3]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v9, v6, v[2:3]
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v10, v6, v[3:4]
+; SDAG-NEXT: v_mad_i32_i24 v3, v9, v5, v3
+; SDAG-NEXT: .LBB1_6: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB1_7: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
+; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
+; SDAG-NEXT: v_mov_b32_e32 v0, v2
+; SDAG-NEXT: v_mov_b32_e32 v1, v2
+; SDAG-NEXT: ; %bb.9: ; %Flow3
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB1_10: ; %fp-to-i-cleanup
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: fptoui_f64_to_i128:
+; GISEL: ; %bb.0: ; %fp-to-i-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_mov_b32_e32 v5, v1
+; GISEL-NEXT: v_mov_b32_e32 v4, v0
+; GISEL-NEXT: v_lshrrev_b32_e32 v0, 20, v5
+; GISEL-NEXT: v_and_b32_e32 v6, 0x7ff, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x3ff
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_mov_b32_e32 v7, 0
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB1_10
+; GISEL-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
+; GISEL-NEXT: v_mov_b32_e32 v2, 0xffffff80
+; GISEL-NEXT: v_addc_co_u32_e64 v1, s[6:7], 0, -1, vcc
+; GISEL-NEXT: v_mov_b32_e32 v3, -1
+; GISEL-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[0:1], v[2:3]
+; GISEL-NEXT: v_addc_co_u32_e64 v9, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_cmp_le_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cmp_lt_i64_e64 s[4:5], -1, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[14:15], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB1_7
+; GISEL-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; GISEL-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[6:7]
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v2, 1, v0
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[6:7]
+; GISEL-NEXT: v_lshlrev_b16_e32 v3, 2, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v8, 3, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v9, 4, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v10, 5, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v11, 6, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v12, 7, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v13, 8, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v14, 9, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v15, 10, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v16, 11, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v17, 12, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v18, 13, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v19, 14, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v20, 15, v0
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v2
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v3
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v3
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v8
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v8
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v9
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v9
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v10
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v10
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v11
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v11
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v12
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v12
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v13
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v13
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v14
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v14
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v15
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v15
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v16
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v16
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v17
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v17
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v18
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v18
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v19
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v19
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v20
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v20
+; GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v0
+; GISEL-NEXT: v_lshl_or_b32 v10, v0, 16, v0
+; GISEL-NEXT: v_or3_b32 v8, v1, v2, 1
+; GISEL-NEXT: v_or3_b32 v9, v0, v2, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x433
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_and_b32_e32 v2, 0xfffff, v5
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: v_or_b32_e32 v5, 0x100000, v2
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB1_4
+; GISEL-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; GISEL-NEXT: v_add_u32_e32 v6, 0xfffffbcd, v6
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v6, v[4:5]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v11, v10, 0
+; GISEL-NEXT: v_subrev_u32_e32 v7, 64, v6
+; GISEL-NEXT: v_sub_u32_e32 v2, 64, v6
+; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[4:5]
+; GISEL-NEXT: v_lshlrev_b64 v[4:5], v7, v[4:5]
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v6
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v8, v[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v11, v8, 0
+; GISEL-NEXT: v_mov_b32_e32 v2, v6
+; GISEL-NEXT: v_mul_lo_u32 v6, v11, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v11, v9, v[1:2]
+; GISEL-NEXT: v_mul_lo_u32 v4, v12, v10
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v8, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11]
+; GISEL-NEXT: v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v9, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr10
+; GISEL-NEXT: ; implicit-def: $vgpr9
+; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v8, v[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr6
+; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: .LBB1_4: ; %Flow
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[16:17]
+; GISEL-NEXT: s_cbranch_execz .LBB1_6
+; GISEL-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; GISEL-NEXT: v_sub_co_u32_e32 v6, vcc, 0x433, v6
+; GISEL-NEXT: v_subrev_u32_e32 v2, 64, v6
+; GISEL-NEXT: v_lshrrev_b64 v[0:1], v6, v[4:5]
+; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, 0
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v0, v4, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v4, v10, 0
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v1, v5, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v4, v8, 0
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v5, v9, v[2:3]
+; GISEL-NEXT: v_mul_lo_u32 v6, v5, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v4, v9, v[1:2]
+; GISEL-NEXT: v_mul_lo_u32 v4, v4, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v5, v8, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e64 v3, s[6:7], v3, v4, s[6:7]
+; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v6, vcc
+; GISEL-NEXT: .LBB1_6: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: .LBB1_7: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[14:15]
+; GISEL-NEXT: s_cbranch_execz .LBB1_9
+; GISEL-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v1, 1, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 1, v1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 2, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 3, v1
+; GISEL-NEXT: v_or_b32_e32 v2, v1, v2
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 4, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 5, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 6, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 7, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 8, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 9, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 10, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 11, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 12, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v14, 13, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v14
+; GISEL-NEXT: v_lshlrev_b32_e32 v15, 14, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v16, 15, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v13, v14
+; GISEL-NEXT: v_or3_b32 v0, v0, v15, v16
+; GISEL-NEXT: v_lshlrev_b32_e32 v17, 16, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v18, 17, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v15, v16
+; GISEL-NEXT: v_or3_b32 v0, v0, v17, v18
+; GISEL-NEXT: v_lshlrev_b32_e32 v19, 18, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v20, 19, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v17, v18
+; GISEL-NEXT: v_or3_b32 v0, v0, v19, v20
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 20, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 21, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v19, v20
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 22, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 23, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 24, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 25, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 26, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 27, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 28, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 29, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 30, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v1
+; GISEL-NEXT: v_or3_b32 v1, v2, v13, v1
+; GISEL-NEXT: v_add_u32_e32 v3, 0x80000000, v1
+; GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GISEL-NEXT: .LBB1_9: ; %Flow3
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: .LBB1_10: ; %fp-to-i-cleanup
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = fptoui double %x to i128
+ ret i128 %cvt
+}
+
+define i128 @fptosi_f32_to_i128(float %x) {
+; SDAG-LABEL: fptosi_f32_to_i128:
+; SDAG: ; %bb.0: ; %fp-to-i-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_mov_b32_e32 v4, v0
+; SDAG-NEXT: v_bfe_u32 v5, v4, 23, 8
+; SDAG-NEXT: s_movk_i32 s4, 0x7e
+; SDAG-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v6, 0
+; SDAG-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-NEXT: v_mov_b32_e32 v3, 0
+; SDAG-NEXT: v_cmp_lt_u32_e32 vcc, s4, v5
+; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB2_10
+; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
+; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
+; SDAG-NEXT: s_movk_i32 s4, 0xff7f
+; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
+; SDAG-NEXT: s_mov_b32 s5, -1
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[0:1]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[6:7], -1, v[2:3]
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, -1, v4
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB2_7
+; SDAG-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v9, s[4:5], -1, v0
+; SDAG-NEXT: v_addc_co_u32_e64 v11, s[4:5], 0, -1, s[4:5]
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x95
+; SDAG-NEXT: v_and_b32_e32 v0, 0x7fffff, v4
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[5:6]
+; SDAG-NEXT: v_mov_b32_e32 v7, 0
+; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v10, -1, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v6, 0x800000, v0
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[12:13], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB2_4
+; SDAG-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; SDAG-NEXT: v_sub_u32_e32 v0, 0xd6, v5
+; SDAG-NEXT: v_add_u32_e32 v2, 0xffffff2a, v5
+; SDAG-NEXT: v_add_u32_e32 v4, 0xffffff6a, v5
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v0, v[6:7]
+; SDAG-NEXT: v_lshlrev_b64 v[2:3], v2, v[6:7]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v4
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
+; SDAG-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v4
+; SDAG-NEXT: v_cndmask_b32_e64 v3, 0, v1, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v4, v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, v2, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v13, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, v1, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v13, v10, 0
+; SDAG-NEXT: v_mul_lo_u32 v14, v8, v2
+; SDAG-NEXT: v_mul_lo_u32 v15, v10, v3
+; SDAG-NEXT: v_mov_b32_e32 v6, v1
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v12, v10, v[6:7]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v10, v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v6, v5
+; SDAG-NEXT: v_mov_b32_e32 v5, v7
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v13, v8, v[4:5]
+; SDAG-NEXT: v_add3_u32 v3, v3, v15, v14
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v9, v13, v[2:3]
+; SDAG-NEXT: v_add_co_u32_e64 v5, s[4:5], v6, v5
+; SDAG-NEXT: v_addc_co_u32_e64 v6, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mul_lo_u32 v3, v9, v12
+; SDAG-NEXT: v_mul_lo_u32 v7, v11, v13
+; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v12, v8, v[5:6]
+; SDAG-NEXT: ; implicit-def: $vgpr10
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: ; implicit-def: $vgpr9
+; SDAG-NEXT: v_add3_u32 v3, v7, v2, v3
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v5, v1
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], v6, v3, s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr5_vgpr6
+; SDAG-NEXT: v_mov_b32_e32 v1, v4
+; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
+; SDAG-NEXT: .LBB2_4: ; %Flow
+; SDAG-NEXT: s_andn2_saveexec_b64 s[6:7], s[12:13]
+; SDAG-NEXT: s_cbranch_execz .LBB2_6
+; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; SDAG-NEXT: v_sub_u32_e32 v2, 0x96, v5
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v2, v[6:7]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, v0, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v3, v0, v6, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v3, v10, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v3, v8, v[1:2]
+; SDAG-NEXT: v_mov_b32_e32 v1, v5
+; SDAG-NEXT: v_mad_i64_i32 v[2:3], s[4:5], v9, v3, v[1:2]
+; SDAG-NEXT: v_mov_b32_e32 v1, v4
+; SDAG-NEXT: .LBB2_6: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: .LBB2_7: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
+; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
+; SDAG-NEXT: v_mov_b32_e32 v0, v2
+; SDAG-NEXT: v_mov_b32_e32 v1, v2
+; SDAG-NEXT: ; %bb.9: ; %Flow3
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB2_10: ; %fp-to-i-cleanup
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: fptosi_f32_to_i128:
+; GISEL: ; %bb.0: ; %fp-to-i-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_mov_b32_e32 v4, v0
+; GISEL-NEXT: v_mov_b32_e32 v5, 0
+; GISEL-NEXT: v_lshrrev_b64 v[0:1], 23, v[4:5]
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_bfe_u32 v6, v0, 0, 8
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB2_10
+; GISEL-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v6
+; GISEL-NEXT: v_mov_b32_e32 v2, 0xffffff80
+; GISEL-NEXT: v_addc_co_u32_e64 v1, s[6:7], 0, -1, vcc
+; GISEL-NEXT: v_mov_b32_e32 v3, -1
+; GISEL-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[0:1], v[2:3]
+; GISEL-NEXT: v_addc_co_u32_e64 v9, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_cmp_le_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cmp_lt_i32_e64 s[4:5], -1, v4
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[14:15], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB2_7
+; GISEL-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; GISEL-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[6:7]
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v2, 1, v0
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[6:7]
+; GISEL-NEXT: v_lshlrev_b16_e32 v3, 2, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v5, 3, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v8, 4, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v9, 5, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v10, 6, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v11, 7, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v12, 8, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v13, 9, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v14, 10, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v15, 11, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v16, 12, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v17, 13, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v18, 14, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v19, 15, v0
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v2
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v3
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v3
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v5
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v5
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v8
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v8
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v9
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v9
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v10
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v10
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v11
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v11
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v12
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v12
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v13
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v13
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v14
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v14
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v15
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v15
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v16
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v16
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v17
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v17
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v18
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v18
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v19
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v19
+; GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v0
+; GISEL-NEXT: v_lshl_or_b32 v10, v0, 16, v0
+; GISEL-NEXT: v_or3_b32 v9, v1, v2, 1
+; GISEL-NEXT: v_or3_b32 v8, v0, v2, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x96
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_and_b32_e32 v2, 0x7fffff, v4
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: v_or_b32_e32 v4, 0x800000, v2
+; GISEL-NEXT: v_mov_b32_e32 v5, 0
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB2_4
+; GISEL-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; GISEL-NEXT: v_add_u32_e32 v6, 0xffffff6a, v6
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v6, v[4:5]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v11, v10, 0
+; GISEL-NEXT: v_subrev_u32_e32 v7, 64, v6
+; GISEL-NEXT: v_sub_u32_e32 v2, 64, v6
+; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[4:5]
+; GISEL-NEXT: v_lshlrev_b64 v[4:5], v7, v[4:5]
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v6
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v8, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v9, v[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v11, v9, 0
+; GISEL-NEXT: v_mov_b32_e32 v2, v6
+; GISEL-NEXT: v_mul_lo_u32 v6, v11, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v11, v8, v[1:2]
+; GISEL-NEXT: v_mul_lo_u32 v4, v12, v10
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v9, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11]
+; GISEL-NEXT: v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v8, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr10
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v9, v[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr6
+; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr9
+; GISEL-NEXT: .LBB2_4: ; %Flow
+; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[16:17]
+; GISEL-NEXT: s_cbranch_execz .LBB2_6
+; GISEL-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; GISEL-NEXT: v_sub_co_u32_e32 v3, vcc, 0x96, v6
+; GISEL-NEXT: v_subrev_u32_e32 v2, 64, v3
+; GISEL-NEXT: v_lshrrev_b64 v[0:1], v3, v[4:5]
+; GISEL-NEXT: v_lshrrev_b64 v[1:2], v2, 0
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v0, v4, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v4, v9, 0
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[8:9], v4, v10, 0
+; GISEL-NEXT: v_mul_lo_u32 v5, v4, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v4, v8, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
+; GISEL-NEXT: .LBB2_6: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: .LBB2_7: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[14:15]
+; GISEL-NEXT: s_cbranch_execz .LBB2_9
+; GISEL-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v1, 1, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 1, v1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 2, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 3, v1
+; GISEL-NEXT: v_or_b32_e32 v2, v1, v2
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 4, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 5, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 6, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 7, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 8, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 9, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 10, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 11, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 12, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v14, 13, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v14
+; GISEL-NEXT: v_lshlrev_b32_e32 v15, 14, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v16, 15, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v13, v14
+; GISEL-NEXT: v_or3_b32 v0, v0, v15, v16
+; GISEL-NEXT: v_lshlrev_b32_e32 v17, 16, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v18, 17, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v15, v16
+; GISEL-NEXT: v_or3_b32 v0, v0, v17, v18
+; GISEL-NEXT: v_lshlrev_b32_e32 v19, 18, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v20, 19, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v17, v18
+; GISEL-NEXT: v_or3_b32 v0, v0, v19, v20
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 20, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 21, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v19, v20
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 22, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 23, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 24, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 25, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 26, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 27, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 28, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 29, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 30, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v1
+; GISEL-NEXT: v_or3_b32 v1, v2, v13, v1
+; GISEL-NEXT: v_add_u32_e32 v3, 0x80000000, v1
+; GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GISEL-NEXT: .LBB2_9: ; %Flow3
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: .LBB2_10: ; %fp-to-i-cleanup
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = fptosi float %x to i128
+ ret i128 %cvt
+}
+
+define i128 @fptoui_f32_to_i128(float %x) {
+; SDAG-LABEL: fptoui_f32_to_i128:
+; SDAG: ; %bb.0: ; %fp-to-i-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_mov_b32_e32 v4, v0
+; SDAG-NEXT: v_bfe_u32 v5, v4, 23, 8
+; SDAG-NEXT: s_movk_i32 s4, 0x7e
+; SDAG-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v6, 0
+; SDAG-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-NEXT: v_mov_b32_e32 v3, 0
+; SDAG-NEXT: v_cmp_lt_u32_e32 vcc, s4, v5
+; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB3_10
+; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
+; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
+; SDAG-NEXT: s_movk_i32 s4, 0xff7f
+; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
+; SDAG-NEXT: s_mov_b32 s5, -1
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[0:1]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[6:7], -1, v[2:3]
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, -1, v4
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB3_7
+; SDAG-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v9, s[4:5], -1, v0
+; SDAG-NEXT: v_addc_co_u32_e64 v11, s[4:5], 0, -1, s[4:5]
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x95
+; SDAG-NEXT: v_and_b32_e32 v0, 0x7fffff, v4
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[5:6]
+; SDAG-NEXT: v_mov_b32_e32 v7, 0
+; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v10, -1, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v6, 0x800000, v0
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; SDAG-NEXT: s_xor_b64 s[12:13], exec, s[6:7]
+; SDAG-NEXT: s_cbranch_execz .LBB3_4
+; SDAG-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; SDAG-NEXT: v_sub_u32_e32 v0, 0xd6, v5
+; SDAG-NEXT: v_add_u32_e32 v2, 0xffffff2a, v5
+; SDAG-NEXT: v_add_u32_e32 v4, 0xffffff6a, v5
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v0, v[6:7]
+; SDAG-NEXT: v_lshlrev_b64 v[2:3], v2, v[6:7]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v4
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
+; SDAG-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v4
+; SDAG-NEXT: v_cndmask_b32_e64 v3, 0, v1, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v4, v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, v2, s[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v13, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, v1, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v13, v10, 0
+; SDAG-NEXT: v_mul_lo_u32 v14, v8, v2
+; SDAG-NEXT: v_mul_lo_u32 v15, v10, v3
+; SDAG-NEXT: v_mov_b32_e32 v6, v1
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v12, v10, v[6:7]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v10, v2, 0
+; SDAG-NEXT: v_mov_b32_e32 v6, v5
+; SDAG-NEXT: v_mov_b32_e32 v5, v7
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v13, v8, v[4:5]
+; SDAG-NEXT: v_add3_u32 v3, v3, v15, v14
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v9, v13, v[2:3]
+; SDAG-NEXT: v_add_co_u32_e64 v5, s[4:5], v6, v5
+; SDAG-NEXT: v_addc_co_u32_e64 v6, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mul_lo_u32 v3, v9, v12
+; SDAG-NEXT: v_mul_lo_u32 v7, v11, v13
+; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v12, v8, v[5:6]
+; SDAG-NEXT: ; implicit-def: $vgpr10
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: ; implicit-def: $vgpr9
+; SDAG-NEXT: v_add3_u32 v3, v7, v2, v3
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v5, v1
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], v6, v3, s[4:5]
+; SDAG-NEXT: ; implicit-def: $vgpr5_vgpr6
+; SDAG-NEXT: v_mov_b32_e32 v1, v4
+; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
+; SDAG-NEXT: .LBB3_4: ; %Flow
+; SDAG-NEXT: s_andn2_saveexec_b64 s[6:7], s[12:13]
+; SDAG-NEXT: s_cbranch_execz .LBB3_6
+; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; SDAG-NEXT: v_sub_u32_e32 v2, 0x96, v5
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v2, v[6:7]
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, v0, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v3, v0, v6, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v3, v10, 0
+; SDAG-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v3, v8, v[1:2]
+; SDAG-NEXT: v_mov_b32_e32 v1, v5
+; SDAG-NEXT: v_mad_i64_i32 v[2:3], s[4:5], v9, v3, v[1:2]
+; SDAG-NEXT: v_mov_b32_e32 v1, v4
+; SDAG-NEXT: .LBB3_6: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: .LBB3_7: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
+; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
+; SDAG-NEXT: v_mov_b32_e32 v0, v2
+; SDAG-NEXT: v_mov_b32_e32 v1, v2
+; SDAG-NEXT: ; %bb.9: ; %Flow3
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB3_10: ; %fp-to-i-cleanup
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: fptoui_f32_to_i128:
+; GISEL: ; %bb.0: ; %fp-to-i-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_mov_b32_e32 v4, v0
+; GISEL-NEXT: v_mov_b32_e32 v5, 0
+; GISEL-NEXT: v_lshrrev_b64 v[0:1], 23, v[4:5]
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_bfe_u32 v6, v0, 0, 8
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB3_10
+; GISEL-NEXT: ; %bb.1: ; %fp-to-i-if-end
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v6
+; GISEL-NEXT: v_mov_b32_e32 v2, 0xffffff80
+; GISEL-NEXT: v_addc_co_u32_e64 v1, s[6:7], 0, -1, vcc
+; GISEL-NEXT: v_mov_b32_e32 v3, -1
+; GISEL-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[0:1], v[2:3]
+; GISEL-NEXT: v_addc_co_u32_e64 v9, s[6:7], 0, -1, s[6:7]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_cmp_le_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cmp_lt_i32_e64 s[4:5], -1, v4
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[14:15], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB3_7
+; GISEL-NEXT: ; %bb.2: ; %fp-to-i-if-end9
+; GISEL-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[6:7]
+; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v2, 1, v0
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[6:7]
+; GISEL-NEXT: v_lshlrev_b16_e32 v3, 2, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v5, 3, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v8, 4, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v9, 5, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v10, 6, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v11, 7, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v12, 8, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v13, 9, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v14, 10, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v15, 11, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v16, 12, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v17, 13, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v18, 14, v0
+; GISEL-NEXT: v_lshlrev_b16_e32 v19, 15, v0
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v2
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v3
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v3
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v5
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v5
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v8
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v8
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v9
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v9
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v10
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v10
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v11
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v11
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v12
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v12
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v13
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v13
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v14
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v14
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v15
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v15
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v16
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v16
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v17
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v17
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v18
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v18
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v19
+; GISEL-NEXT: v_or_b32_e32 v1, v1, v19
+; GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v0
+; GISEL-NEXT: v_lshl_or_b32 v10, v0, 16, v0
+; GISEL-NEXT: v_or3_b32 v9, v1, v2, 1
+; GISEL-NEXT: v_or3_b32 v8, v0, v2, 0
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x96
+; GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-NEXT: v_and_b32_e32 v2, 0x7fffff, v4
+; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
+; GISEL-NEXT: v_or_b32_e32 v4, 0x800000, v2
+; GISEL-NEXT: v_mov_b32_e32 v5, 0
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7]
+; GISEL-NEXT: s_cbranch_execz .LBB3_4
+; GISEL-NEXT: ; %bb.3: ; %fp-to-i-if-else
+; GISEL-NEXT: v_add_u32_e32 v6, 0xffffff6a, v6
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v6, v[4:5]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v11, v10, 0
+; GISEL-NEXT: v_subrev_u32_e32 v7, 64, v6
+; GISEL-NEXT: v_sub_u32_e32 v2, 64, v6
+; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[4:5]
+; GISEL-NEXT: v_lshlrev_b64 v[4:5], v7, v[4:5]
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v6
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v8, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v9, v[6:7]
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v11, v9, 0
+; GISEL-NEXT: v_mov_b32_e32 v2, v6
+; GISEL-NEXT: v_mul_lo_u32 v6, v11, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v11, v8, v[1:2]
+; GISEL-NEXT: v_mul_lo_u32 v4, v12, v10
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v9, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11]
+; GISEL-NEXT: v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9]
+; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v8, v[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr10
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v9, v[6:7]
+; GISEL-NEXT: ; implicit-def: $vgpr6
+; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr9
+; GISEL-NEXT: .LBB3_4: ; %Flow
+; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[16:17]
+; GISEL-NEXT: s_cbranch_execz .LBB3_6
+; GISEL-NEXT: ; %bb.5: ; %fp-to-i-if-then12
+; GISEL-NEXT: v_sub_co_u32_e32 v3, vcc, 0x96, v6
+; GISEL-NEXT: v_subrev_u32_e32 v2, 64, v3
+; GISEL-NEXT: v_lshrrev_b64 v[0:1], v3, v[4:5]
+; GISEL-NEXT: v_lshrrev_b64 v[1:2], v2, 0
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v0, v4, vcc
+; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v4, v9, 0
+; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[8:9], v4, v10, 0
+; GISEL-NEXT: v_mul_lo_u32 v5, v4, v10
+; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v4, v8, v[1:2]
+; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
+; GISEL-NEXT: .LBB3_6: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: .LBB3_7: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[14:15]
+; GISEL-NEXT: s_cbranch_execz .LBB3_9
+; GISEL-NEXT: ; %bb.8: ; %fp-to-i-if-then5
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v1, 1, v1
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GISEL-NEXT: v_lshlrev_b32_e32 v2, 1, v1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 2, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 3, v1
+; GISEL-NEXT: v_or_b32_e32 v2, v1, v2
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 4, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 5, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 6, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 7, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 8, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 9, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 10, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 11, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 12, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v14, 13, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v14
+; GISEL-NEXT: v_lshlrev_b32_e32 v15, 14, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v16, 15, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v13, v14
+; GISEL-NEXT: v_or3_b32 v0, v0, v15, v16
+; GISEL-NEXT: v_lshlrev_b32_e32 v17, 16, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v18, 17, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v15, v16
+; GISEL-NEXT: v_or3_b32 v0, v0, v17, v18
+; GISEL-NEXT: v_lshlrev_b32_e32 v19, 18, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v20, 19, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v17, v18
+; GISEL-NEXT: v_or3_b32 v0, v0, v19, v20
+; GISEL-NEXT: v_lshlrev_b32_e32 v3, 20, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v4, 21, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v19, v20
+; GISEL-NEXT: v_or3_b32 v0, v0, v3, v4
+; GISEL-NEXT: v_lshlrev_b32_e32 v5, 22, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v6, 23, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v3, v4
+; GISEL-NEXT: v_or3_b32 v0, v0, v5, v6
+; GISEL-NEXT: v_lshlrev_b32_e32 v7, 24, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v8, 25, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v5, v6
+; GISEL-NEXT: v_or3_b32 v0, v0, v7, v8
+; GISEL-NEXT: v_lshlrev_b32_e32 v9, 26, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v10, 27, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v7, v8
+; GISEL-NEXT: v_or3_b32 v0, v0, v9, v10
+; GISEL-NEXT: v_lshlrev_b32_e32 v11, 28, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v12, 29, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v9, v10
+; GISEL-NEXT: v_or3_b32 v0, v0, v11, v12
+; GISEL-NEXT: v_lshlrev_b32_e32 v13, 30, v1
+; GISEL-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GISEL-NEXT: v_or3_b32 v2, v2, v11, v12
+; GISEL-NEXT: v_or3_b32 v0, v0, v13, v1
+; GISEL-NEXT: v_or3_b32 v1, v2, v13, v1
+; GISEL-NEXT: v_add_u32_e32 v3, 0x80000000, v1
+; GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GISEL-NEXT: .LBB3_9: ; %Flow3
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: .LBB3_10: ; %fp-to-i-cleanup
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = fptoui float %x to i128
+ ret i128 %cvt
+}
+
+define i128 @fptosi_f16_to_i128(half %x) {
+; GCN-LABEL: fptosi_f16_to_i128:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GCN-NEXT: v_cvt_i32_f32_e32 v0, v0
+; GCN-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GCN-NEXT: v_mov_b32_e32 v2, v1
+; GCN-NEXT: v_mov_b32_e32 v3, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cvt = fptosi half %x to i128
+ ret i128 %cvt
+}
+
+define i128 @fptoui_f16_to_i128(half %x) {
+; GCN-LABEL: fptoui_f16_to_i128:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GCN-NEXT: v_mov_b32_e32 v1, 0
+; GCN-NEXT: v_mov_b32_e32 v2, 0
+; GCN-NEXT: v_mov_b32_e32 v3, 0
+; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cvt = fptoui half %x to i128
+ ret i128 %cvt
+}
+
+; FIXME: ExpandLargeFpConvert asserts on bfloat
+; define i128 @fptosi_bf16_to_i128(bfloat %x) {
+; %cvt = fptosi bfloat %x to i128
+; ret i128 %cvt
+; }
+
+; define i128 @fptoui_bf16_to_i128(bfloat %x) {
+; %cvt = fptoui bfloat %x to i128
+; ret i128 %cvt
+; }
diff --git a/llvm/test/CodeGen/AMDGPU/fract-match.ll b/llvm/test/CodeGen/AMDGPU/fract-match.ll
index 3a0b825..e361aa4 100644
--- a/llvm/test/CodeGen/AMDGPU/fract-match.ll
+++ b/llvm/test/CodeGen/AMDGPU/fract-match.ll
@@ -1705,16 +1705,16 @@ define <2 x float> @safe_math_fract_v2f32(<2 x float> %x, ptr addrspace(1) nocap
; GFX6-NEXT: v_min_f32_e32 v7, 0x3f7fffff, v7
; GFX6-NEXT: v_cndmask_b32_e32 v6, v6, v1, vcc
; GFX6-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX6-NEXT: s_movk_i32 s10, 0x204
+; GFX6-NEXT: v_mov_b32_e32 v8, 0x204
; GFX6-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc
-; GFX6-NEXT: v_cmp_class_f32_e64 s[8:9], v0, s10
+; GFX6-NEXT: v_cmp_class_f32_e32 vcc, v0, v8
; GFX6-NEXT: s_mov_b32 s6, 0
-; GFX6-NEXT: v_cndmask_b32_e64 v0, v7, 0, s[8:9]
-; GFX6-NEXT: v_cmp_class_f32_e64 s[8:9], v1, s10
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v7, 0, vcc
+; GFX6-NEXT: v_cmp_class_f32_e32 vcc, v1, v8
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: v_cndmask_b32_e64 v1, v6, 0, s[8:9]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v6, 0, vcc
; GFX6-NEXT: buffer_store_dwordx2 v[4:5], v[2:3], s[4:7], 0 addr64
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; GFX6-NEXT: s_setpc_b64 s[30:31]
@@ -1722,19 +1722,19 @@ define <2 x float> @safe_math_fract_v2f32(<2 x float> %x, ptr addrspace(1) nocap
; GFX7-LABEL: safe_math_fract_v2f32:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: s_mov_b32 s8, 0x7f800000
+; GFX7-NEXT: v_mov_b32_e32 v8, 0x204
; GFX7-NEXT: v_fract_f32_e32 v6, v0
-; GFX7-NEXT: v_cmp_neq_f32_e64 vcc, |v0|, s8
+; GFX7-NEXT: v_cmp_class_f32_e32 vcc, v0, v8
; GFX7-NEXT: s_mov_b32 s6, 0
; GFX7-NEXT: v_floor_f32_e32 v4, v0
; GFX7-NEXT: v_fract_f32_e32 v7, v1
-; GFX7-NEXT: v_cndmask_b32_e32 v0, 0, v6, vcc
-; GFX7-NEXT: v_cmp_neq_f32_e64 vcc, |v1|, s8
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX7-NEXT: v_cmp_class_f32_e32 vcc, v1, v8
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
; GFX7-NEXT: v_floor_f32_e32 v5, v1
-; GFX7-NEXT: v_cndmask_b32_e32 v1, 0, v7, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v7, 0, vcc
; GFX7-NEXT: buffer_store_dwordx2 v[4:5], v[2:3], s[4:7], 0 addr64
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: s_setpc_b64 s[30:31]
@@ -1742,15 +1742,15 @@ define <2 x float> @safe_math_fract_v2f32(<2 x float> %x, ptr addrspace(1) nocap
; GFX8-LABEL: safe_math_fract_v2f32:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: s_mov_b32 s4, 0x7f800000
+; GFX8-NEXT: v_mov_b32_e32 v8, 0x204
; GFX8-NEXT: v_fract_f32_e32 v6, v0
-; GFX8-NEXT: v_cmp_neq_f32_e64 vcc, |v0|, s4
+; GFX8-NEXT: v_cmp_class_f32_e32 vcc, v0, v8
; GFX8-NEXT: v_floor_f32_e32 v4, v0
; GFX8-NEXT: v_fract_f32_e32 v7, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v0, 0, v6, vcc
-; GFX8-NEXT: v_cmp_neq_f32_e64 vcc, |v1|, s4
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX8-NEXT: v_cmp_class_f32_e32 vcc, v1, v8
; GFX8-NEXT: v_floor_f32_e32 v5, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v1, 0, v7, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v7, 0, vcc
; GFX8-NEXT: global_store_dwordx2 v[2:3], v[4:5], off
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_setpc_b64 s[30:31]
@@ -1759,14 +1759,15 @@ define <2 x float> @safe_math_fract_v2f32(<2 x float> %x, ptr addrspace(1) nocap
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_fract_f32_e32 v6, v0
-; GFX11-NEXT: v_cmp_neq_f32_e64 vcc_lo, 0x7f800000, |v0|
+; GFX11-NEXT: v_cmp_class_f32_e64 s0, v0, 0x204
; GFX11-NEXT: v_fract_f32_e32 v7, v1
; GFX11-NEXT: v_floor_f32_e32 v4, v0
; GFX11-NEXT: v_floor_f32_e32 v5, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, 0, v6, vcc_lo
-; GFX11-NEXT: v_cmp_neq_f32_e64 vcc_lo, 0x7f800000, |v1|
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v6, 0, s0
+; GFX11-NEXT: v_cmp_class_f32_e64 s0, v1, 0x204
; GFX11-NEXT: global_store_b64 v[2:3], v[4:5], off
-; GFX11-NEXT: v_cndmask_b32_e32 v1, 0, v7, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v7, 0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%floor = tail call <2 x float> @llvm.floor.v2f32(<2 x float> %x)
@@ -1937,21 +1938,22 @@ define half @safe_math_fract_f16(half %x, ptr addrspace(1) nocapture writeonly %
; GFX6: ; %bb.0: ; %entry
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX6-NEXT: s_mov_b32 s8, 0x7f800000
+; GFX6-NEXT: s_movk_i32 s8, 0x7c00
; GFX6-NEXT: s_mov_b32 s6, 0
; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v0
+; GFX6-NEXT: v_and_b32_e32 v0, 0x7fff, v0
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: v_floor_f32_e32 v3, v0
-; GFX6-NEXT: v_sub_f32_e32 v4, v0, v3
-; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
-; GFX6-NEXT: v_min_f32_e32 v4, 0x3f7fe000, v4
-; GFX6-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc
-; GFX6-NEXT: v_cmp_neq_f32_e64 vcc, |v0|, s8
-; GFX6-NEXT: v_cndmask_b32_e32 v0, 0, v4, vcc
-; GFX6-NEXT: buffer_store_short v3, v[1:2], s[4:7], 0 addr64
+; GFX6-NEXT: v_floor_f32_e32 v4, v3
+; GFX6-NEXT: v_sub_f32_e32 v5, v3, v4
+; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX6-NEXT: v_min_f32_e32 v5, 0x3f7fe000, v5
+; GFX6-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, s8, v0
+; GFX6-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
+; GFX6-NEXT: buffer_store_short v4, v[1:2], s[4:7], 0 addr64
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
@@ -1959,21 +1961,22 @@ define half @safe_math_fract_f16(half %x, ptr addrspace(1) nocapture writeonly %
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX7-NEXT: s_mov_b32 s8, 0x7f800000
+; GFX7-NEXT: s_movk_i32 s8, 0x7c00
; GFX7-NEXT: s_mov_b32 s6, 0
; GFX7-NEXT: s_mov_b32 s7, 0xf000
-; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v0
+; GFX7-NEXT: v_and_b32_e32 v0, 0x7fff, v0
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: v_floor_f32_e32 v3, v0
-; GFX7-NEXT: v_sub_f32_e32 v4, v0, v3
-; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
-; GFX7-NEXT: v_min_f32_e32 v4, 0x3f7fe000, v4
-; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc
-; GFX7-NEXT: v_cmp_neq_f32_e64 vcc, |v0|, s8
-; GFX7-NEXT: v_cndmask_b32_e32 v0, 0, v4, vcc
-; GFX7-NEXT: buffer_store_short v3, v[1:2], s[4:7], 0 addr64
+; GFX7-NEXT: v_floor_f32_e32 v4, v3
+; GFX7-NEXT: v_sub_f32_e32 v5, v3, v4
+; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX7-NEXT: v_min_f32_e32 v5, 0x3f7fe000, v5
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX7-NEXT: v_cmp_ne_u32_e32 vcc, s8, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
+; GFX7-NEXT: buffer_store_short v4, v[1:2], s[4:7], 0 addr64
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
@@ -2062,12 +2065,12 @@ define <2 x half> @safe_math_fract_v2f16(<2 x half> %x, ptr addrspace(1) nocaptu
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX6-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX6-NEXT: s_mov_b32 s8, 0x7f800000
+; GFX6-NEXT: s_movk_i32 s8, 0x7c00
; GFX6-NEXT: s_mov_b32 s6, 0
; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v1
; GFX6-NEXT: v_cvt_f32_f16_e32 v5, v0
-; GFX6-NEXT: v_cvt_f32_f16_e64 v0, |v0|
-; GFX6-NEXT: v_cvt_f32_f16_e64 v1, |v1|
+; GFX6-NEXT: v_and_b32_e32 v0, 0x7fff, v0
+; GFX6-NEXT: v_and_b32_e32 v1, 0x7fff, v1
; GFX6-NEXT: v_floor_f32_e32 v6, v4
; GFX6-NEXT: v_cvt_f16_f32_e32 v7, v6
; GFX6-NEXT: v_floor_f32_e32 v8, v5
@@ -2080,10 +2083,10 @@ define <2 x half> @safe_math_fract_v2f16(<2 x half> %x, ptr addrspace(1) nocaptu
; GFX6-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
; GFX6-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX6-NEXT: v_cndmask_b32_e32 v5, v8, v5, vcc
-; GFX6-NEXT: v_cmp_neq_f32_e32 vcc, s8, v0
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, s8, v0
; GFX6-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; GFX6-NEXT: v_cndmask_b32_e32 v0, 0, v5, vcc
-; GFX6-NEXT: v_cmp_neq_f32_e32 vcc, s8, v1
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, s8, v1
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
@@ -2098,12 +2101,12 @@ define <2 x half> @safe_math_fract_v2f16(<2 x half> %x, ptr addrspace(1) nocaptu
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX7-NEXT: s_mov_b32 s8, 0x7f800000
+; GFX7-NEXT: s_movk_i32 s8, 0x7c00
; GFX7-NEXT: s_mov_b32 s6, 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v0
-; GFX7-NEXT: v_cvt_f32_f16_e64 v0, |v0|
-; GFX7-NEXT: v_cvt_f32_f16_e64 v1, |v1|
+; GFX7-NEXT: v_and_b32_e32 v0, 0x7fff, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0x7fff, v1
; GFX7-NEXT: v_floor_f32_e32 v6, v4
; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v6
; GFX7-NEXT: v_floor_f32_e32 v8, v5
@@ -2116,10 +2119,10 @@ define <2 x half> @safe_math_fract_v2f16(<2 x half> %x, ptr addrspace(1) nocaptu
; GFX7-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX7-NEXT: v_cndmask_b32_e32 v5, v8, v5, vcc
-; GFX7-NEXT: v_cmp_neq_f32_e32 vcc, s8, v0
+; GFX7-NEXT: v_cmp_ne_u32_e32 vcc, s8, v0
; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; GFX7-NEXT: v_cndmask_b32_e32 v0, 0, v5, vcc
-; GFX7-NEXT: v_cmp_neq_f32_e32 vcc, s8, v1
+; GFX7-NEXT: v_cmp_ne_u32_e32 vcc, s8, v1
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
@@ -2133,16 +2136,16 @@ define <2 x half> @safe_math_fract_v2f16(<2 x half> %x, ptr addrspace(1) nocaptu
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX8-NEXT: s_movk_i32 s6, 0x204
+; GFX8-NEXT: v_mov_b32_e32 v7, 0x204
; GFX8-NEXT: v_floor_f16_e32 v4, v3
; GFX8-NEXT: v_floor_f16_e32 v5, v0
; GFX8-NEXT: v_fract_f16_e32 v6, v3
-; GFX8-NEXT: v_cmp_class_f16_e64 s[4:5], v3, s6
+; GFX8-NEXT: v_cmp_class_f16_e32 vcc, v3, v7
; GFX8-NEXT: v_pack_b32_f16 v4, v5, v4
; GFX8-NEXT: v_fract_f16_e32 v5, v0
-; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, 0, s[4:5]
-; GFX8-NEXT: v_cmp_class_f16_e64 s[4:5], v0, s6
-; GFX8-NEXT: v_cndmask_b32_e64 v0, v5, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, 0, vcc
+; GFX8-NEXT: v_cmp_class_f16_e32 vcc, v0, v7
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v5, 0, vcc
; GFX8-NEXT: v_pack_b32_f16 v0, v0, v3
; GFX8-NEXT: global_store_dword v[1:2], v4, off
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -2237,19 +2240,19 @@ define <2 x double> @safe_math_fract_v2f64(<2 x double> %x, ptr addrspace(1) noc
; GFX6-NEXT: v_cndmask_b32_e32 v11, v11, v3, vcc
; GFX6-NEXT: v_cndmask_b32_e32 v10, v10, v2, vcc
; GFX6-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
-; GFX6-NEXT: s_movk_i32 s10, 0x204
-; GFX6-NEXT: v_cmp_class_f64_e64 s[8:9], v[0:1], s10
+; GFX6-NEXT: v_mov_b32_e32 v14, 0x204
; GFX6-NEXT: v_cndmask_b32_e32 v13, v13, v1, vcc
; GFX6-NEXT: v_cndmask_b32_e32 v12, v12, v0, vcc
-; GFX6-NEXT: v_cndmask_b32_e64 v0, v12, 0, s[8:9]
-; GFX6-NEXT: v_cndmask_b32_e64 v1, v13, 0, s[8:9]
-; GFX6-NEXT: v_cmp_class_f64_e64 s[8:9], v[2:3], s10
+; GFX6-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v14
; GFX6-NEXT: s_mov_b32 s6, 0
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v12, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v13, 0, vcc
+; GFX6-NEXT: v_cmp_class_f64_e32 vcc, v[2:3], v14
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: v_cndmask_b32_e64 v2, v10, 0, s[8:9]
-; GFX6-NEXT: v_cndmask_b32_e64 v3, v11, 0, s[8:9]
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v10, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v11, 0, vcc
; GFX6-NEXT: buffer_store_dwordx4 v[6:9], v[4:5], s[4:7], 0 addr64
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; GFX6-NEXT: s_setpc_b64 s[30:31]
@@ -2257,39 +2260,39 @@ define <2 x double> @safe_math_fract_v2f64(<2 x double> %x, ptr addrspace(1) noc
; GFX7-LABEL: safe_math_fract_v2f64:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: s_movk_i32 s4, 0x204
+; GFX7-NEXT: v_mov_b32_e32 v6, 0x204
; GFX7-NEXT: v_fract_f64_e32 v[10:11], v[0:1]
-; GFX7-NEXT: v_cmp_class_f64_e64 s[8:9], v[0:1], s4
+; GFX7-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v6
; GFX7-NEXT: v_fract_f64_e32 v[12:13], v[2:3]
-; GFX7-NEXT: v_cmp_class_f64_e64 s[10:11], v[2:3], s4
+; GFX7-NEXT: v_cmp_class_f64_e64 s[4:5], v[2:3], v6
; GFX7-NEXT: v_floor_f64_e32 v[8:9], v[2:3]
; GFX7-NEXT: v_floor_f64_e32 v[6:7], v[0:1]
-; GFX7-NEXT: s_mov_b32 s6, 0
-; GFX7-NEXT: s_mov_b32 s7, 0xf000
-; GFX7-NEXT: s_mov_b32 s4, s6
-; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: v_cndmask_b32_e64 v0, v10, 0, s[8:9]
-; GFX7-NEXT: v_cndmask_b32_e64 v1, v11, 0, s[8:9]
-; GFX7-NEXT: v_cndmask_b32_e64 v2, v12, 0, s[10:11]
-; GFX7-NEXT: v_cndmask_b32_e64 v3, v13, 0, s[10:11]
-; GFX7-NEXT: buffer_store_dwordx4 v[6:9], v[4:5], s[4:7], 0 addr64
+; GFX7-NEXT: s_mov_b32 s10, 0
+; GFX7-NEXT: s_mov_b32 s11, 0xf000
+; GFX7-NEXT: s_mov_b32 s8, s10
+; GFX7-NEXT: s_mov_b32 s9, s10
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v10, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v11, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v12, 0, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v3, v13, 0, s[4:5]
+; GFX7-NEXT: buffer_store_dwordx4 v[6:9], v[4:5], s[8:11], 0 addr64
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: safe_math_fract_v2f64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: s_movk_i32 s6, 0x204
+; GFX8-NEXT: v_mov_b32_e32 v6, 0x204
; GFX8-NEXT: v_fract_f64_e32 v[10:11], v[0:1]
-; GFX8-NEXT: v_cmp_class_f64_e64 s[4:5], v[0:1], s6
+; GFX8-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v6
; GFX8-NEXT: v_fract_f64_e32 v[12:13], v[2:3]
-; GFX8-NEXT: v_cmp_class_f64_e64 s[6:7], v[2:3], s6
+; GFX8-NEXT: v_cmp_class_f64_e64 s[4:5], v[2:3], v6
; GFX8-NEXT: v_floor_f64_e32 v[8:9], v[2:3]
; GFX8-NEXT: v_floor_f64_e32 v[6:7], v[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v0, v10, 0, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e64 v1, v11, 0, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e64 v2, v12, 0, s[6:7]
-; GFX8-NEXT: v_cndmask_b32_e64 v3, v13, 0, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v10, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v11, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v12, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v13, 0, s[4:5]
; GFX8-NEXT: global_store_dwordx4 v[4:5], v[6:9], off
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
index e3fada3..b717280 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
@@ -1,71 +1,43 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -amdgpu-atomic-optimizer-strategy=Iterative -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-ITERATIVE %s
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -amdgpu-atomic-optimizer-strategy=DPP -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-DPP %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -passes='amdgpu-atomic-optimizer<strategy=iterative>,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-ITERATIVE %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -passes='amdgpu-atomic-optimizer<strategy=dpp>,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-DPP %s
+
+; Tests various combinations of uniform/divergent addresses and uniform/divergent values of various types for atomic operations.
+; The optimization remains the same for the Iterative and DPP strategies when the value is uniform; these different scan/reduction
+; strategies apply only to divergent values. The optimization is also valid for divergent addresses. The tests also cover different scopes.
define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
-; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to float
-; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = fmul float [[VAL:%.*]], [[TMP11]]
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-ITERATIVE: 14:
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP16]]
-; IR-ITERATIVE: 16:
-; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP18]])
-; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = uitofp i32 [[TMP8]] to float
-; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = fmul float [[VAL]], [[TMP21]]
-; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = fadd float [[TMP20]], [[TMP22]]
-; IR-ITERATIVE-NEXT: br label [[TMP24]]
-; IR-ITERATIVE: 24:
-; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-ITERATIVE-NEXT: ret float [[TMP25]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
-; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to float
-; IR-DPP-NEXT: [[TMP12:%.*]] = fmul float [[VAL:%.*]], [[TMP11]]
-; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-DPP: 14:
-; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP16]]
-; IR-DPP: 16:
-; IR-DPP-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-DPP-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP18]])
-; IR-DPP-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
-; IR-DPP-NEXT: [[TMP21:%.*]] = uitofp i32 [[TMP8]] to float
-; IR-DPP-NEXT: [[TMP22:%.*]] = fmul float [[VAL]], [[TMP21]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = fadd float [[TMP20]], [[TMP22]]
-; IR-DPP-NEXT: br label [[TMP24]]
-; IR-DPP: 24:
-; IR-DPP-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-DPP-NEXT: ret float [[TMP25]]
+; IR-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
+; IR-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to float
+; IR-NEXT: [[TMP12:%.*]] = fmul float [[VAL:%.*]], [[TMP11]]
+; IR-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR: 14:
+; IR-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
+; IR-NEXT: br label [[TMP16]]
+; IR: 16:
+; IR-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
+; IR-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP18]])
+; IR-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
+; IR-NEXT: [[TMP21:%.*]] = uitofp i32 [[TMP8]] to float
+; IR-NEXT: [[TMP22:%.*]] = fmul float [[VAL]], [[TMP21]]
+; IR-NEXT: [[TMP23:%.*]] = fadd float [[TMP20]], [[TMP22]]
+; IR-NEXT: br label [[TMP24]]
+; IR: 24:
+; IR-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
+; IR-NEXT: ret float [[TMP25]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic, align 4
ret float %result
@@ -411,7 +383,6 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_uni_value_agent_scope_str
ret float %result
}
-
define amdgpu_ps float @global_atomic_fsub_uni_address_div_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, float %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fsub_uni_address_div_value_agent_scope_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
@@ -514,61 +485,33 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_div_value_agent_scope_str
}
define amdgpu_ps float @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP20:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
-; IR-ITERATIVE: 10:
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP12]]
-; IR-ITERATIVE: 12:
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP14]])
-; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
-; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = uitofp i32 [[TMP8]] to float
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = select i1 [[TMP9]], float 0x7FF0000000000000, float [[VAL]]
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call float @llvm.minnum.f32(float [[TMP16]], float [[TMP18]])
-; IR-ITERATIVE-NEXT: br label [[TMP20]]
-; IR-ITERATIVE: 20:
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP19]], [[TMP12]] ]
-; IR-ITERATIVE-NEXT: ret float [[TMP21]]
-;
-; IR-DPP-LABEL: @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP20:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-DPP-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
-; IR-DPP: 10:
-; IR-DPP-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP12]]
-; IR-DPP: 12:
-; IR-DPP-NEXT: [[TMP13:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-DPP-NEXT: [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
-; IR-DPP-NEXT: [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP14]])
-; IR-DPP-NEXT: [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
-; IR-DPP-NEXT: [[TMP17:%.*]] = uitofp i32 [[TMP8]] to float
-; IR-DPP-NEXT: [[TMP18:%.*]] = select i1 [[TMP9]], float 0x7FF0000000000000, float [[VAL]]
-; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.minnum.f32(float [[TMP16]], float [[TMP18]])
-; IR-DPP-NEXT: br label [[TMP20]]
-; IR-DPP: 20:
-; IR-DPP-NEXT: [[TMP21:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP19]], [[TMP12]] ]
-; IR-DPP-NEXT: ret float [[TMP21]]
+; IR-LABEL: @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP20:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR: 10:
+; IR-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: br label [[TMP12]]
+; IR: 12:
+; IR-NEXT: [[TMP13:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
+; IR-NEXT: [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
+; IR-NEXT: [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP14]])
+; IR-NEXT: [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
+; IR-NEXT: [[TMP17:%.*]] = uitofp i32 [[TMP8]] to float
+; IR-NEXT: [[TMP18:%.*]] = select i1 [[TMP9]], float 0x7FF0000000000000, float [[VAL]]
+; IR-NEXT: [[TMP19:%.*]] = call float @llvm.minnum.f32(float [[TMP16]], float [[TMP18]])
+; IR-NEXT: br label [[TMP20]]
+; IR: 20:
+; IR-NEXT: [[TMP21:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP19]], [[TMP12]] ]
+; IR-NEXT: ret float [[TMP21]]
;
%result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret float %result
@@ -1007,164 +950,674 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_system_scope_st
ret float %result
}
-
define amdgpu_ps float @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic, align 4
ret float %result
}
define amdgpu_ps float @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr, float %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic, align 4
ret float %result
}
define amdgpu_ps float @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
ret float %result
}
define amdgpu_ps float @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
ret float %result
}
define amdgpu_ps float @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr, float inreg %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret float %result
}
-
define amdgpu_ps float @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(ptr addrspace(1) %ptr, float %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fsub ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret float %result
}
define amdgpu_ps float @global_atomic_fmin_div_address_uni_value_agent_scope(ptr addrspace(1) %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fmin_div_address_uni_value_agent_scope(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fmin_div_address_uni_value_agent_scope(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fmin_div_address_uni_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret float %result
}
define amdgpu_ps float @global_atomic_fmin_div_address_div_value_agent_scope(ptr addrspace(1) %ptr, float %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fmin_div_address_div_value_agent_scope(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fmin_div_address_div_value_agent_scope(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fmin_div_address_div_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret float %result
}
define amdgpu_ps float @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret float %result
}
define amdgpu_ps float @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret float %result
}
define amdgpu_ps float @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(ptr addrspace(1) %ptr, float inreg %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic, align 4
ret float %result
}
define amdgpu_ps float @global_atomic_fadd_div_address_div_value_system_scope_strictfp(ptr addrspace(1) %ptr, float %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_div_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
-; IR-ITERATIVE-NEXT: ret float [[RESULT]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_div_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
-; IR-DPP-NEXT: ret float [[RESULT]]
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret float [[RESULT]]
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic, align 4
ret float %result
}
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
+; IR-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to double
+; IR-NEXT: [[TMP12:%.*]] = fmul double [[VAL:%.*]], [[TMP11]]
+; IR-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR: 14:
+; IR-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 4
+; IR-NEXT: br label [[TMP16]]
+; IR: 16:
+; IR-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]])
+; IR-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]])
+; IR-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-NEXT: [[TMP27:%.*]] = uitofp i32 [[TMP8]] to double
+; IR-NEXT: [[TMP28:%.*]] = fmul double [[VAL]], [[TMP27]]
+; IR-NEXT: [[TMP29:%.*]] = fadd double [[TMP26]], [[TMP28]]
+; IR-NEXT: br label [[TMP30]]
+; IR: 30:
+; IR-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-NEXT: ret double [[TMP31]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_scope_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_scope_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP30]]
+; IR-ITERATIVE: 30:
+; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-ITERATIVE-NEXT: ret double [[TMP31]]
+;
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-DPP-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-DPP-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-DPP-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-DPP-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-DPP-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-DPP-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP30]]
+; IR-DPP: 30:
+; IR-DPP-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-DPP-NEXT: ret double [[TMP31]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
+; IR-ITERATIVE-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP30]]
+; IR-ITERATIVE: 30:
+; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-ITERATIVE-NEXT: ret double [[TMP31]]
+;
+; IR-DPP-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-DPP-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-DPP-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-DPP-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-DPP-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-DPP-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-DPP-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP30]]
+; IR-DPP: 30:
+; IR-DPP-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-DPP-NEXT: ret double [[TMP31]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP26:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR: 10:
+; IR-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: br label [[TMP12]]
+; IR: 12:
+; IR-NEXT: [[TMP13:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
+; IR-NEXT: [[TMP14:%.*]] = bitcast double [[TMP13]] to i64
+; IR-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
+; IR-NEXT: [[TMP16:%.*]] = lshr i64 [[TMP14]], 32
+; IR-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
+; IR-NEXT: [[TMP18:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP15]])
+; IR-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP17]])
+; IR-NEXT: [[TMP20:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0
+; IR-NEXT: [[TMP21:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP19]], i32 1
+; IR-NEXT: [[TMP22:%.*]] = bitcast <2 x i32> [[TMP21]] to double
+; IR-NEXT: [[TMP23:%.*]] = uitofp i32 [[TMP8]] to double
+; IR-NEXT: [[TMP24:%.*]] = select i1 [[TMP9]], double 0x7FF0000000000000, double [[VAL]]
+; IR-NEXT: [[TMP25:%.*]] = call double @llvm.minnum.f64(double [[TMP22]], double [[TMP24]])
+; IR-NEXT: br label [[TMP26]]
+; IR: 26:
+; IR-NEXT: [[TMP27:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP25]], [[TMP12]] ]
+; IR-NEXT: ret double [[TMP27]]
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP26:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR-ITERATIVE: 10:
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP12]]
+; IR-ITERATIVE: 12:
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = bitcast double [[TMP13]] to i64
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = lshr i64 [[TMP14]], 32
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP17]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP19]], i32 1
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = bitcast <2 x i32> [[TMP21]] to double
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = select i1 [[TMP9]], double 0xFFF0000000000000, double [[VAL]]
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP22]], double [[TMP24]], metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP26]]
+; IR-ITERATIVE: 26:
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP25]], [[TMP12]] ]
+; IR-ITERATIVE-NEXT: ret double [[TMP27]]
+;
+; IR-DPP-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP26:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR-DPP: 10:
+; IR-DPP-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP12]]
+; IR-DPP: 12:
+; IR-DPP-NEXT: [[TMP13:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
+; IR-DPP-NEXT: [[TMP14:%.*]] = bitcast double [[TMP13]] to i64
+; IR-DPP-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
+; IR-DPP-NEXT: [[TMP16:%.*]] = lshr i64 [[TMP14]], 32
+; IR-DPP-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
+; IR-DPP-NEXT: [[TMP18:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP15]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP17]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0
+; IR-DPP-NEXT: [[TMP21:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP19]], i32 1
+; IR-DPP-NEXT: [[TMP22:%.*]] = bitcast <2 x i32> [[TMP21]] to double
+; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = select i1 [[TMP9]], double 0xFFF0000000000000, double [[VAL]]
+; IR-DPP-NEXT: [[TMP25:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP22]], double [[TMP24]], metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP26]]
+; IR-DPP: 26:
+; IR-DPP-NEXT: [[TMP27:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP25]], [[TMP12]] ]
+; IR-DPP-NEXT: ret double [[TMP27]]
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1{
+; IR-LABEL: @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP30]]
+; IR-ITERATIVE: 30:
+; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-ITERATIVE-NEXT: ret double [[TMP31]]
+;
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-DPP-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-DPP-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-DPP-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-DPP-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-DPP-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-DPP-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP30]]
+; IR-DPP: 30:
+; IR-DPP-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-DPP-NEXT: ret double [[TMP31]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fsub_double_div_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr, double inreg %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_div_address_uni_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fsub_double_div_address_div_value_agent_scope_strictfp(ptr addrspace(1) %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_div_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fmin_double_div_address_uni_value_agent_scope(ptr addrspace(1) %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_div_address_uni_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fmin_double_div_address_div_value_agent_scope(ptr addrspace(1) %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_div_address_div_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic__fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1{
+; IR-LABEL: @global_atomic__fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic__fmax_double_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic__fmax_double_div_address_div_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_uni_value_system_scope_strictfp(ptr addrspace(1) %ptr, double inreg %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_div_value_system_scope_strictfp(ptr addrspace(1) %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret double %result
+}
+
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #1 = { strictfp "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #2 = { strictfp }
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
index 76ec1cc..99d02ff 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
@@ -358,65 +358,6 @@ define amdgpu_gfx i32 @global_atomic_xchg_i32_ret_offset_scalar(ptr addrspace(1)
; ---------------------------------------------------------------------
define void @global_atomic_xchg_f32_noret(ptr addrspace(1) %ptr, float %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_noret:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_load_dword v3, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB0_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_noret:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_load_dword v3, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB0_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_noret:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v3, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB0_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -450,69 +391,6 @@ define void @global_atomic_xchg_f32_noret(ptr addrspace(1) %ptr, float %in) {
}
define void @global_atomic_xchg_f32_noret_offset(ptr addrspace(1) %out, float %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_noret_offset:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT: global_load_dword v3, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB1_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_noret_offset:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT: global_load_dword v3, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB1_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_noret_offset:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB1_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -549,71 +427,6 @@ define void @global_atomic_xchg_f32_noret_offset(ptr addrspace(1) %out, float %i
}
define float @global_atomic_xchg_f32_ret(ptr addrspace(1) %ptr, float %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_ret:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB2_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v0, v4
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_ret:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB2_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v0, v4
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_ret:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v4, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB2_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v4
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -648,73 +461,6 @@ define float @global_atomic_xchg_f32_ret(ptr addrspace(1) %ptr, float %in) {
}
define float @global_atomic_xchg_f32_ret_offset(ptr addrspace(1) %out, float %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_ret_offset:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v4, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT: global_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v3, v0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[4:5], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB3_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_ret_offset:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT: global_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v3, v0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[4:5], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB3_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_ret_offset:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB3_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v4
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -752,80 +498,6 @@ define float @global_atomic_xchg_f32_ret_offset(ptr addrspace(1) %out, float %in
}
define amdgpu_gfx void @global_atomic_xchg_f32_noret_scalar(ptr addrspace(1) inreg %ptr, float inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_noret_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v0, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s5
-; GCN1-NEXT: global_load_dword v1, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[34:35], 0
-; GCN1-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v2, s4
-; GCN1-NEXT: v_mov_b32_e32 v0, s6
-; GCN1-NEXT: v_mov_b32_e32 v3, s5
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_cbranch_execnz .LBB4_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_noret_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v0, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s5
-; GCN2-NEXT: global_load_dword v1, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[34:35], 0
-; GCN2-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v2, s4
-; GCN2-NEXT: v_mov_b32_e32 v0, s6
-; GCN2-NEXT: v_mov_b32_e32 v3, s5
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_cbranch_execnz .LBB4_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_noret_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v1, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: v_mov_b32_e32 v0, s6
-; GCN3-NEXT: v_mov_b32_e32 v3, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: v_mov_b32_e32 v1, v0
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB4_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -876,84 +548,6 @@ define amdgpu_gfx void @global_atomic_xchg_f32_noret_scalar(ptr addrspace(1) inr
}
define amdgpu_gfx void @global_atomic_xchg_f32_noret_offset_scalar(ptr addrspace(1) inreg %out, float inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_noret_offset_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: s_add_u32 s34, s4, 16
-; GCN1-NEXT: s_addc_u32 s35, s5, 0
-; GCN1-NEXT: v_mov_b32_e32 v0, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s35
-; GCN1-NEXT: global_load_dword v1, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[36:37], 0
-; GCN1-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v2, s34
-; GCN1-NEXT: v_mov_b32_e32 v0, s6
-; GCN1-NEXT: v_mov_b32_e32 v3, s35
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_cbranch_execnz .LBB5_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_noret_offset_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: s_add_u32 s34, s4, 16
-; GCN2-NEXT: s_addc_u32 s35, s5, 0
-; GCN2-NEXT: v_mov_b32_e32 v0, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s35
-; GCN2-NEXT: global_load_dword v1, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[36:37], 0
-; GCN2-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v2, s34
-; GCN2-NEXT: v_mov_b32_e32 v0, s6
-; GCN2-NEXT: v_mov_b32_e32 v3, s35
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_cbranch_execnz .LBB5_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_noret_offset_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v1, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: v_mov_b32_e32 v0, s6
-; GCN3-NEXT: v_mov_b32_e32 v3, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: v_mov_b32_e32 v1, v0
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB5_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1007,83 +601,6 @@ define amdgpu_gfx void @global_atomic_xchg_f32_noret_offset_scalar(ptr addrspace
}
define amdgpu_gfx float @global_atomic_xchg_f32_ret_scalar(ptr addrspace(1) inreg %ptr, float inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_ret_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v0, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s5
-; GCN1-NEXT: global_load_dword v0, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[34:35], 0
-; GCN1-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v3, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v2, v0
-; GCN1-NEXT: v_mov_b32_e32 v4, s5
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN1-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_cbranch_execnz .LBB6_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_ret_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v0, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s5
-; GCN2-NEXT: global_load_dword v0, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[34:35], 0
-; GCN2-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v3, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v2, v0
-; GCN2-NEXT: v_mov_b32_e32 v4, s5
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN2-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_cbranch_execnz .LBB6_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_ret_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v0, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v3, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v2, v0
-; GCN3-NEXT: v_mov_b32_e32 v4, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB6_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1134,87 +651,6 @@ define amdgpu_gfx float @global_atomic_xchg_f32_ret_scalar(ptr addrspace(1) inre
}
define amdgpu_gfx float @global_atomic_xchg_f32_ret_offset_scalar(ptr addrspace(1) inreg %out, float inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_ret_offset_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: s_add_u32 s34, s4, 16
-; GCN1-NEXT: s_addc_u32 s35, s5, 0
-; GCN1-NEXT: v_mov_b32_e32 v0, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s35
-; GCN1-NEXT: global_load_dword v0, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[36:37], 0
-; GCN1-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v3, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v2, v0
-; GCN1-NEXT: v_mov_b32_e32 v4, s35
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN1-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_cbranch_execnz .LBB7_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_ret_offset_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: s_add_u32 s34, s4, 16
-; GCN2-NEXT: s_addc_u32 s35, s5, 0
-; GCN2-NEXT: v_mov_b32_e32 v0, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s35
-; GCN2-NEXT: global_load_dword v0, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[36:37], 0
-; GCN2-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v3, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v2, v0
-; GCN2-NEXT: v_mov_b32_e32 v4, s35
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN2-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_cbranch_execnz .LBB7_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_ret_offset_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v0, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v3, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v2, v0
-; GCN3-NEXT: v_mov_b32_e32 v4, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB7_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
index d137f47..380ce7f 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
@@ -372,65 +372,6 @@ define amdgpu_gfx i64 @global_atomic_xchg_i64_ret_offset_scalar(ptr addrspace(1)
; ---------------------------------------------------------------------
define void @global_atomic_xchg_f64_noret(ptr addrspace(1) %ptr, double %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_noret:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_load_dword v3, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB0_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_noret:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_load_dword v3, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB0_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_noret:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v3, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB0_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -464,69 +405,6 @@ define void @global_atomic_xchg_f64_noret(ptr addrspace(1) %ptr, double %in) {
}
define void @global_atomic_xchg_f64_noret_offset(ptr addrspace(1) %out, double %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_noret_offset:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT: global_load_dword v3, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB1_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_noret_offset:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT: global_load_dword v3, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB1_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_noret_offset:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB1_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -563,71 +441,6 @@ define void @global_atomic_xchg_f64_noret_offset(ptr addrspace(1) %out, double %
}
define double @global_atomic_xchg_f64_ret(ptr addrspace(1) %ptr, double %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_ret:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB2_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v0, v4
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_ret:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB2_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v0, v4
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_ret:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v4, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB2_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v4
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -663,73 +476,6 @@ define double @global_atomic_xchg_f64_ret(ptr addrspace(1) %ptr, double %in) {
}
define double @global_atomic_xchg_f64_ret_offset(ptr addrspace(1) %out, double %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_ret_offset:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v4, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT: global_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v3, v0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[4:5], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB3_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_ret_offset:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT: global_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v3, v0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[4:5], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB3_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_ret_offset:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB3_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v4
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -768,80 +514,6 @@ define double @global_atomic_xchg_f64_ret_offset(ptr addrspace(1) %out, double %
}
define amdgpu_gfx void @global_atomic_xchg_f64_noret_scalar(ptr addrspace(1) inreg %ptr, double inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_noret_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v0, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s5
-; GCN1-NEXT: global_load_dword v1, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[34:35], 0
-; GCN1-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v2, s4
-; GCN1-NEXT: v_mov_b32_e32 v0, s6
-; GCN1-NEXT: v_mov_b32_e32 v3, s5
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_cbranch_execnz .LBB4_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_noret_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v0, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s5
-; GCN2-NEXT: global_load_dword v1, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[34:35], 0
-; GCN2-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v2, s4
-; GCN2-NEXT: v_mov_b32_e32 v0, s6
-; GCN2-NEXT: v_mov_b32_e32 v3, s5
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_cbranch_execnz .LBB4_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_noret_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v1, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: v_mov_b32_e32 v0, s6
-; GCN3-NEXT: v_mov_b32_e32 v3, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: v_mov_b32_e32 v1, v0
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB4_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -896,84 +568,6 @@ define amdgpu_gfx void @global_atomic_xchg_f64_noret_scalar(ptr addrspace(1) inr
}
define amdgpu_gfx void @global_atomic_xchg_f64_noret_offset_scalar(ptr addrspace(1) inreg %out, double inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_noret_offset_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: s_add_u32 s34, s4, 16
-; GCN1-NEXT: s_addc_u32 s35, s5, 0
-; GCN1-NEXT: v_mov_b32_e32 v0, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s35
-; GCN1-NEXT: global_load_dword v1, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[36:37], 0
-; GCN1-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v2, s34
-; GCN1-NEXT: v_mov_b32_e32 v0, s6
-; GCN1-NEXT: v_mov_b32_e32 v3, s35
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_cbranch_execnz .LBB5_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_noret_offset_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: s_add_u32 s34, s4, 16
-; GCN2-NEXT: s_addc_u32 s35, s5, 0
-; GCN2-NEXT: v_mov_b32_e32 v0, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s35
-; GCN2-NEXT: global_load_dword v1, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[36:37], 0
-; GCN2-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v2, s34
-; GCN2-NEXT: v_mov_b32_e32 v0, s6
-; GCN2-NEXT: v_mov_b32_e32 v3, s35
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_cbranch_execnz .LBB5_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_noret_offset_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v1, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: v_mov_b32_e32 v0, s6
-; GCN3-NEXT: v_mov_b32_e32 v3, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: v_mov_b32_e32 v1, v0
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB5_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1029,83 +623,6 @@ define amdgpu_gfx void @global_atomic_xchg_f64_noret_offset_scalar(ptr addrspace
}
define amdgpu_gfx double @global_atomic_xchg_f64_ret_scalar(ptr addrspace(1) inreg %ptr, double inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_ret_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v0, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s5
-; GCN1-NEXT: global_load_dword v0, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[34:35], 0
-; GCN1-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v3, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v2, v0
-; GCN1-NEXT: v_mov_b32_e32 v4, s5
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN1-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_cbranch_execnz .LBB6_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_ret_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v0, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s5
-; GCN2-NEXT: global_load_dword v0, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[34:35], 0
-; GCN2-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v3, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v2, v0
-; GCN2-NEXT: v_mov_b32_e32 v4, s5
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN2-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_cbranch_execnz .LBB6_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_ret_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v0, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v3, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v2, v0
-; GCN3-NEXT: v_mov_b32_e32 v4, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB6_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1160,87 +677,6 @@ define amdgpu_gfx double @global_atomic_xchg_f64_ret_scalar(ptr addrspace(1) inr
}
define amdgpu_gfx double @global_atomic_xchg_f64_ret_offset_scalar(ptr addrspace(1) inreg %out, double inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_ret_offset_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: s_add_u32 s34, s4, 16
-; GCN1-NEXT: s_addc_u32 s35, s5, 0
-; GCN1-NEXT: v_mov_b32_e32 v0, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s35
-; GCN1-NEXT: global_load_dword v0, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[36:37], 0
-; GCN1-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v3, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v2, v0
-; GCN1-NEXT: v_mov_b32_e32 v4, s35
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN1-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_cbranch_execnz .LBB7_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_ret_offset_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: s_add_u32 s34, s4, 16
-; GCN2-NEXT: s_addc_u32 s35, s5, 0
-; GCN2-NEXT: v_mov_b32_e32 v0, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s35
-; GCN2-NEXT: global_load_dword v0, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[36:37], 0
-; GCN2-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v3, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v2, v0
-; GCN2-NEXT: v_mov_b32_e32 v4, s35
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN2-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_cbranch_execnz .LBB7_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_ret_offset_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v0, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v3, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v2, v0
-; GCN3-NEXT: v_mov_b32_e32 v4, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB7_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll
index fab24e1..86e3d93 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-- -amdgpu-atomic-optimizer-strategy=Iterative -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-ITERATIVE %s
-; RUN: opt -S -mtriple=amdgcn-- -amdgpu-atomic-optimizer-strategy=DPP -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-DPP %s
+; RUN: opt -S -mtriple=amdgcn-- -passes='amdgpu-atomic-optimizer<strategy=iterative>,verify<domtree>' %s | FileCheck -check-prefix=IR-ITERATIVE %s
+; RUN: opt -S -mtriple=amdgcn-- -passes='amdgpu-atomic-optimizer<strategy=dpp>,verify<domtree>' %s | FileCheck -check-prefix=IR-DPP %s
declare i32 @llvm.amdgcn.workitem.id.x()
define amdgpu_kernel void @global_atomic_fadd_uni_value(ptr addrspace(1) %ptr) #0 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_value(
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
index f87932b..b9234f4 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
@@ -1,55 +1,35 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -amdgpu-atomic-optimizer-strategy=Iterative -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-ITERATIVE %s
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -amdgpu-atomic-optimizer-strategy=DPP -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-DPP %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -passes='amdgpu-atomic-optimizer<strategy=iterative>,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-ITERATIVE %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -passes='amdgpu-atomic-optimizer<strategy=dpp>,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-DPP %s
+
+; Tests various combinations of uniform/divergent address and uniform/divergent value inputs of various types for atomic operations.
+; The optimization is the same for the Iterative and DPP strategies when the value is uniform; these scan/reduction
+; strategies only differ for divergent values. The optimization also applies to divergent addresses. The tests cover different scopes as well.
define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
-; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to float
-; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = fmul float [[VAL:%.*]], [[TMP11]]
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-ITERATIVE: 14:
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP16]]
-; IR-ITERATIVE: 16:
-; IR-ITERATIVE-NEXT: br label [[TMP17]]
-; IR-ITERATIVE: 17:
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
-; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to float
-; IR-DPP-NEXT: [[TMP12:%.*]] = fmul float [[VAL:%.*]], [[TMP11]]
-; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-DPP: 14:
-; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP16]]
-; IR-DPP: 16:
-; IR-DPP-NEXT: br label [[TMP17]]
-; IR-DPP: 17:
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
+; IR-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to float
+; IR-NEXT: [[TMP12:%.*]] = fmul float [[VAL:%.*]], [[TMP11]]
+; IR-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR: 14:
+; IR-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
+; IR-NEXT: br label [[TMP16]]
+; IR: 16:
+; IR-NEXT: br label [[TMP17]]
+; IR: 17:
+; IR-NEXT: ret void
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic, align 4
ret void
@@ -325,7 +305,6 @@ define amdgpu_ps void @global_atomic_fsub_uni_address_uni_value_agent_scope_stri
ret void
}
-
define amdgpu_ps void @global_atomic_fsub_uni_address_div_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, float %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fsub_uni_address_div_value_agent_scope_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
@@ -409,45 +388,25 @@ define amdgpu_ps void @global_atomic_fsub_uni_address_div_value_agent_scope_stri
}
define amdgpu_ps void @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
-; IR-ITERATIVE: 10:
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP12]]
-; IR-ITERATIVE: 12:
-; IR-ITERATIVE-NEXT: br label [[TMP13]]
-; IR-ITERATIVE: 13:
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-DPP-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
-; IR-DPP: 10:
-; IR-DPP-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP12]]
-; IR-DPP: 12:
-; IR-DPP-NEXT: br label [[TMP13]]
-; IR-DPP: 13:
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR: 10:
+; IR-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: br label [[TMP12]]
+; IR: 12:
+; IR-NEXT: br label [[TMP13]]
+; IR: 13:
+; IR-NEXT: ret void
;
%result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret void
@@ -797,161 +756,531 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_system_scope_str
ret void
}
-
define amdgpu_ps void @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic, align 4
ret void
}
define amdgpu_ps void @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr, float %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic, align 4
ret void
}
define amdgpu_ps void @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
ret void
}
define amdgpu_ps void @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
ret void
}
define amdgpu_ps void @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr, float inreg %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fsub_div_address_uni_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret void
}
-
define amdgpu_ps void @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(ptr addrspace(1) %ptr, float %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fsub_div_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fsub ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret void
}
define amdgpu_ps void @global_atomic_fmin_div_address_uni_value_agent_scope(ptr addrspace(1) %ptr, float inreg %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fmin_div_address_uni_value_agent_scope(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fmin_div_address_uni_value_agent_scope(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fmin_div_address_uni_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret void
}
define amdgpu_ps void @global_atomic_fmin_div_address_div_value_agent_scope(ptr addrspace(1) %ptr, float %val) #0 {
-; IR-ITERATIVE-LABEL: @global_atomic_fmin_div_address_div_value_agent_scope(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fmin_div_address_div_value_agent_scope(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fmin_div_address_div_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
;
%result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret void
}
define amdgpu_ps void @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1{
+; IR-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(ptr addrspace(1) %ptr, float inreg %val) #2 {
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_div_address_div_value_system_scope_strictfp(ptr addrspace(1) %ptr, float %val) #2 {
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
+; IR-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to double
+; IR-NEXT: [[TMP12:%.*]] = fmul double [[VAL:%.*]], [[TMP11]]
+; IR-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR: 14:
+; IR-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 4
+; IR-NEXT: br label [[TMP16]]
+; IR: 16:
+; IR-NEXT: br label [[TMP17]]
+; IR: 17:
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_scope_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_scope_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: br label [[TMP17]]
+; IR-ITERATIVE: 17:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: br label [[TMP17]]
+; IR-DPP: 17:
; IR-DPP-NEXT: ret void
;
- %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
ret void
}
-define amdgpu_ps void @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
+; IR-ITERATIVE-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: br label [[TMP17]]
+; IR-ITERATIVE: 17:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-DPP-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: br label [[TMP17]]
+; IR-DPP: 17:
; IR-DPP-NEXT: ret void
;
- %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
ret void
}
-define amdgpu_ps void @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(ptr addrspace(1) %ptr, float inreg %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+define amdgpu_ps void @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR: 10:
+; IR-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: br label [[TMP12]]
+; IR: 12:
+; IR-NEXT: br label [[TMP13]]
+; IR: 13:
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR-ITERATIVE: 10:
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP12]]
+; IR-ITERATIVE: 12:
+; IR-ITERATIVE-NEXT: br label [[TMP13]]
+; IR-ITERATIVE: 13:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_uni_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+; IR-DPP-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR-DPP: 10:
+; IR-DPP-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP12]]
+; IR-DPP: 12:
+; IR-DPP-NEXT: br label [[TMP13]]
+; IR-DPP: 13:
; IR-DPP-NEXT: ret void
;
- %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic, align 4
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
ret void
}
-define amdgpu_ps void @global_atomic_fadd_div_address_div_value_system_scope_strictfp(ptr addrspace(1) %ptr, float %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_address_div_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+define amdgpu_ps void @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1{
+; IR-LABEL: @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: br label [[TMP17]]
+; IR-ITERATIVE: 17:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fadd_div_address_div_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] monotonic, align 4
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: br label [[TMP17]]
+; IR-DPP: 17:
; IR-DPP-NEXT: ret void
;
- %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic, align 4
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fsub_double_div_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr, double inreg %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_div_address_uni_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fsub_double_div_address_div_value_agent_scope_strictfp(ptr addrspace(1) %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_div_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmin_double_div_address_uni_value_agent_scope(ptr addrspace(1) %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_div_address_uni_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmin_double_div_address_div_value_agent_scope(ptr addrspace(1) %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_div_address_div_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1{
+; IR-LABEL: @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1{
+; IR-LABEL: @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_uni_value_system_scope_strictfp(ptr addrspace(1) %ptr, double inreg %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_div_value_system_scope_strictfp(ptr addrspace(1) %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
index 96c615b..4f00d48 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
@@ -13,6 +13,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-atomic-optimizer-strategy=DPP -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX1132-DPP %s
declare float @div.float.value()
+declare double @div.double.value()
define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe:
@@ -5408,6 +5409,5583 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_defalut_scop
ret void
}
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB9_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX7LESS-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX7LESS-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[41:42]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_2
+; GFX7LESS-NEXT: .LBB9_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB9_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_2
+; GFX9-NEXT: .LBB9_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB9_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1064-NEXT: .LBB9_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB9_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1032-NEXT: .LBB9_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB9_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1164-NEXT: .LBB9_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB9_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1132-NEXT: .LBB9_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX9-DPP-NEXT: .LBB9_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1064-DPP-NEXT: .LBB9_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1032-DPP-NEXT: .LBB9_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1164-DPP-NEXT: .LBB9_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1132-DPP-NEXT: .LBB9_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[40:41]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value()
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB11_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_2
+; GFX7LESS-NEXT: .LBB11_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB11_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_2
+; GFX9-NEXT: .LBB11_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB11_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1064-NEXT: .LBB11_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB11_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1032-NEXT: .LBB11_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB11_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1164-NEXT: .LBB11_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB11_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1132-NEXT: .LBB11_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX9-DPP-NEXT: .LBB11_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1064-DPP-NEXT: .LBB11_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1032-DPP-NEXT: .LBB11_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1164-DPP-NEXT: .LBB11_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1132-DPP-NEXT: .LBB11_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB12_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value() strictfp
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB13_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB13_2
+; GFX7LESS-NEXT: .LBB13_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB13_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB13_2
+; GFX9-NEXT: .LBB13_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB13_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1064-NEXT: .LBB13_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB13_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1032-NEXT: .LBB13_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB13_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1164-NEXT: .LBB13_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB13_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1132-NEXT: .LBB13_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX9-DPP-NEXT: .LBB13_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1064-DPP-NEXT: .LBB13_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1032-DPP-NEXT: .LBB13_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1164-DPP-NEXT: .LBB13_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1132-DPP-NEXT: .LBB13_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic
+ ret void
+}
+
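+; Divergent-value case: the addend is produced per lane by a call to @div.double.value, so the checks below lower the fadd to a per-lane global cmpswap retry loop rather than a single uniform update.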
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB14_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic
+ ret void
+}
+
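+; Divergent-value variant under attribute set #1 (the _structfp suffix suggests strictfp is intended); the addend comes from a strictfp call to @div.float.value and the lowering is the same cmpswap retry loop as above.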
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB15_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value() strictfp
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic
+ ret void
+}
+
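+; Uniform 4.0 addend at default scope under attribute set #2 (per the _strictfp suffix); in the GFX7LESS and GFX9 checks below the retry loop goes through the __atomic_compare_exchange libcall instead of a native cmpswap.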
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_defalut_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB16_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s1, 0x43300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[41:42]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB16_2
+; GFX7LESS-NEXT: .LBB16_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB16_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s1, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB16_2
+; GFX9-NEXT: .LBB16_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB16_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1064-NEXT: .LBB16_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB16_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1032-NEXT: .LBB16_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_bcnt1_i32_b64 s0, exec
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1164-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB16_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1164-NEXT: .LBB16_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1132-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB16_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1132-NEXT: .LBB16_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX9-DPP-NEXT: .LBB16_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1064-DPP-NEXT: .LBB16_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1032-DPP-NEXT: .LBB16_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1164-DPP-NEXT: .LBB16_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1132-DPP-NEXT: .LBB16_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[40:41]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB17_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value() strictfp
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ ret void
+}
+
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #1 = { strictfp "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #2 = { strictfp}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
index 3cc5a4c..622be43 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
@@ -13,6 +13,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-atomic-optimizer-strategy=DPP -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX1132-DPP %s
declare float @div.float.value()
+declare double @div.double.value()
define amdgpu_kernel void @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe:
@@ -3550,6 +3551,3965 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_defalut_scop
ret void
}
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB6_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], 4.0
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB6_2
+; GFX7LESS-NEXT: .LBB6_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB6_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB6_2
+; GFX9-NEXT: .LBB6_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB6_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1064-NEXT: .LBB6_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB6_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1032-NEXT: .LBB6_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB6_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1164-NEXT: .LBB6_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB6_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1132-NEXT: .LBB6_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX9-DPP-NEXT: .LBB6_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1064-DPP-NEXT: .LBB6_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1032-DPP-NEXT: .LBB6_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1164-DPP-NEXT: .LBB6_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1132-DPP-NEXT: .LBB6_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB7_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
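+; Note: the under-aligned (align 4) double atomicrmw below is expanded through
+; __atomic_compare_exchange libcalls, which is what the calls in the checks
+; above verify.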
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ ret void
+}
+
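+; With a uniform constant operand (4.0), only the first active lane of each
+; wave runs the compare-exchange loop (v_mbcnt + s_and_saveexec/cmpx guard in
+; the checks below).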
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB8_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v5
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB8_2
+; GFX7LESS-NEXT: .LBB8_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB8_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB8_2
+; GFX9-NEXT: .LBB8_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB8_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1064-NEXT: .LBB8_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s3, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB8_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1032-NEXT: .LBB8_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB8_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1164-NEXT: .LBB8_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB8_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1132-NEXT: .LBB8_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX9-DPP-NEXT: .LBB8_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1064-DPP-NEXT: .LBB8_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s3, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1032-DPP-NEXT: .LBB8_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1164-DPP-NEXT: .LBB8_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1132-DPP-NEXT: .LBB8_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") monotonic
+ ret void
+}
+
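+; With a divergent value from @div.double.value, each lane performs its own
+; compare-exchange loop against the loaded memory value; no lane guard is
+; emitted.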
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB10_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], 4.0
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_2
+; GFX7LESS-NEXT: .LBB10_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB10_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_2
+; GFX9-NEXT: .LBB10_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB10_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1064-NEXT: .LBB10_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB10_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1032-NEXT: .LBB10_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB10_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1164-NEXT: .LBB10_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB10_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1132-NEXT: .LBB10_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX9-DPP-NEXT: .LBB10_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1064-DPP-NEXT: .LBB10_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1032-DPP-NEXT: .LBB10_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1164-DPP-NEXT: .LBB10_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1132-DPP-NEXT: .LBB10_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ ret void
+}
+
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
!llvm.module.flags = !{!0}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
index 314c52a..49d415c 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
@@ -13,6 +13,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-atomic-optimizer-strategy=DPP -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX1132-DPP %s
declare float @div.float.value()
+declare double @div.double.value()
define amdgpu_kernel void @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe:
@@ -3550,6 +3551,3965 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_defalut_scop
ret void
}
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB6_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[2:3], 4.0
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB6_2
+; GFX7LESS-NEXT: .LBB6_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB6_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB6_2
+; GFX9-NEXT: .LBB6_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB6_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1064-NEXT: .LBB6_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB6_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1032-NEXT: .LBB6_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB6_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1164-NEXT: .LBB6_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB6_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1132-NEXT: .LBB6_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX9-DPP-NEXT: .LBB6_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1064-DPP-NEXT: .LBB6_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1032-DPP-NEXT: .LBB6_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1164-DPP-NEXT: .LBB6_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1132-DPP-NEXT: .LBB6_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ ret void
+}
+
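+; Note on the next test (descriptive comment only): the operand here is divergent,
+; produced per-lane by the @div.double.value helper, and the double fmin is only
+; 4-byte aligned, so the checks above show it lowered to a compare-and-swap retry
+; loop that calls the __atomic_compare_exchange libcall on every target.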
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB7_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ ret void
+}
+
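+; Note on the next test (descriptive comment only): with a uniform 4.0 operand at
+; "one-as" scope and natural alignment, the expansion stays inline: the loaded value
+; is quieted with v_max_f64 against itself, combined with v_min_f64 against 4.0, and
+; retried through global_atomic_cmpswap_x2 (cmpswap_b64 on GFX11) with no libcall.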
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB8_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v5
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB8_2
+; GFX7LESS-NEXT: .LBB8_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB8_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB8_2
+; GFX9-NEXT: .LBB8_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB8_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1064-NEXT: .LBB8_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s3, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB8_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1032-NEXT: .LBB8_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB8_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1164-NEXT: .LBB8_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB8_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1132-NEXT: .LBB8_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX9-DPP-NEXT: .LBB8_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1064-DPP-NEXT: .LBB8_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s3, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1032-DPP-NEXT: .LBB8_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1164-DPP-NEXT: .LBB8_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1132-DPP-NEXT: .LBB8_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") monotonic
+ ret void
+}
+
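+; The fmin operand below is divergent (it comes from the call to @div.double.value), so no single-lane fast path is formed; each active lane runs the global_atomic_cmpswap retry loop itself.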
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue syncscope("one-as") monotonic
+ ret void
+}
+
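+; Uniform 4.0 operand at the default (system) scope, but the atomic is only 4-byte aligned, so the expansion is a retry loop around the __atomic_compare_exchange libcall.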
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB10_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[2:3], 4.0
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_2
+; GFX7LESS-NEXT: .LBB10_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB10_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_2
+; GFX9-NEXT: .LBB10_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB10_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1064-NEXT: .LBB10_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB10_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1032-NEXT: .LBB10_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB10_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1164-NEXT: .LBB10_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB10_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1132-NEXT: .LBB10_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX9-DPP-NEXT: .LBB10_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1064-DPP-NEXT: .LBB10_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1032-DPP-NEXT: .LBB10_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1164-DPP-NEXT: .LBB10_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1132-DPP-NEXT: .LBB10_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ ret void
+}
+
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
!llvm.module.flags = !{!0}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
index bc9125e..7a7ddbe 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
@@ -13,6 +13,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-atomic-optimizer-strategy=DPP -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX1132-DPP %s
declare float @div.float.value()
+declare double @div.double.value()
define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fsub_uni_address_uni_value_agent_scope_unsafe:
@@ -5616,6 +5617,5581 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_defalut_scop
ret void
}
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB9_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX7LESS-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX7LESS-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[41:42]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_2
+; GFX7LESS-NEXT: .LBB9_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB9_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_2
+; GFX9-NEXT: .LBB9_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB9_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1064-NEXT: .LBB9_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB9_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1032-NEXT: .LBB9_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB9_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1164-NEXT: .LBB9_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB9_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1132-NEXT: .LBB9_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX9-DPP-NEXT: .LBB9_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1064-DPP-NEXT: .LBB9_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1032-DPP-NEXT: .LBB9_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1164-DPP-NEXT: .LBB9_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1132-DPP-NEXT: .LBB9_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ ret void
+}
+
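+; Note on the divergent-value variant below: each lane first calls @div.float.value()
+; to obtain its own operand, and the align-4 double fsub is expanded into a CAS loop
+; around the __atomic_compare_exchange libcall, as the per-target checks that follow show
+; (the loop body spills the expected/desired pair to scratch before each call).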
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[40:41]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value()
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB11_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_2
+; GFX7LESS-NEXT: .LBB11_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB11_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_2
+; GFX9-NEXT: .LBB11_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB11_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1064-NEXT: .LBB11_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB11_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1032-NEXT: .LBB11_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB11_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1164-NEXT: .LBB11_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB11_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1132-NEXT: .LBB11_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX9-DPP-NEXT: .LBB11_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1064-DPP-NEXT: .LBB11_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1032-DPP-NEXT: .LBB11_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1164-DPP-NEXT: .LBB11_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1132-DPP-NEXT: .LBB11_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB12_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value() strictfp
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB13_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB13_2
+; GFX7LESS-NEXT: .LBB13_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB13_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB13_2
+; GFX9-NEXT: .LBB13_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB13_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1064-NEXT: .LBB13_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB13_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1032-NEXT: .LBB13_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB13_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1164-NEXT: .LBB13_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB13_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1132-NEXT: .LBB13_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX9-DPP-NEXT: .LBB13_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1064-DPP-NEXT: .LBB13_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1032-DPP-NEXT: .LBB13_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1164-DPP-NEXT: .LBB13_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1132-DPP-NEXT: .LBB13_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB14_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB15_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value() strictfp
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB16_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s1, 0x43300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[41:42]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB16_2
+; GFX7LESS-NEXT: .LBB16_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB16_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s1, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB16_2
+; GFX9-NEXT: .LBB16_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB16_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1064-NEXT: .LBB16_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB16_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1032-NEXT: .LBB16_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_bcnt1_i32_b64 s0, exec
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1164-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB16_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1164-NEXT: .LBB16_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1132-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB16_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1132-NEXT: .LBB16_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX9-DPP-NEXT: .LBB16_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1064-DPP-NEXT: .LBB16_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1032-DPP-NEXT: .LBB16_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1164-DPP-NEXT: .LBB16_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1132-DPP-NEXT: .LBB16_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ ret void
+}
+
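+; The divergent-value variant of the same fsub expansion follows. As with the
+; uniform-value test above, the generated checks show a compare-exchange loop
+; (%atomicrmw.start) around calls to the __atomic_compare_exchange libcall.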
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[40:41]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB17_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value() strictfp
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ ret void
+}
+
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #1 = { strictfp "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #2 = { strictfp}
diff --git a/llvm/test/CodeGen/AMDGPU/greedy-alloc-fail-sgpr1024-spill.mir b/llvm/test/CodeGen/AMDGPU/greedy-alloc-fail-sgpr1024-spill.mir
index bdd89a9..dde84af 100644
--- a/llvm/test/CodeGen/AMDGPU/greedy-alloc-fail-sgpr1024-spill.mir
+++ b/llvm/test/CodeGen/AMDGPU/greedy-alloc-fail-sgpr1024-spill.mir
@@ -13,6 +13,7 @@
name: greedy_fail_alloc_sgpr1024_spill
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
machineFunctionInfo:
explicitKernArgSize: 16
diff --git a/llvm/test/CodeGen/AMDGPU/implicitarg-offset-attributes.ll b/llvm/test/CodeGen/AMDGPU/implicitarg-offset-attributes.ll
index a5792bf..4c21f87 100644
--- a/llvm/test/CodeGen/AMDGPU/implicitarg-offset-attributes.ll
+++ b/llvm/test/CodeGen/AMDGPU/implicitarg-offset-attributes.ll
@@ -258,25 +258,25 @@ attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memo
;.
; V4: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; V4: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; V4: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V4: attributes #[[ATTR3]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V4: attributes #[[ATTR4]] = { "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V4: attributes #[[ATTR5]] = { "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V4: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; V4: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V4: attributes #[[ATTR3]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V4: attributes #[[ATTR4]] = { "amdgpu-no-agpr" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V4: attributes #[[ATTR5]] = { "amdgpu-no-agpr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
;.
; V5: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; V5: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; V5: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V5: attributes #[[ATTR3]] = { "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V5: attributes #[[ATTR4]] = { "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V5: attributes #[[ATTR5]] = { "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V5: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; V5: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V5: attributes #[[ATTR3]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V5: attributes #[[ATTR4]] = { "amdgpu-no-agpr" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V5: attributes #[[ATTR5]] = { "amdgpu-no-agpr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
;.
; V6: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; V6: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; V6: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V6: attributes #[[ATTR3]] = { "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V6: attributes #[[ATTR4]] = { "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V6: attributes #[[ATTR5]] = { "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V6: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; V6: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V6: attributes #[[ATTR3]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V6: attributes #[[ATTR4]] = { "amdgpu-no-agpr" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V6: attributes #[[ATTR5]] = { "amdgpu-no-agpr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
;.
; V4: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 400}
;.
diff --git a/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll
index e015095a..ab160ff 100644
--- a/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll
+++ b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll
@@ -92,7 +92,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc(<4 x i32> inreg %a, <4 x i32> %b
; DAGISEL-GFX11-NEXT: $vgpr5 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr6 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr7 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -122,7 +121,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc(<4 x i32> inreg %a, <4 x i32> %b
; DAGISEL-GFX10-NEXT: $vgpr5 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr6 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr7 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -234,7 +232,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_ptr(ptr inreg %a, ptr %b, ptr ad
; DAGISEL-GFX11-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -272,7 +269,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_ptr(ptr inreg %a, ptr %b, ptr ad
; DAGISEL-GFX10-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -404,7 +400,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_struct( {ptr, i32, <4 x i32>} in
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr12 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr13 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF2:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -454,7 +449,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_struct( {ptr, i32, <4 x i32>} in
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr12 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr13 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF2:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -506,7 +500,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_float(float inreg %a, float %b)
; DAGISEL-GFX11-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
; DAGISEL-GFX11-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -524,7 +517,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_float(float inreg %a, float %b)
; DAGISEL-GFX10-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -576,7 +568,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_half(half inreg %a, half %b) {
; DAGISEL-GFX11-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
; DAGISEL-GFX11-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -594,7 +585,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_half(half inreg %a, half %b) {
; DAGISEL-GFX10-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -646,7 +636,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a, bfloat %
; DAGISEL-GFX11-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
; DAGISEL-GFX11-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -664,7 +653,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a, bfloat %
; DAGISEL-GFX10-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -716,7 +704,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_i16(i16 inreg %a, i16 %b) {
; DAGISEL-GFX11-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
; DAGISEL-GFX11-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -734,7 +721,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_i16(i16 inreg %a, i16 %b) {
; DAGISEL-GFX10-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -870,7 +856,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_v16i16(<16 x i16> inreg %a, <16
; DAGISEL-GFX11-NEXT: $vgpr13 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr14 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr15 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -916,7 +901,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_v16i16(<16 x i16> inreg %a, <16
; DAGISEL-GFX10-NEXT: $vgpr13 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr14 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr15 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -2480,7 +2464,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_many_regs(<36 x i32> inreg %a, <128
; DAGISEL-GFX11-NEXT: $vgpr29 = COPY [[COPY134]]
; DAGISEL-GFX11-NEXT: $vgpr30 = COPY [[COPY133]]
; DAGISEL-GFX11-NEXT: $vgpr31 = COPY [[COPY132]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 528, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -2827,7 +2810,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_many_regs(<36 x i32> inreg %a, <128
; DAGISEL-GFX10-NEXT: $vgpr29 = COPY [[COPY134]]
; DAGISEL-GFX10-NEXT: $vgpr30 = COPY [[COPY133]]
; DAGISEL-GFX10-NEXT: $vgpr31 = COPY [[COPY132]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 528, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
diff --git a/llvm/test/CodeGen/AMDGPU/itofp.i128.ll b/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
new file mode 100644
index 0000000..bfeb214
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
@@ -0,0 +1,1618 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GISEL %s
+
+define float @sitofp_i128_to_f32(i128 %x) {
+; SDAG-LABEL: sitofp_i128_to_f32:
+; SDAG: ; %bb.0: ; %itofp-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_or_b32_e32 v5, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v4, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v4, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB0_14
+; SDAG-NEXT: ; %bb.1: ; %itofp-if-end
+; SDAG-NEXT: v_ashrrev_i32_e32 v5, 31, v3
+; SDAG-NEXT: v_xor_b32_e32 v0, v5, v0
+; SDAG-NEXT: v_xor_b32_e32 v1, v5, v1
+; SDAG-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v5
+; SDAG-NEXT: v_xor_b32_e32 v2, v5, v2
+; SDAG-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v5, vcc
+; SDAG-NEXT: v_xor_b32_e32 v6, v5, v3
+; SDAG-NEXT: v_subb_co_u32_e32 v4, vcc, v2, v5, vcc
+; SDAG-NEXT: v_subb_co_u32_e32 v5, vcc, v6, v5, vcc
+; SDAG-NEXT: v_ffbh_u32_e32 v2, v4
+; SDAG-NEXT: v_add_u32_e32 v2, 32, v2
+; SDAG-NEXT: v_ffbh_u32_e32 v6, v5
+; SDAG-NEXT: v_min_u32_e32 v2, v2, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v6, v0
+; SDAG-NEXT: v_add_u32_e32 v6, 32, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v7, v1
+; SDAG-NEXT: v_min_u32_e32 v6, v6, v7
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_add_u32_e32 v6, 64, v6
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v6, v2, vcc
+; SDAG-NEXT: v_sub_u32_e32 v6, 0x80, v7
+; SDAG-NEXT: v_sub_u32_e32 v2, 0x7f, v7
+; SDAG-NEXT: v_cmp_gt_i32_e32 vcc, 25, v6
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
+; SDAG-NEXT: v_add_u32_e32 v4, 0xffffff98, v7
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v4, v[0:1]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
+; SDAG-NEXT: v_cndmask_b32_e32 v8, 0, v0, vcc
+; SDAG-NEXT: ; implicit-def: $vgpr6
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr7
+; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB0_13
+; SDAG-NEXT: ; %bb.4: ; %NodeBlock
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, 25, v6
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB0_8
+; SDAG-NEXT: ; %bb.5: ; %LeafBlock
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 26, v6
+; SDAG-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB0_7
+; SDAG-NEXT: ; %bb.6: ; %itofp-sw-default
+; SDAG-NEXT: v_sub_u32_e32 v12, 0x66, v7
+; SDAG-NEXT: v_sub_u32_e32 v10, 64, v12
+; SDAG-NEXT: v_lshrrev_b64 v[8:9], v12, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[10:11], v10, v[4:5]
+; SDAG-NEXT: v_sub_u32_e32 v13, 38, v7
+; SDAG-NEXT: v_or_b32_e32 v11, v9, v11
+; SDAG-NEXT: v_or_b32_e32 v10, v8, v10
+; SDAG-NEXT: v_lshrrev_b64 v[8:9], v13, v[4:5]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v12
+; SDAG-NEXT: v_add_u32_e32 v14, 26, v7
+; SDAG-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v12
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[10:11], v13, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[12:13], v14, v[4:5]
+; SDAG-NEXT: v_subrev_u32_e32 v7, 38, v7
+; SDAG-NEXT: v_cndmask_b32_e64 v15, v8, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[7:8], v7, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v9, v1, s[4:5]
+; SDAG-NEXT: v_or_b32_e32 v11, v13, v11
+; SDAG-NEXT: v_or_b32_e32 v10, v12, v10
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v14
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v14, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v8, v11, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v14
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v7, v10, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v5, v8, v5, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v4, v7, v4, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; SDAG-NEXT: v_or_b32_e32 v1, v1, v5
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v4
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v8, v15, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, v8
+; SDAG-NEXT: v_mov_b32_e32 v1, v9
+; SDAG-NEXT: .LBB0_7: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB0_8: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.9: ; %itofp-sw-bb
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; SDAG-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: v_lshrrev_b32_e32 v4, 2, v0
+; SDAG-NEXT: v_and_or_b32 v0, v4, 1, v0
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT: v_and_b32_e32 v4, 0x4000000, v0
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; SDAG-NEXT: v_alignbit_b32 v8, v1, v0, 2
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: ; %bb.11: ; %itofp-if-then20
+; SDAG-NEXT: v_alignbit_b32 v8, v1, v0, 3
+; SDAG-NEXT: v_mov_b32_e32 v2, v6
+; SDAG-NEXT: ; %bb.12: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB0_13: ; %Flow4
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_and_b32_e32 v0, 0x80000000, v3
+; SDAG-NEXT: v_lshl_add_u32 v1, v2, 23, 1.0
+; SDAG-NEXT: v_and_b32_e32 v2, 0x7fffff, v8
+; SDAG-NEXT: v_or3_b32 v4, v2, v0, v1
+; SDAG-NEXT: .LBB0_14: ; %Flow5
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, v4
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: sitofp_i128_to_f32:
+; GISEL: ; %bb.0: ; %itofp-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_or_b32_e32 v4, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v5, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; GISEL-NEXT: s_mov_b32 s4, 0
+; GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB0_14
+; GISEL-NEXT: ; %bb.1: ; %itofp-if-end
+; GISEL-NEXT: v_ashrrev_i32_e32 v6, 31, v3
+; GISEL-NEXT: v_xor_b32_e32 v0, v6, v0
+; GISEL-NEXT: v_xor_b32_e32 v1, v6, v1
+; GISEL-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v6
+; GISEL-NEXT: v_xor_b32_e32 v2, v6, v2
+; GISEL-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v6, vcc
+; GISEL-NEXT: v_xor_b32_e32 v3, v6, v3
+; GISEL-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v6, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v0
+; GISEL-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v6, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v4, v1
+; GISEL-NEXT: v_add_u32_e32 v5, 32, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v7, v2
+; GISEL-NEXT: v_min_u32_e32 v4, v4, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v3
+; GISEL-NEXT: v_add_u32_e32 v7, 32, v7
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_add_u32_e32 v4, 64, v4
+; GISEL-NEXT: v_min_u32_e32 v5, v5, v7
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v5, v4, vcc
+; GISEL-NEXT: v_sub_u32_e32 v8, 0x80, v5
+; GISEL-NEXT: v_sub_u32_e32 v7, 0x7f, v5
+; GISEL-NEXT: v_cmp_ge_i32_e32 vcc, 24, v8
+; GISEL-NEXT: ; implicit-def: $vgpr4
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
+; GISEL-NEXT: v_add_u32_e32 v2, 0xffffff98, v5
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: ; implicit-def: $vgpr0
+; GISEL-NEXT: ; implicit-def: $vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr2
+; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB0_13
+; GISEL-NEXT: ; %bb.4: ; %NodeBlock
+; GISEL-NEXT: v_cmp_le_i32_e32 vcc, 26, v8
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB0_8
+; GISEL-NEXT: ; %bb.5: ; %LeafBlock
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 26, v8
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB0_7
+; GISEL-NEXT: ; %bb.6: ; %itofp-sw-default
+; GISEL-NEXT: v_sub_u32_e32 v4, 0x66, v5
+; GISEL-NEXT: v_sub_u32_e32 v11, 64, v4
+; GISEL-NEXT: v_lshrrev_b64 v[9:10], v4, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[11:12], v11, v[2:3]
+; GISEL-NEXT: v_subrev_u32_e32 v13, 64, v4
+; GISEL-NEXT: v_or_b32_e32 v11, v9, v11
+; GISEL-NEXT: v_or_b32_e32 v12, v10, v12
+; GISEL-NEXT: v_lshrrev_b64 v[9:10], v13, v[2:3]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
+; GISEL-NEXT: v_add_u32_e32 v5, 26, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GISEL-NEXT: v_sub_u32_e32 v11, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v13, v9, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v10, v1, vcc
+; GISEL-NEXT: v_lshrrev_b64 v[9:10], v5, -1
+; GISEL-NEXT: v_lshlrev_b64 v[11:12], v11, -1
+; GISEL-NEXT: v_subrev_u32_e32 v14, 64, v5
+; GISEL-NEXT: v_or_b32_e32 v15, v9, v11
+; GISEL-NEXT: v_or_b32_e32 v16, v10, v12
+; GISEL-NEXT: v_lshrrev_b64 v[11:12], v14, -1
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v11, v11, v15, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v12, v16, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v9, 0, v9, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v10, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v5, v11, -1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v11, v12, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v2, v9, v2
+; GISEL-NEXT: v_and_b32_e32 v3, v10, v3
+; GISEL-NEXT: v_and_or_b32 v0, v5, v0, v2
+; GISEL-NEXT: v_and_or_b32 v1, v11, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v3, v13, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GISEL-NEXT: v_mov_b32_e32 v1, v4
+; GISEL-NEXT: v_mov_b32_e32 v2, v5
+; GISEL-NEXT: v_mov_b32_e32 v3, v6
+; GISEL-NEXT: .LBB0_7: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: .LBB0_8: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; GISEL-NEXT: ; %bb.9: ; %itofp-sw-bb
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: v_bfe_u32 v2, v0, 2, 1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GISEL-NEXT: v_and_b32_e32 v2, 0x4000000, v0
+; GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 2, v[0:1]
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: ; %bb.11: ; %itofp-if-then20
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
+; GISEL-NEXT: v_mov_b32_e32 v7, v8
+; GISEL-NEXT: ; %bb.12: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB0_13: ; %Flow4
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_and_b32_e32 v0, 0x80000000, v6
+; GISEL-NEXT: v_lshl_add_u32 v1, v7, 23, 1.0
+; GISEL-NEXT: v_and_b32_e32 v2, 0x7fffff, v4
+; GISEL-NEXT: v_or3_b32 v4, v2, v0, v1
+; GISEL-NEXT: .LBB0_14: ; %Flow5
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: v_mov_b32_e32 v0, v4
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = sitofp i128 %x to float
+ ret float %cvt
+}
+
+define float @uitofp_i128_to_f32(i128 %x) {
+; SDAG-LABEL: uitofp_i128_to_f32:
+; SDAG: ; %bb.0: ; %itofp-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_or_b32_e32 v5, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v4, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v4, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB1_14
+; SDAG-NEXT: ; %bb.1: ; %itofp-if-end
+; SDAG-NEXT: v_ffbh_u32_e32 v4, v2
+; SDAG-NEXT: v_add_u32_e32 v4, 32, v4
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v3
+; SDAG-NEXT: v_min_u32_e32 v4, v4, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v0
+; SDAG-NEXT: v_add_u32_e32 v5, 32, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v6, v1
+; SDAG-NEXT: v_min_u32_e32 v5, v5, v6
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; SDAG-NEXT: v_add_u32_e32 v5, 64, v5
+; SDAG-NEXT: v_cndmask_b32_e32 v6, v5, v4, vcc
+; SDAG-NEXT: v_sub_u32_e32 v5, 0x80, v6
+; SDAG-NEXT: v_sub_u32_e32 v4, 0x7f, v6
+; SDAG-NEXT: v_cmp_gt_i32_e32 vcc, 25, v5
+; SDAG-NEXT: ; implicit-def: $vgpr7
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
+; SDAG-NEXT: v_add_u32_e32 v2, 0xffffff98, v6
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; SDAG-NEXT: v_cndmask_b32_e32 v7, 0, v0, vcc
+; SDAG-NEXT: ; implicit-def: $vgpr5
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr6
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB1_13
+; SDAG-NEXT: ; %bb.4: ; %NodeBlock
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, 25, v5
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB1_8
+; SDAG-NEXT: ; %bb.5: ; %LeafBlock
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 26, v5
+; SDAG-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB1_7
+; SDAG-NEXT: ; %bb.6: ; %itofp-sw-default
+; SDAG-NEXT: v_sub_u32_e32 v11, 0x66, v6
+; SDAG-NEXT: v_sub_u32_e32 v9, 64, v11
+; SDAG-NEXT: v_lshrrev_b64 v[7:8], v11, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[9:10], v9, v[2:3]
+; SDAG-NEXT: v_sub_u32_e32 v12, 38, v6
+; SDAG-NEXT: v_or_b32_e32 v10, v8, v10
+; SDAG-NEXT: v_or_b32_e32 v9, v7, v9
+; SDAG-NEXT: v_lshrrev_b64 v[7:8], v12, v[2:3]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v11
+; SDAG-NEXT: v_add_u32_e32 v13, 26, v6
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v11
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[9:10], v12, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[11:12], v13, v[2:3]
+; SDAG-NEXT: v_subrev_u32_e32 v6, 38, v6
+; SDAG-NEXT: v_cndmask_b32_e64 v14, v7, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[6:7], v6, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v8, v1, s[4:5]
+; SDAG-NEXT: v_or_b32_e32 v10, v12, v10
+; SDAG-NEXT: v_or_b32_e32 v9, v11, v9
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v13
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v13, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v7, v10, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v13
+; SDAG-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; SDAG-NEXT: v_or_b32_e32 v1, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v7, v14, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, v7
+; SDAG-NEXT: v_mov_b32_e32 v1, v8
+; SDAG-NEXT: .LBB1_7: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB1_8: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.9: ; %itofp-sw-bb
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; SDAG-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: v_lshrrev_b32_e32 v2, 2, v0
+; SDAG-NEXT: v_and_or_b32 v0, v2, 1, v0
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT: v_and_b32_e32 v2, 0x4000000, v0
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; SDAG-NEXT: v_alignbit_b32 v7, v1, v0, 2
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: ; %bb.11: ; %itofp-if-then20
+; SDAG-NEXT: v_alignbit_b32 v7, v1, v0, 3
+; SDAG-NEXT: v_mov_b32_e32 v4, v5
+; SDAG-NEXT: ; %bb.12: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB1_13: ; %Flow4
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_and_b32_e32 v0, 0x7fffff, v7
+; SDAG-NEXT: v_lshl_or_b32 v0, v4, 23, v0
+; SDAG-NEXT: v_add_u32_e32 v4, 1.0, v0
+; SDAG-NEXT: .LBB1_14: ; %Flow5
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, v4
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: uitofp_i128_to_f32:
+; GISEL: ; %bb.0: ; %itofp-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_or_b32_e32 v4, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v5, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; GISEL-NEXT: s_mov_b32 s4, 0
+; GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB1_14
+; GISEL-NEXT: ; %bb.1: ; %itofp-if-end
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v0
+; GISEL-NEXT: v_ffbh_u32_e32 v4, v1
+; GISEL-NEXT: v_add_u32_e32 v5, 32, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v6, v2
+; GISEL-NEXT: v_min_u32_e32 v4, v4, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v3
+; GISEL-NEXT: v_add_u32_e32 v6, 32, v6
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_add_u32_e32 v4, 64, v4
+; GISEL-NEXT: v_min_u32_e32 v5, v5, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v5, v4, vcc
+; GISEL-NEXT: v_sub_u32_e32 v7, 0x80, v5
+; GISEL-NEXT: v_sub_u32_e32 v6, 0x7f, v5
+; GISEL-NEXT: v_cmp_ge_i32_e32 vcc, 24, v7
+; GISEL-NEXT: ; implicit-def: $vgpr4
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
+; GISEL-NEXT: v_add_u32_e32 v2, 0xffffff98, v5
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
+; GISEL-NEXT: ; implicit-def: $vgpr7
+; GISEL-NEXT: ; implicit-def: $vgpr0
+; GISEL-NEXT: ; implicit-def: $vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr2
+; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB1_13
+; GISEL-NEXT: ; %bb.4: ; %NodeBlock
+; GISEL-NEXT: v_cmp_le_i32_e32 vcc, 26, v7
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB1_8
+; GISEL-NEXT: ; %bb.5: ; %LeafBlock
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 26, v7
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB1_7
+; GISEL-NEXT: ; %bb.6: ; %itofp-sw-default
+; GISEL-NEXT: v_sub_u32_e32 v4, 0x66, v5
+; GISEL-NEXT: v_sub_u32_e32 v10, 64, v4
+; GISEL-NEXT: v_lshrrev_b64 v[8:9], v4, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[10:11], v10, v[2:3]
+; GISEL-NEXT: v_subrev_u32_e32 v12, 64, v4
+; GISEL-NEXT: v_or_b32_e32 v10, v8, v10
+; GISEL-NEXT: v_or_b32_e32 v11, v9, v11
+; GISEL-NEXT: v_lshrrev_b64 v[8:9], v12, v[2:3]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
+; GISEL-NEXT: v_add_u32_e32 v5, 26, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GISEL-NEXT: v_sub_u32_e32 v10, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v8, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v9, v1, vcc
+; GISEL-NEXT: v_lshrrev_b64 v[8:9], v5, -1
+; GISEL-NEXT: v_lshlrev_b64 v[10:11], v10, -1
+; GISEL-NEXT: v_subrev_u32_e32 v13, 64, v5
+; GISEL-NEXT: v_or_b32_e32 v14, v8, v10
+; GISEL-NEXT: v_or_b32_e32 v15, v9, v11
+; GISEL-NEXT: v_lshrrev_b64 v[10:11], v13, -1
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v10, v10, v14, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v11, v11, v15, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v8, 0, v8, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v9, 0, v9, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v5, v10, -1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v10, v11, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v2, v8, v2
+; GISEL-NEXT: v_and_b32_e32 v3, v9, v3
+; GISEL-NEXT: v_and_or_b32 v0, v5, v0, v2
+; GISEL-NEXT: v_and_or_b32 v1, v10, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v3, v12, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GISEL-NEXT: v_mov_b32_e32 v1, v4
+; GISEL-NEXT: v_mov_b32_e32 v2, v5
+; GISEL-NEXT: v_mov_b32_e32 v3, v6
+; GISEL-NEXT: .LBB1_7: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: .LBB1_8: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; GISEL-NEXT: ; %bb.9: ; %itofp-sw-bb
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: v_bfe_u32 v2, v0, 2, 1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GISEL-NEXT: v_and_b32_e32 v2, 0x4000000, v0
+; GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 2, v[0:1]
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: ; %bb.11: ; %itofp-if-then20
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
+; GISEL-NEXT: v_mov_b32_e32 v6, v7
+; GISEL-NEXT: ; %bb.12: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB1_13: ; %Flow4
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_add_u32 v0, v6, 23, 1.0
+; GISEL-NEXT: v_mov_b32_e32 v1, 0x7fffff
+; GISEL-NEXT: v_and_or_b32 v4, v4, v1, v0
+; GISEL-NEXT: .LBB1_14: ; %Flow5
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: v_mov_b32_e32 v0, v4
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = uitofp i128 %x to float
+ ret float %cvt
+}
+
+define double @sitofp_i128_to_f64(i128 %x) {
+; SDAG-LABEL: sitofp_i128_to_f64:
+; SDAG: ; %bb.0: ; %itofp-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_mov_b32_e32 v5, v1
+; SDAG-NEXT: v_mov_b32_e32 v4, v0
+; SDAG-NEXT: v_or_b32_e32 v1, v5, v3
+; SDAG-NEXT: v_or_b32_e32 v0, v4, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; SDAG-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB2_14
+; SDAG-NEXT: ; %bb.1: ; %itofp-if-end
+; SDAG-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; SDAG-NEXT: v_xor_b32_e32 v4, v0, v4
+; SDAG-NEXT: v_xor_b32_e32 v5, v0, v5
+; SDAG-NEXT: v_sub_co_u32_e32 v4, vcc, v4, v0
+; SDAG-NEXT: v_xor_b32_e32 v2, v0, v2
+; SDAG-NEXT: v_subb_co_u32_e32 v5, vcc, v5, v0, vcc
+; SDAG-NEXT: v_xor_b32_e32 v1, v0, v3
+; SDAG-NEXT: v_subb_co_u32_e32 v6, vcc, v2, v0, vcc
+; SDAG-NEXT: v_subb_co_u32_e32 v7, vcc, v1, v0, vcc
+; SDAG-NEXT: v_ffbh_u32_e32 v0, v6
+; SDAG-NEXT: v_add_u32_e32 v0, 32, v0
+; SDAG-NEXT: v_ffbh_u32_e32 v1, v7
+; SDAG-NEXT: v_min_u32_e32 v0, v0, v1
+; SDAG-NEXT: v_ffbh_u32_e32 v1, v4
+; SDAG-NEXT: v_add_u32_e32 v1, 32, v1
+; SDAG-NEXT: v_ffbh_u32_e32 v2, v5
+; SDAG-NEXT: v_min_u32_e32 v1, v1, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; SDAG-NEXT: v_add_u32_e32 v1, 64, v1
+; SDAG-NEXT: v_cndmask_b32_e32 v9, v1, v0, vcc
+; SDAG-NEXT: v_sub_u32_e32 v8, 0x80, v9
+; SDAG-NEXT: v_sub_u32_e32 v2, 0x7f, v9
+; SDAG-NEXT: v_cmp_gt_i32_e32 vcc, 54, v8
+; SDAG-NEXT: ; implicit-def: $vgpr10
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
+; SDAG-NEXT: v_add_u32_e32 v6, 0xffffffb5, v9
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v6, v[4:5]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
+; SDAG-NEXT: v_cndmask_b32_e32 v10, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
+; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; SDAG-NEXT: ; implicit-def: $vgpr9
+; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB2_13
+; SDAG-NEXT: ; %bb.4: ; %NodeBlock
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, 54, v8
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB2_8
+; SDAG-NEXT: ; %bb.5: ; %LeafBlock
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 55, v8
+; SDAG-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB2_7
+; SDAG-NEXT: ; %bb.6: ; %itofp-sw-default
+; SDAG-NEXT: v_sub_u32_e32 v12, 0x49, v9
+; SDAG-NEXT: v_sub_u32_e32 v10, 64, v12
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v12, v[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[10:11], v10, v[6:7]
+; SDAG-NEXT: v_sub_u32_e32 v13, 9, v9
+; SDAG-NEXT: v_or_b32_e32 v11, v1, v11
+; SDAG-NEXT: v_or_b32_e32 v10, v0, v10
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], v13, v[6:7]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v12
+; SDAG-NEXT: v_add_u32_e32 v16, 55, v9
+; SDAG-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v12
+; SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[10:11], v12, v[6:7]
+; SDAG-NEXT: v_lshrrev_b64 v[12:13], v13, v[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[14:15], v16, v[6:7]
+; SDAG-NEXT: v_add_u32_e32 v9, -9, v9
+; SDAG-NEXT: v_or_b32_e32 v15, v15, v13
+; SDAG-NEXT: v_or_b32_e32 v14, v14, v12
+; SDAG-NEXT: v_lshlrev_b64 v[12:13], v9, v[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v11, 0, v11, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v10, 0, v10, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v16
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v1, v5, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v0, v0, v4, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v9, v13, v15, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v16
+; SDAG-NEXT: v_lshlrev_b64 v[4:5], v16, v[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v7, v9, v7, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v9, v12, v14, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v6, v9, v6, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
+; SDAG-NEXT: v_or_b32_e32 v5, v5, v7
+; SDAG-NEXT: v_or_b32_e32 v4, v4, v6
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v6, v10
+; SDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v4
+; SDAG-NEXT: v_mov_b32_e32 v5, v1
+; SDAG-NEXT: v_mov_b32_e32 v4, v0
+; SDAG-NEXT: v_mov_b32_e32 v7, v11
+; SDAG-NEXT: .LBB2_7: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB2_8: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.9: ; %itofp-sw-bb
+; SDAG-NEXT: v_lshlrev_b64 v[6:7], 1, v[6:7]
+; SDAG-NEXT: v_lshrrev_b32_e32 v0, 31, v5
+; SDAG-NEXT: v_lshlrev_b64 v[4:5], 1, v[4:5]
+; SDAG-NEXT: v_or_b32_e32 v6, v6, v0
+; SDAG-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: v_lshrrev_b32_e32 v0, 2, v4
+; SDAG-NEXT: v_and_or_b32 v0, v0, 1, v4
+; SDAG-NEXT: v_add_co_u32_e32 v4, vcc, 1, v0
+; SDAG-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
+; SDAG-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], 2, v[4:5]
+; SDAG-NEXT: v_lshlrev_b32_e32 v7, 30, v6
+; SDAG-NEXT: v_or_b32_e32 v10, v1, v7
+; SDAG-NEXT: v_and_b32_e32 v1, 0x800000, v5
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: ; %bb.11: ; %itofp-if-then20
+; SDAG-NEXT: v_lshrrev_b64 v[0:1], 3, v[4:5]
+; SDAG-NEXT: v_lshlrev_b32_e32 v2, 29, v6
+; SDAG-NEXT: v_or_b32_e32 v10, v1, v2
+; SDAG-NEXT: v_mov_b32_e32 v2, v8
+; SDAG-NEXT: ; %bb.12: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB2_13: ; %Flow4
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_and_b32_e32 v1, 0x80000000, v3
+; SDAG-NEXT: v_mov_b32_e32 v3, 0x3ff00000
+; SDAG-NEXT: v_lshl_add_u32 v2, v2, 20, v3
+; SDAG-NEXT: v_and_b32_e32 v3, 0xfffff, v10
+; SDAG-NEXT: v_or3_b32 v1, v3, v1, v2
+; SDAG-NEXT: .LBB2_14: ; %Flow5
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: sitofp_i128_to_f64:
+; GISEL: ; %bb.0: ; %itofp-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_mov_b32_e32 v4, v0
+; GISEL-NEXT: v_mov_b32_e32 v5, v1
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_or_b32_e32 v0, v4, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v5, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB2_14
+; GISEL-NEXT: ; %bb.1: ; %itofp-if-end
+; GISEL-NEXT: v_ashrrev_i32_e32 v6, 31, v3
+; GISEL-NEXT: v_xor_b32_e32 v0, v6, v4
+; GISEL-NEXT: v_xor_b32_e32 v1, v6, v5
+; GISEL-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v6
+; GISEL-NEXT: v_xor_b32_e32 v2, v6, v2
+; GISEL-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v6, vcc
+; GISEL-NEXT: v_xor_b32_e32 v3, v6, v3
+; GISEL-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v6, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v0
+; GISEL-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v6, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v4, v1
+; GISEL-NEXT: v_add_u32_e32 v5, 32, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v7, v2
+; GISEL-NEXT: v_min_u32_e32 v4, v4, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v3
+; GISEL-NEXT: v_add_u32_e32 v7, 32, v7
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_add_u32_e32 v4, 64, v4
+; GISEL-NEXT: v_min_u32_e32 v5, v5, v7
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v5, v4, vcc
+; GISEL-NEXT: v_sub_u32_e32 v8, 0x80, v9
+; GISEL-NEXT: v_sub_u32_e32 v7, 0x7f, v9
+; GISEL-NEXT: v_cmp_ge_i32_e32 vcc, 53, v8
+; GISEL-NEXT: ; implicit-def: $vgpr10
+; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
+; GISEL-NEXT: v_add_u32_e32 v2, 0xffffffb5, v9
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v1, vcc
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: ; implicit-def: $vgpr0
+; GISEL-NEXT: ; implicit-def: $vgpr9
+; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB2_13
+; GISEL-NEXT: ; %bb.4: ; %NodeBlock
+; GISEL-NEXT: v_cmp_le_i32_e32 vcc, 55, v8
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB2_8
+; GISEL-NEXT: ; %bb.5: ; %LeafBlock
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 55, v8
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB2_7
+; GISEL-NEXT: ; %bb.6: ; %itofp-sw-default
+; GISEL-NEXT: v_sub_u32_e32 v14, 0x49, v9
+; GISEL-NEXT: v_sub_u32_e32 v10, 64, v14
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], v14, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[10:11], v10, v[2:3]
+; GISEL-NEXT: v_subrev_u32_e32 v15, 64, v14
+; GISEL-NEXT: v_or_b32_e32 v10, v4, v10
+; GISEL-NEXT: v_or_b32_e32 v11, v5, v11
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], v15, v[2:3]
+; GISEL-NEXT: v_lshrrev_b64 v[12:13], v14, v[2:3]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v14
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v14
+; GISEL-NEXT: v_add_u32_e32 v14, 55, v9
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc
+; GISEL-NEXT: v_sub_u32_e32 v11, 64, v14
+; GISEL-NEXT: v_cndmask_b32_e64 v13, v4, v0, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v4, v5, v1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e32 v5, 0, v12, vcc
+; GISEL-NEXT: v_lshrrev_b64 v[9:10], v14, -1
+; GISEL-NEXT: v_lshlrev_b64 v[11:12], v11, -1
+; GISEL-NEXT: v_subrev_u32_e32 v15, 64, v14
+; GISEL-NEXT: v_or_b32_e32 v16, v9, v11
+; GISEL-NEXT: v_or_b32_e32 v17, v10, v12
+; GISEL-NEXT: v_lshrrev_b64 v[11:12], v15, -1
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v14
+; GISEL-NEXT: v_cndmask_b32_e32 v11, v11, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v12, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v14
+; GISEL-NEXT: v_cndmask_b32_e32 v9, 0, v9, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v10, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v11, v11, -1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v12, v12, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v2, v9, v2
+; GISEL-NEXT: v_and_b32_e32 v3, v10, v3
+; GISEL-NEXT: v_and_or_b32 v0, v11, v0, v2
+; GISEL-NEXT: v_and_or_b32 v1, v12, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v3, v13, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GISEL-NEXT: v_mov_b32_e32 v1, v4
+; GISEL-NEXT: v_mov_b32_e32 v2, v5
+; GISEL-NEXT: v_mov_b32_e32 v3, v6
+; GISEL-NEXT: .LBB2_7: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: .LBB2_8: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; GISEL-NEXT: ; %bb.9: ; %itofp-sw-bb
+; GISEL-NEXT: v_lshlrev_b64 v[9:10], 1, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
+; GISEL-NEXT: v_lshrrev_b32_e32 v0, 31, v1
+; GISEL-NEXT: v_or_b32_e32 v11, v2, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v9
+; GISEL-NEXT: v_mov_b32_e32 v1, v10
+; GISEL-NEXT: v_mov_b32_e32 v2, v11
+; GISEL-NEXT: v_mov_b32_e32 v3, v12
+; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: v_bfe_u32 v3, v0, 2, 1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v3
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GISEL-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 2, v[0:1]
+; GISEL-NEXT: v_mov_b32_e32 v9, 0
+; GISEL-NEXT: v_and_b32_e32 v10, 0x800000, v1
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[9:10]
+; GISEL-NEXT: v_lshl_or_b32 v10, v2, 30, v5
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: ; %bb.11: ; %itofp-if-then20
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
+; GISEL-NEXT: v_mov_b32_e32 v7, v8
+; GISEL-NEXT: v_lshl_or_b32 v10, v2, 29, v5
+; GISEL-NEXT: ; %bb.12: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB2_13: ; %Flow4
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_and_b32_e32 v0, 0x80000000, v6
+; GISEL-NEXT: v_mov_b32_e32 v1, 0x3ff00000
+; GISEL-NEXT: v_mov_b32_e32 v2, 0xfffff
+; GISEL-NEXT: v_lshl_add_u32 v1, v7, 20, v1
+; GISEL-NEXT: v_and_or_b32 v2, v10, v2, v0
+; GISEL-NEXT: v_and_or_b32 v0, v4, -1, 0
+; GISEL-NEXT: v_or3_b32 v1, v2, v1, 0
+; GISEL-NEXT: .LBB2_14: ; %Flow5
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = sitofp i128 %x to double
+ ret double %cvt
+}
+
+define double @uitofp_i128_to_f64(i128 %x) {
+; SDAG-LABEL: uitofp_i128_to_f64:
+; SDAG: ; %bb.0: ; %itofp-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_or_b32_e32 v5, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v4, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v4, 0
+; SDAG-NEXT: v_mov_b32_e32 v5, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB3_14
+; SDAG-NEXT: ; %bb.1: ; %itofp-if-end
+; SDAG-NEXT: v_ffbh_u32_e32 v4, v2
+; SDAG-NEXT: v_add_u32_e32 v4, 32, v4
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v3
+; SDAG-NEXT: v_min_u32_e32 v4, v4, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v0
+; SDAG-NEXT: v_add_u32_e32 v5, 32, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v6, v1
+; SDAG-NEXT: v_min_u32_e32 v5, v5, v6
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; SDAG-NEXT: v_add_u32_e32 v5, 64, v5
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v5, v4, vcc
+; SDAG-NEXT: v_sub_u32_e32 v7, 0x80, v8
+; SDAG-NEXT: v_sub_u32_e32 v6, 0x7f, v8
+; SDAG-NEXT: v_cmp_gt_i32_e32 vcc, 54, v7
+; SDAG-NEXT: ; implicit-def: $vgpr9
+; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
+; SDAG-NEXT: v_add_u32_e32 v2, 0xffffffb5, v8
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; SDAG-NEXT: v_cndmask_b32_e32 v9, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
+; SDAG-NEXT: ; implicit-def: $vgpr7
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB3_13
+; SDAG-NEXT: ; %bb.4: ; %NodeBlock
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, 54, v7
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB3_8
+; SDAG-NEXT: ; %bb.5: ; %LeafBlock
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 55, v7
+; SDAG-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB3_7
+; SDAG-NEXT: ; %bb.6: ; %itofp-sw-default
+; SDAG-NEXT: v_sub_u32_e32 v11, 0x49, v8
+; SDAG-NEXT: v_sub_u32_e32 v9, 64, v11
+; SDAG-NEXT: v_lshrrev_b64 v[4:5], v11, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[9:10], v9, v[2:3]
+; SDAG-NEXT: v_sub_u32_e32 v12, 9, v8
+; SDAG-NEXT: v_or_b32_e32 v10, v5, v10
+; SDAG-NEXT: v_or_b32_e32 v9, v4, v9
+; SDAG-NEXT: v_lshrrev_b64 v[4:5], v12, v[2:3]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v11
+; SDAG-NEXT: v_add_u32_e32 v15, 55, v8
+; SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v10, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v11
+; SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[9:10], v11, v[2:3]
+; SDAG-NEXT: v_lshrrev_b64 v[11:12], v12, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[13:14], v15, v[2:3]
+; SDAG-NEXT: v_add_u32_e32 v8, -9, v8
+; SDAG-NEXT: v_or_b32_e32 v14, v14, v12
+; SDAG-NEXT: v_or_b32_e32 v13, v13, v11
+; SDAG-NEXT: v_lshlrev_b64 v[11:12], v8, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e32 v10, 0, v10, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v9, 0, v9, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v15
+; SDAG-NEXT: v_cndmask_b32_e64 v5, v5, v1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v4, v4, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v12, v14, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v15
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v15, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v3, v8, v3, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v11, v13, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v8, v2, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; SDAG-NEXT: v_or_b32_e32 v1, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; SDAG-NEXT: v_mov_b32_e32 v2, v9
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v4, v4, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, v4
+; SDAG-NEXT: v_mov_b32_e32 v1, v5
+; SDAG-NEXT: v_mov_b32_e32 v3, v10
+; SDAG-NEXT: .LBB3_7: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB3_8: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.9: ; %itofp-sw-bb
+; SDAG-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
+; SDAG-NEXT: v_lshrrev_b32_e32 v3, 31, v1
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; SDAG-NEXT: v_or_b32_e32 v2, v2, v3
+; SDAG-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: v_lshrrev_b32_e32 v3, 2, v0
+; SDAG-NEXT: v_and_or_b32 v0, v3, 1, v0
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[4:5], 2, v[0:1]
+; SDAG-NEXT: v_and_b32_e32 v3, 0x800000, v1
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
+; SDAG-NEXT: v_alignbit_b32 v9, v2, v1, 2
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: ; %bb.11: ; %itofp-if-then20
+; SDAG-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
+; SDAG-NEXT: v_alignbit_b32 v9, v2, v1, 3
+; SDAG-NEXT: v_mov_b32_e32 v6, v7
+; SDAG-NEXT: ; %bb.12: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB3_13: ; %Flow4
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_and_b32_e32 v0, 0xfffff, v9
+; SDAG-NEXT: v_lshl_or_b32 v0, v6, 20, v0
+; SDAG-NEXT: v_add_u32_e32 v5, 0x3ff00000, v0
+; SDAG-NEXT: .LBB3_14: ; %Flow5
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, v4
+; SDAG-NEXT: v_mov_b32_e32 v1, v5
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: uitofp_i128_to_f64:
+; GISEL: ; %bb.0: ; %itofp-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GISEL-NEXT: v_or_b32_e32 v4, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v5, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB3_14
+; GISEL-NEXT: ; %bb.1: ; %itofp-if-end
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v0
+; GISEL-NEXT: v_ffbh_u32_e32 v4, v1
+; GISEL-NEXT: v_add_u32_e32 v5, 32, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v6, v2
+; GISEL-NEXT: v_min_u32_e32 v4, v4, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v3
+; GISEL-NEXT: v_add_u32_e32 v6, 32, v6
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_add_u32_e32 v4, 64, v4
+; GISEL-NEXT: v_min_u32_e32 v5, v5, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v8, v5, v4, vcc
+; GISEL-NEXT: v_sub_u32_e32 v7, 0x80, v8
+; GISEL-NEXT: v_sub_u32_e32 v6, 0x7f, v8
+; GISEL-NEXT: v_cmp_ge_i32_e32 vcc, 53, v7
+; GISEL-NEXT: ; implicit-def: $vgpr9
+; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
+; GISEL-NEXT: v_add_u32_e32 v2, 0xffffffb5, v8
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v9, 0, v1, vcc
+; GISEL-NEXT: ; implicit-def: $vgpr7
+; GISEL-NEXT: ; implicit-def: $vgpr0
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB3_13
+; GISEL-NEXT: ; %bb.4: ; %NodeBlock
+; GISEL-NEXT: v_cmp_le_i32_e32 vcc, 55, v7
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB3_8
+; GISEL-NEXT: ; %bb.5: ; %LeafBlock
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 55, v7
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB3_7
+; GISEL-NEXT: ; %bb.6: ; %itofp-sw-default
+; GISEL-NEXT: v_sub_u32_e32 v13, 0x49, v8
+; GISEL-NEXT: v_sub_u32_e32 v9, 64, v13
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], v13, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[9:10], v9, v[2:3]
+; GISEL-NEXT: v_subrev_u32_e32 v14, 64, v13
+; GISEL-NEXT: v_lshrrev_b64 v[11:12], v13, v[2:3]
+; GISEL-NEXT: v_or_b32_e32 v9, v4, v9
+; GISEL-NEXT: v_or_b32_e32 v10, v5, v10
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], v14, v[2:3]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v13
+; GISEL-NEXT: v_add_u32_e32 v8, 55, v8
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v5, v10, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v13
+; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v11, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v12, vcc
+; GISEL-NEXT: v_sub_u32_e32 v12, 64, v8
+; GISEL-NEXT: v_cndmask_b32_e64 v14, v4, v0, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v9, v5, v1, s[4:5]
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], v8, -1
+; GISEL-NEXT: v_lshlrev_b64 v[12:13], v12, -1
+; GISEL-NEXT: v_subrev_u32_e32 v15, 64, v8
+; GISEL-NEXT: v_or_b32_e32 v16, v4, v12
+; GISEL-NEXT: v_or_b32_e32 v17, v5, v13
+; GISEL-NEXT: v_lshrrev_b64 v[12:13], v15, -1
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v8
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v12, v16, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v13, v13, v17, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v8
+; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v8, v12, -1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v12, v13, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v2, v4, v2
+; GISEL-NEXT: v_and_b32_e32 v3, v5, v3
+; GISEL-NEXT: v_and_or_b32 v0, v8, v0, v2
+; GISEL-NEXT: v_and_or_b32 v1, v12, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v8, v14, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v8
+; GISEL-NEXT: v_mov_b32_e32 v1, v9
+; GISEL-NEXT: v_mov_b32_e32 v2, v10
+; GISEL-NEXT: v_mov_b32_e32 v3, v11
+; GISEL-NEXT: .LBB3_7: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: .LBB3_8: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; GISEL-NEXT: ; %bb.9: ; %itofp-sw-bb
+; GISEL-NEXT: v_lshlrev_b64 v[8:9], 1, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[10:11], 1, v[2:3]
+; GISEL-NEXT: v_lshrrev_b32_e32 v0, 31, v1
+; GISEL-NEXT: v_or_b32_e32 v10, v10, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v8
+; GISEL-NEXT: v_mov_b32_e32 v1, v9
+; GISEL-NEXT: v_mov_b32_e32 v2, v10
+; GISEL-NEXT: v_mov_b32_e32 v3, v11
+; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: v_bfe_u32 v4, v0, 2, 1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v4
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GISEL-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
+; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GISEL-NEXT: v_mov_b32_e32 v8, 0
+; GISEL-NEXT: v_and_b32_e32 v9, 0x800000, v1
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 2, v[0:1]
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GISEL-NEXT: v_lshlrev_b64 v[8:9], 30, v[2:3]
+; GISEL-NEXT: v_lshrrev_b32_e32 v5, 2, v1
+; GISEL-NEXT: v_or_b32_e32 v9, v5, v8
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: ; %bb.11: ; %itofp-if-then20
+; GISEL-NEXT: v_lshlrev_b64 v[2:3], 29, v[2:3]
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
+; GISEL-NEXT: v_lshrrev_b32_e32 v0, 3, v1
+; GISEL-NEXT: v_or_b32_e32 v9, v0, v2
+; GISEL-NEXT: v_mov_b32_e32 v6, v7
+; GISEL-NEXT: ; %bb.12: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB3_13: ; %Flow4
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_mov_b32_e32 v0, 0x3ff00000
+; GISEL-NEXT: v_lshl_add_u32 v0, v6, 20, v0
+; GISEL-NEXT: v_and_b32_e32 v1, 0xfffff, v9
+; GISEL-NEXT: v_and_or_b32 v4, v4, -1, 0
+; GISEL-NEXT: v_or3_b32 v5, v1, v0, 0
+; GISEL-NEXT: .LBB3_14: ; %Flow5
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: v_mov_b32_e32 v0, v4
+; GISEL-NEXT: v_mov_b32_e32 v1, v5
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = uitofp i128 %x to double
+ ret double %cvt
+}
+
+define half @sitofp_i128_to_f16(i128 %x) {
+; SDAG-LABEL: sitofp_i128_to_f16:
+; SDAG: ; %bb.0: ; %itofp-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_or_b32_e32 v5, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v4, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v4, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB4_14
+; SDAG-NEXT: ; %bb.1: ; %itofp-if-end
+; SDAG-NEXT: v_ashrrev_i32_e32 v5, 31, v3
+; SDAG-NEXT: v_xor_b32_e32 v0, v5, v0
+; SDAG-NEXT: v_xor_b32_e32 v1, v5, v1
+; SDAG-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v5
+; SDAG-NEXT: v_xor_b32_e32 v2, v5, v2
+; SDAG-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v5, vcc
+; SDAG-NEXT: v_xor_b32_e32 v6, v5, v3
+; SDAG-NEXT: v_subb_co_u32_e32 v4, vcc, v2, v5, vcc
+; SDAG-NEXT: v_subb_co_u32_e32 v5, vcc, v6, v5, vcc
+; SDAG-NEXT: v_ffbh_u32_e32 v2, v4
+; SDAG-NEXT: v_add_u32_e32 v2, 32, v2
+; SDAG-NEXT: v_ffbh_u32_e32 v6, v5
+; SDAG-NEXT: v_min_u32_e32 v2, v2, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v6, v0
+; SDAG-NEXT: v_add_u32_e32 v6, 32, v6
+; SDAG-NEXT: v_ffbh_u32_e32 v7, v1
+; SDAG-NEXT: v_min_u32_e32 v6, v6, v7
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_add_u32_e32 v6, 64, v6
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v6, v2, vcc
+; SDAG-NEXT: v_sub_u32_e32 v6, 0x80, v7
+; SDAG-NEXT: v_sub_u32_e32 v2, 0x7f, v7
+; SDAG-NEXT: v_cmp_gt_i32_e32 vcc, 25, v6
+; SDAG-NEXT: ; implicit-def: $vgpr8
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
+; SDAG-NEXT: v_add_u32_e32 v4, 0xffffff98, v7
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v4, v[0:1]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
+; SDAG-NEXT: v_cndmask_b32_e32 v8, 0, v0, vcc
+; SDAG-NEXT: ; implicit-def: $vgpr6
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr7
+; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB4_13
+; SDAG-NEXT: ; %bb.4: ; %NodeBlock
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, 25, v6
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB4_8
+; SDAG-NEXT: ; %bb.5: ; %LeafBlock
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 26, v6
+; SDAG-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB4_7
+; SDAG-NEXT: ; %bb.6: ; %itofp-sw-default
+; SDAG-NEXT: v_sub_u32_e32 v12, 0x66, v7
+; SDAG-NEXT: v_sub_u32_e32 v10, 64, v12
+; SDAG-NEXT: v_lshrrev_b64 v[8:9], v12, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[10:11], v10, v[4:5]
+; SDAG-NEXT: v_sub_u32_e32 v13, 38, v7
+; SDAG-NEXT: v_or_b32_e32 v11, v9, v11
+; SDAG-NEXT: v_or_b32_e32 v10, v8, v10
+; SDAG-NEXT: v_lshrrev_b64 v[8:9], v13, v[4:5]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v12
+; SDAG-NEXT: v_add_u32_e32 v14, 26, v7
+; SDAG-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v12
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[10:11], v13, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[12:13], v14, v[4:5]
+; SDAG-NEXT: v_subrev_u32_e32 v7, 38, v7
+; SDAG-NEXT: v_cndmask_b32_e64 v15, v8, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[7:8], v7, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v9, v9, v1, s[4:5]
+; SDAG-NEXT: v_or_b32_e32 v11, v13, v11
+; SDAG-NEXT: v_or_b32_e32 v10, v12, v10
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v14
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v14, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v8, v11, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v14
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v7, v10, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v5, v8, v5, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v4, v7, v4, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; SDAG-NEXT: v_or_b32_e32 v1, v1, v5
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v4
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v8, v15, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, v8
+; SDAG-NEXT: v_mov_b32_e32 v1, v9
+; SDAG-NEXT: .LBB4_7: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB4_8: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.9: ; %itofp-sw-bb
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; SDAG-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: v_lshrrev_b32_e32 v4, 2, v0
+; SDAG-NEXT: v_and_or_b32 v0, v4, 1, v0
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT: v_and_b32_e32 v4, 0x4000000, v0
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; SDAG-NEXT: v_alignbit_b32 v8, v1, v0, 2
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: ; %bb.11: ; %itofp-if-then20
+; SDAG-NEXT: v_alignbit_b32 v8, v1, v0, 3
+; SDAG-NEXT: v_mov_b32_e32 v2, v6
+; SDAG-NEXT: ; %bb.12: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB4_13: ; %Flow4
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_and_b32_e32 v0, 0x80000000, v3
+; SDAG-NEXT: v_lshl_add_u32 v1, v2, 23, 1.0
+; SDAG-NEXT: v_and_b32_e32 v2, 0x7fffff, v8
+; SDAG-NEXT: v_or3_b32 v0, v2, v0, v1
+; SDAG-NEXT: v_cvt_f16_f32_e32 v4, v0
+; SDAG-NEXT: .LBB4_14: ; %Flow5
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, v4
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: sitofp_i128_to_f16:
+; GISEL: ; %bb.0: ; %itofp-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_or_b32_e32 v4, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v5, v1, v3
+; GISEL-NEXT: s_mov_b32 s4, 0
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB4_14
+; GISEL-NEXT: ; %bb.1: ; %itofp-if-end
+; GISEL-NEXT: v_ashrrev_i32_e32 v6, 31, v3
+; GISEL-NEXT: v_xor_b32_e32 v0, v6, v0
+; GISEL-NEXT: v_xor_b32_e32 v1, v6, v1
+; GISEL-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v6
+; GISEL-NEXT: v_xor_b32_e32 v2, v6, v2
+; GISEL-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v6, vcc
+; GISEL-NEXT: v_xor_b32_e32 v3, v6, v3
+; GISEL-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v6, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v0
+; GISEL-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v6, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v4, v1
+; GISEL-NEXT: v_add_u32_e32 v5, 32, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v7, v2
+; GISEL-NEXT: v_min_u32_e32 v4, v4, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v3
+; GISEL-NEXT: v_add_u32_e32 v7, 32, v7
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_add_u32_e32 v4, 64, v4
+; GISEL-NEXT: v_min_u32_e32 v5, v5, v7
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v5, v4, vcc
+; GISEL-NEXT: v_sub_u32_e32 v8, 0x80, v5
+; GISEL-NEXT: v_sub_u32_e32 v7, 0x7f, v5
+; GISEL-NEXT: v_cmp_ge_i32_e32 vcc, 24, v8
+; GISEL-NEXT: ; implicit-def: $vgpr4
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
+; GISEL-NEXT: v_add_u32_e32 v2, 0xffffff98, v5
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
+; GISEL-NEXT: ; implicit-def: $vgpr8
+; GISEL-NEXT: ; implicit-def: $vgpr0
+; GISEL-NEXT: ; implicit-def: $vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr2
+; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB4_13
+; GISEL-NEXT: ; %bb.4: ; %NodeBlock
+; GISEL-NEXT: v_cmp_le_i32_e32 vcc, 26, v8
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB4_8
+; GISEL-NEXT: ; %bb.5: ; %LeafBlock
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 26, v8
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB4_7
+; GISEL-NEXT: ; %bb.6: ; %itofp-sw-default
+; GISEL-NEXT: v_sub_u32_e32 v4, 0x66, v5
+; GISEL-NEXT: v_sub_u32_e32 v11, 64, v4
+; GISEL-NEXT: v_lshrrev_b64 v[9:10], v4, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[11:12], v11, v[2:3]
+; GISEL-NEXT: v_subrev_u32_e32 v13, 64, v4
+; GISEL-NEXT: v_or_b32_e32 v11, v9, v11
+; GISEL-NEXT: v_or_b32_e32 v12, v10, v12
+; GISEL-NEXT: v_lshrrev_b64 v[9:10], v13, v[2:3]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
+; GISEL-NEXT: v_add_u32_e32 v5, 26, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GISEL-NEXT: v_sub_u32_e32 v11, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v13, v9, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v10, v1, vcc
+; GISEL-NEXT: v_lshrrev_b64 v[9:10], v5, -1
+; GISEL-NEXT: v_lshlrev_b64 v[11:12], v11, -1
+; GISEL-NEXT: v_subrev_u32_e32 v14, 64, v5
+; GISEL-NEXT: v_or_b32_e32 v15, v9, v11
+; GISEL-NEXT: v_or_b32_e32 v16, v10, v12
+; GISEL-NEXT: v_lshrrev_b64 v[11:12], v14, -1
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v11, v11, v15, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v12, v16, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v9, 0, v9, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v10, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v5, v11, -1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v11, v12, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v2, v9, v2
+; GISEL-NEXT: v_and_b32_e32 v3, v10, v3
+; GISEL-NEXT: v_and_or_b32 v0, v5, v0, v2
+; GISEL-NEXT: v_and_or_b32 v1, v11, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v3, v13, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GISEL-NEXT: v_mov_b32_e32 v1, v4
+; GISEL-NEXT: v_mov_b32_e32 v2, v5
+; GISEL-NEXT: v_mov_b32_e32 v3, v6
+; GISEL-NEXT: .LBB4_7: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: .LBB4_8: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; GISEL-NEXT: ; %bb.9: ; %itofp-sw-bb
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: v_bfe_u32 v2, v0, 2, 1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GISEL-NEXT: v_and_b32_e32 v2, 0x4000000, v0
+; GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 2, v[0:1]
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: ; %bb.11: ; %itofp-if-then20
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
+; GISEL-NEXT: v_mov_b32_e32 v7, v8
+; GISEL-NEXT: ; %bb.12: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB4_13: ; %Flow4
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_and_b32_e32 v0, 0x80000000, v6
+; GISEL-NEXT: v_lshl_add_u32 v1, v7, 23, 1.0
+; GISEL-NEXT: v_and_b32_e32 v2, 0x7fffff, v4
+; GISEL-NEXT: v_or3_b32 v0, v2, v0, v1
+; GISEL-NEXT: v_cvt_f16_f32_e32 v4, v0
+; GISEL-NEXT: .LBB4_14: ; %Flow5
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: v_mov_b32_e32 v0, v4
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = sitofp i128 %x to half
+ ret half %cvt
+}
+
+define half @uitofp_i128_to_f16(i128 %x) {
+; SDAG-LABEL: uitofp_i128_to_f16:
+; SDAG: ; %bb.0: ; %itofp-entry
+; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT: v_or_b32_e32 v5, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v4, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; SDAG-NEXT: v_mov_b32_e32 v4, 0
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB5_14
+; SDAG-NEXT: ; %bb.1: ; %itofp-if-end
+; SDAG-NEXT: v_ffbh_u32_e32 v4, v2
+; SDAG-NEXT: v_add_u32_e32 v4, 32, v4
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v3
+; SDAG-NEXT: v_min_u32_e32 v4, v4, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v5, v0
+; SDAG-NEXT: v_add_u32_e32 v5, 32, v5
+; SDAG-NEXT: v_ffbh_u32_e32 v6, v1
+; SDAG-NEXT: v_min_u32_e32 v5, v5, v6
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; SDAG-NEXT: v_add_u32_e32 v5, 64, v5
+; SDAG-NEXT: v_cndmask_b32_e32 v6, v5, v4, vcc
+; SDAG-NEXT: v_sub_u32_e32 v5, 0x80, v6
+; SDAG-NEXT: v_sub_u32_e32 v4, 0x7f, v6
+; SDAG-NEXT: v_cmp_gt_i32_e32 vcc, 25, v5
+; SDAG-NEXT: ; implicit-def: $vgpr7
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
+; SDAG-NEXT: v_add_u32_e32 v2, 0xffffff98, v6
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; SDAG-NEXT: v_cndmask_b32_e32 v7, 0, v0, vcc
+; SDAG-NEXT: ; implicit-def: $vgpr5
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: ; implicit-def: $vgpr6
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB5_13
+; SDAG-NEXT: ; %bb.4: ; %NodeBlock
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, 25, v5
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB5_8
+; SDAG-NEXT: ; %bb.5: ; %LeafBlock
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 26, v5
+; SDAG-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB5_7
+; SDAG-NEXT: ; %bb.6: ; %itofp-sw-default
+; SDAG-NEXT: v_sub_u32_e32 v11, 0x66, v6
+; SDAG-NEXT: v_sub_u32_e32 v9, 64, v11
+; SDAG-NEXT: v_lshrrev_b64 v[7:8], v11, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[9:10], v9, v[2:3]
+; SDAG-NEXT: v_sub_u32_e32 v12, 38, v6
+; SDAG-NEXT: v_or_b32_e32 v10, v8, v10
+; SDAG-NEXT: v_or_b32_e32 v9, v7, v9
+; SDAG-NEXT: v_lshrrev_b64 v[7:8], v12, v[2:3]
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v11
+; SDAG-NEXT: v_add_u32_e32 v13, 26, v6
+; SDAG-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v11
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
+; SDAG-NEXT: v_lshrrev_b64 v[9:10], v12, v[0:1]
+; SDAG-NEXT: v_lshlrev_b64 v[11:12], v13, v[2:3]
+; SDAG-NEXT: v_subrev_u32_e32 v6, 38, v6
+; SDAG-NEXT: v_cndmask_b32_e64 v14, v7, v0, s[4:5]
+; SDAG-NEXT: v_lshlrev_b64 v[6:7], v6, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, v8, v1, s[4:5]
+; SDAG-NEXT: v_or_b32_e32 v10, v12, v10
+; SDAG-NEXT: v_or_b32_e32 v9, v11, v9
+; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v13
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], v13, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e32 v7, v7, v10, vcc
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v13
+; SDAG-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; SDAG-NEXT: v_or_b32_e32 v1, v1, v3
+; SDAG-NEXT: v_or_b32_e32 v0, v0, v2
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_or_b32_e32 v7, v14, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, v7
+; SDAG-NEXT: v_mov_b32_e32 v1, v8
+; SDAG-NEXT: .LBB5_7: ; %Flow1
+; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
+; SDAG-NEXT: .LBB5_8: ; %Flow2
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: ; %bb.9: ; %itofp-sw-bb
+; SDAG-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; SDAG-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: v_lshrrev_b32_e32 v2, 2, v0
+; SDAG-NEXT: v_and_or_b32 v0, v2, 1, v0
+; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT: v_and_b32_e32 v2, 0x4000000, v0
+; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; SDAG-NEXT: v_alignbit_b32 v7, v1, v0, 2
+; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: ; %bb.11: ; %itofp-if-then20
+; SDAG-NEXT: v_alignbit_b32 v7, v1, v0, 3
+; SDAG-NEXT: v_mov_b32_e32 v4, v5
+; SDAG-NEXT: ; %bb.12: ; %Flow
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; SDAG-NEXT: .LBB5_13: ; %Flow4
+; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
+; SDAG-NEXT: v_and_b32_e32 v0, 0x7fffff, v7
+; SDAG-NEXT: v_lshl_or_b32 v0, v4, 23, v0
+; SDAG-NEXT: v_add_u32_e32 v0, 1.0, v0
+; SDAG-NEXT: v_cvt_f16_f32_e32 v4, v0
+; SDAG-NEXT: .LBB5_14: ; %Flow5
+; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v0, v4
+; SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: uitofp_i128_to_f16:
+; GISEL: ; %bb.0: ; %itofp-entry
+; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT: v_or_b32_e32 v4, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v5, v1, v3
+; GISEL-NEXT: s_mov_b32 s4, 0
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB5_14
+; GISEL-NEXT: ; %bb.1: ; %itofp-if-end
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v0
+; GISEL-NEXT: v_ffbh_u32_e32 v4, v1
+; GISEL-NEXT: v_add_u32_e32 v5, 32, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v6, v2
+; GISEL-NEXT: v_min_u32_e32 v4, v4, v5
+; GISEL-NEXT: v_ffbh_u32_e32 v5, v3
+; GISEL-NEXT: v_add_u32_e32 v6, 32, v6
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: v_add_u32_e32 v4, 64, v4
+; GISEL-NEXT: v_min_u32_e32 v5, v5, v6
+; GISEL-NEXT: v_cndmask_b32_e32 v5, v5, v4, vcc
+; GISEL-NEXT: v_sub_u32_e32 v7, 0x80, v5
+; GISEL-NEXT: v_sub_u32_e32 v6, 0x7f, v5
+; GISEL-NEXT: v_cmp_ge_i32_e32 vcc, 24, v7
+; GISEL-NEXT: ; implicit-def: $vgpr4
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
+; GISEL-NEXT: v_add_u32_e32 v2, 0xffffff98, v5
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
+; GISEL-NEXT: ; implicit-def: $vgpr7
+; GISEL-NEXT: ; implicit-def: $vgpr0
+; GISEL-NEXT: ; implicit-def: $vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr2
+; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB5_13
+; GISEL-NEXT: ; %bb.4: ; %NodeBlock
+; GISEL-NEXT: v_cmp_le_i32_e32 vcc, 26, v7
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB5_8
+; GISEL-NEXT: ; %bb.5: ; %LeafBlock
+; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 26, v7
+; GISEL-NEXT: s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB5_7
+; GISEL-NEXT: ; %bb.6: ; %itofp-sw-default
+; GISEL-NEXT: v_sub_u32_e32 v4, 0x66, v5
+; GISEL-NEXT: v_sub_u32_e32 v10, 64, v4
+; GISEL-NEXT: v_lshrrev_b64 v[8:9], v4, v[0:1]
+; GISEL-NEXT: v_lshlrev_b64 v[10:11], v10, v[2:3]
+; GISEL-NEXT: v_subrev_u32_e32 v12, 64, v4
+; GISEL-NEXT: v_or_b32_e32 v10, v8, v10
+; GISEL-NEXT: v_or_b32_e32 v11, v9, v11
+; GISEL-NEXT: v_lshrrev_b64 v[8:9], v12, v[2:3]
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
+; GISEL-NEXT: v_add_u32_e32 v5, 26, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GISEL-NEXT: v_sub_u32_e32 v10, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v8, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v4, v9, v1, vcc
+; GISEL-NEXT: v_lshrrev_b64 v[8:9], v5, -1
+; GISEL-NEXT: v_lshlrev_b64 v[10:11], v10, -1
+; GISEL-NEXT: v_subrev_u32_e32 v13, 64, v5
+; GISEL-NEXT: v_or_b32_e32 v14, v8, v10
+; GISEL-NEXT: v_or_b32_e32 v15, v9, v11
+; GISEL-NEXT: v_lshrrev_b64 v[10:11], v13, -1
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v10, v10, v14, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v11, v11, v15, vcc
+; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v5
+; GISEL-NEXT: v_cndmask_b32_e32 v8, 0, v8, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v9, 0, v9, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v5, v10, -1, s[4:5]
+; GISEL-NEXT: v_cndmask_b32_e64 v10, v11, -1, s[4:5]
+; GISEL-NEXT: v_and_b32_e32 v2, v8, v2
+; GISEL-NEXT: v_and_b32_e32 v3, v9, v3
+; GISEL-NEXT: v_and_or_b32 v0, v5, v0, v2
+; GISEL-NEXT: v_and_or_b32 v1, v10, v1, v3
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v3, v12, v0
+; GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GISEL-NEXT: v_mov_b32_e32 v1, v4
+; GISEL-NEXT: v_mov_b32_e32 v2, v5
+; GISEL-NEXT: v_mov_b32_e32 v3, v6
+; GISEL-NEXT: .LBB5_7: ; %Flow1
+; GISEL-NEXT: s_or_b64 exec, exec, s[12:13]
+; GISEL-NEXT: .LBB5_8: ; %Flow2
+; GISEL-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; GISEL-NEXT: ; %bb.9: ; %itofp-sw-bb
+; GISEL-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
+; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: v_bfe_u32 v2, v0, 2, 1
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GISEL-NEXT: v_and_b32_e32 v2, 0x4000000, v0
+; GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 2, v[0:1]
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: ; %bb.11: ; %itofp-if-then20
+; GISEL-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
+; GISEL-NEXT: v_mov_b32_e32 v6, v7
+; GISEL-NEXT: ; %bb.12: ; %Flow
+; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GISEL-NEXT: .LBB5_13: ; %Flow4
+; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
+; GISEL-NEXT: v_lshl_add_u32 v0, v6, 23, 1.0
+; GISEL-NEXT: v_mov_b32_e32 v1, 0x7fffff
+; GISEL-NEXT: v_and_or_b32 v0, v4, v1, v0
+; GISEL-NEXT: v_cvt_f16_f32_e32 v4, v0
+; GISEL-NEXT: .LBB5_14: ; %Flow5
+; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-NEXT: v_mov_b32_e32 v0, v4
+; GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cvt = uitofp i128 %x to half
+ ret half %cvt
+}
+
+; FIXME: ExpandLargeFpConvert asserts on bfloat
+; define bfloat @sitofp_i128_to_bf16(i128 %x) {
+; %cvt = sitofp i128 %x to bfloat
+; ret bfloat %cvt
+; }
+
+; define bfloat @uitofp_i128_to_bf16(i128 %x) {
+; %cvt = uitofp i128 %x to bfloat
+; ret bfloat %cvt
+; }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GCN: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll b/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll
index 1acbb09..fbf2ee1 100644
--- a/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll
+++ b/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll
@@ -60,7 +60,6 @@ define amdgpu_kernel void @test_kernel(i32 %val) #0 {
; CHECK-NEXT: ; implicit-def: $sgpr15
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: s_or_saveexec_b64 s[34:35], -1
diff --git a/llvm/test/CodeGen/AMDGPU/lds-mixed-absolute-addresses-unused.ll b/llvm/test/CodeGen/AMDGPU/lds-mixed-absolute-addresses-unused.ll
new file mode 100644
index 0000000..d101d8d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/lds-mixed-absolute-addresses-unused.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds < %s 2>&1 | FileCheck %s
+; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds < %s 2>&1 | FileCheck %s
+
+; This looks like a partially lowered module, but the non-lowered GV isn't used by any kernels.
+; In such cases, LowerModuleLDS is free to leave it in and ignore it, and we want to make sure
+; LowerModuleLDS doesn't crash if it re-runs on such modules.
+@notLowered = addrspace(3) global i32 poison
+@lowered = addrspace(3) global i32 poison, !absolute_symbol !0
+
+@llvm.compiler.used = appending addrspace(1) global [1 x ptr] [ptr addrspacecast (ptr addrspace(3) @notLowered to ptr)], section "llvm.metadata"
+
+define amdgpu_kernel void @kern(i32 %val0) {
+; CHECK-LABEL: define amdgpu_kernel void @kern(
+; CHECK-SAME: i32 [[VAL0:%.*]]) {
+; CHECK-NEXT: [[VAL1:%.*]] = add i32 [[VAL0]], 4
+; CHECK-NEXT: store i32 [[VAL1]], ptr addrspace(3) @lowered, align 4
+; CHECK-NEXT: ret void
+;
+ %val1 = add i32 %val0, 4
+ store i32 %val1, ptr addrspace(3) @lowered
+ ret void
+}
+
+
+!0 = !{i32 0, i32 1}
diff --git a/llvm/test/CodeGen/AMDGPU/lds-reject-mixed-absolute-addresses.ll b/llvm/test/CodeGen/AMDGPU/lds-reject-mixed-absolute-addresses.ll
index b512a43..b1f4f2e 100644
--- a/llvm/test/CodeGen/AMDGPU/lds-reject-mixed-absolute-addresses.ll
+++ b/llvm/test/CodeGen/AMDGPU/lds-reject-mixed-absolute-addresses.ll
@@ -8,7 +8,7 @@
define amdgpu_kernel void @kern() {
%val0 = load i32, ptr addrspace(3) @var1
%val1 = add i32 %val0, 4
- store i32 %val1, ptr addrspace(3) @var1
+ store i32 %val1, ptr addrspace(3) @var2
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index 5007f77..0ff5dd3 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -195,13 +195,13 @@
; GCN-O1-NEXT: Uniformity Analysis
; GCN-O1-NEXT: AMDGPU atomic optimizations
; GCN-O1-NEXT: Expand Atomic instructions
-; GCN-O1-NEXT: AMDGPU Promote Alloca
; GCN-O1-NEXT: Dominator Tree Construction
+; GCN-O1-NEXT: Natural Loop Information
+; GCN-O1-NEXT: AMDGPU Promote Alloca
; GCN-O1-NEXT: Cycle Info Analysis
; GCN-O1-NEXT: Uniformity Analysis
; GCN-O1-NEXT: AMDGPU IR optimizations
; GCN-O1-NEXT: Basic Alias Analysis (stateless AA impl)
-; GCN-O1-NEXT: Natural Loop Information
; GCN-O1-NEXT: Canonicalize natural loops
; GCN-O1-NEXT: Scalar Evolution Analysis
; GCN-O1-NEXT: Loop Pass Manager
@@ -470,9 +470,9 @@
; GCN-O1-OPTS-NEXT: Uniformity Analysis
; GCN-O1-OPTS-NEXT: AMDGPU atomic optimizations
; GCN-O1-OPTS-NEXT: Expand Atomic instructions
-; GCN-O1-OPTS-NEXT: AMDGPU Promote Alloca
; GCN-O1-OPTS-NEXT: Dominator Tree Construction
; GCN-O1-OPTS-NEXT: Natural Loop Information
+; GCN-O1-OPTS-NEXT: AMDGPU Promote Alloca
; GCN-O1-OPTS-NEXT: Canonicalize natural loops
; GCN-O1-OPTS-NEXT: Lazy Branch Probability Analysis
; GCN-O1-OPTS-NEXT: Lazy Block Frequency Analysis
@@ -775,9 +775,9 @@
; GCN-O2-NEXT: Uniformity Analysis
; GCN-O2-NEXT: AMDGPU atomic optimizations
; GCN-O2-NEXT: Expand Atomic instructions
-; GCN-O2-NEXT: AMDGPU Promote Alloca
; GCN-O2-NEXT: Dominator Tree Construction
; GCN-O2-NEXT: Natural Loop Information
+; GCN-O2-NEXT: AMDGPU Promote Alloca
; GCN-O2-NEXT: Split GEPs to a variadic base and a constant offset for better CSE
; GCN-O2-NEXT: Scalar Evolution Analysis
; GCN-O2-NEXT: Straight line strength reduction
@@ -1084,9 +1084,9 @@
; GCN-O3-NEXT: Uniformity Analysis
; GCN-O3-NEXT: AMDGPU atomic optimizations
; GCN-O3-NEXT: Expand Atomic instructions
-; GCN-O3-NEXT: AMDGPU Promote Alloca
; GCN-O3-NEXT: Dominator Tree Construction
; GCN-O3-NEXT: Natural Loop Information
+; GCN-O3-NEXT: AMDGPU Promote Alloca
; GCN-O3-NEXT: Split GEPs to a variadic base and a constant offset for better CSE
; GCN-O3-NEXT: Scalar Evolution Analysis
; GCN-O3-NEXT: Straight line strength reduction
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w32.ll
index b4415c1..f6197e0 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w32.ll
@@ -1,132 +1,44 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-SDAG-W32 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-GISEL-W32 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefix=GFX12 %s
-declare <2 x i32> @llvm.amdgcn.global.load.tr.v2i32.p1(ptr addrspace(1))
-declare <8 x i16> @llvm.amdgcn.global.load.tr.v8i16.p1(ptr addrspace(1))
-declare <8 x half> @llvm.amdgcn.global.load.tr.v8f16.p1(ptr addrspace(1))
-declare <8 x bfloat> @llvm.amdgcn.global.load.tr.v8bf16.p1(ptr addrspace(1))
+declare <2 x i32> @llvm.amdgcn.global.load.tr.b64.v2i32.p1(ptr addrspace(1))
+declare <8 x i16> @llvm.amdgcn.global.load.tr.b128.v8i16.p1(ptr addrspace(1))
define amdgpu_kernel void @global_load_tr_b64(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W32-LABEL: global_load_tr_b64:
-; GFX12-SDAG-W32: ; %bb.0: ; %entry
-; GFX12-SDAG-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W32-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-SDAG-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_load_tr_b64 v[0:1], v2, s[0:1] offset:32
-; GFX12-SDAG-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-SDAG-W32-NEXT: s_nop 0
-; GFX12-SDAG-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W32-NEXT: s_endpgm
-;
-; GFX12-GISEL-W32-LABEL: global_load_tr_b64:
-; GFX12-GISEL-W32: ; %bb.0: ; %entry
-; GFX12-GISEL-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W32-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-GISEL-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_load_tr_b64 v[0:1], v2, s[0:1] offset:32
-; GFX12-GISEL-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-GISEL-W32-NEXT: s_nop 0
-; GFX12-GISEL-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W32-NEXT: s_endpgm
+; GFX12-LABEL: global_load_tr_b64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_tr_b64 v[0:1], v2, s[0:1] offset:32
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <2 x i32> @llvm.amdgcn.global.load.tr.v2i32.p1(ptr addrspace(1) %gep)
+ %val = call <2 x i32> @llvm.amdgcn.global.load.tr.b64.v2i32.p1(ptr addrspace(1) %gep)
store <2 x i32> %val, ptr addrspace(1) %use
ret void
}
-define amdgpu_kernel void @global_load_tr_b128_i16(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W32-LABEL: global_load_tr_b128_i16:
-; GFX12-SDAG-W32: ; %bb.0: ; %entry
-; GFX12-SDAG-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-SDAG-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-SDAG-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-SDAG-W32-NEXT: s_nop 0
-; GFX12-SDAG-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W32-NEXT: s_endpgm
-;
-; GFX12-GISEL-W32-LABEL: global_load_tr_b128_i16:
-; GFX12-GISEL-W32: ; %bb.0: ; %entry
-; GFX12-GISEL-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-GISEL-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-GISEL-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-GISEL-W32-NEXT: s_nop 0
-; GFX12-GISEL-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W32-NEXT: s_endpgm
+define amdgpu_kernel void @global_load_tr_b128(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX12-LABEL: global_load_tr_b128:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b128 v4, v[0:3], s[2:3]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <8 x i16> @llvm.amdgcn.global.load.tr.v8i16.p1(ptr addrspace(1) %gep)
+ %val = call <8 x i16> @llvm.amdgcn.global.load.tr.b128.v8i16.p1(ptr addrspace(1) %gep)
store <8 x i16> %val, ptr addrspace(1) %use
ret void
}
-
-define amdgpu_kernel void @global_load_tr_b128_half(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W32-LABEL: global_load_tr_b128_half:
-; GFX12-SDAG-W32: ; %bb.0: ; %entry
-; GFX12-SDAG-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-SDAG-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-SDAG-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-SDAG-W32-NEXT: s_nop 0
-; GFX12-SDAG-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W32-NEXT: s_endpgm
-;
-; GFX12-GISEL-W32-LABEL: global_load_tr_b128_half:
-; GFX12-GISEL-W32: ; %bb.0: ; %entry
-; GFX12-GISEL-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-GISEL-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-GISEL-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-GISEL-W32-NEXT: s_nop 0
-; GFX12-GISEL-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W32-NEXT: s_endpgm
-entry:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <8 x half> @llvm.amdgcn.global.load.tr.v8f16.p1(ptr addrspace(1) %gep)
- store <8 x half> %val, ptr addrspace(1) %use
- ret void
-}
-
-define amdgpu_kernel void @global_load_tr_b128_bfloat(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W32-LABEL: global_load_tr_b128_bfloat:
-; GFX12-SDAG-W32: ; %bb.0: ; %entry
-; GFX12-SDAG-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-SDAG-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-SDAG-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-SDAG-W32-NEXT: s_nop 0
-; GFX12-SDAG-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W32-NEXT: s_endpgm
-;
-; GFX12-GISEL-W32-LABEL: global_load_tr_b128_bfloat:
-; GFX12-GISEL-W32: ; %bb.0: ; %entry
-; GFX12-GISEL-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-GISEL-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-GISEL-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-GISEL-W32-NEXT: s_nop 0
-; GFX12-GISEL-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W32-NEXT: s_endpgm
-entry:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <8 x bfloat> @llvm.amdgcn.global.load.tr.v8bf16.p1(ptr addrspace(1) %gep)
- store <8 x bfloat> %val, ptr addrspace(1) %use
- ret void
-}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w64.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w64.ll
index 7ad1416..a2dc366 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w64.ll
@@ -1,132 +1,44 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-SDAG-W64 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-GISEL-W64 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefix=GFX12 %s
-declare i32 @llvm.amdgcn.global.load.tr.i32.p1(ptr addrspace(1))
-declare <4 x i16> @llvm.amdgcn.global.load.tr.v4i16.p1(ptr addrspace(1))
-declare <4 x half> @llvm.amdgcn.global.load.tr.v4f16.p1(ptr addrspace(1))
-declare <4 x bfloat> @llvm.amdgcn.global.load.tr.v4bf16.p1(ptr addrspace(1))
+declare i32 @llvm.amdgcn.global.load.tr.b64.i32.p1(ptr addrspace(1))
+declare <4 x i16> @llvm.amdgcn.global.load.tr.b128.v4i16.p1(ptr addrspace(1))
define amdgpu_kernel void @global_load_tr_b64(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W64-LABEL: global_load_tr_b64:
-; GFX12-SDAG-W64: ; %bb.0: ; %entry
-; GFX12-SDAG-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W64-NEXT: v_mov_b32_e32 v0, 0
-; GFX12-SDAG-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_load_tr_b64 v1, v0, s[0:1] offset:32
-; GFX12-SDAG-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_store_b32 v0, v1, s[2:3]
-; GFX12-SDAG-W64-NEXT: s_nop 0
-; GFX12-SDAG-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W64-NEXT: s_endpgm
-;
-; GFX12-GISEL-W64-LABEL: global_load_tr_b64:
-; GFX12-GISEL-W64: ; %bb.0: ; %entry
-; GFX12-GISEL-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W64-NEXT: v_mov_b32_e32 v0, 0
-; GFX12-GISEL-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_load_tr_b64 v1, v0, s[0:1] offset:32
-; GFX12-GISEL-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_store_b32 v0, v1, s[2:3]
-; GFX12-GISEL-W64-NEXT: s_nop 0
-; GFX12-GISEL-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W64-NEXT: s_endpgm
+; GFX12-LABEL: global_load_tr_b64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_tr_b64 v1, v0, s[0:1] offset:32
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b32 v0, v1, s[2:3]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call i32 @llvm.amdgcn.global.load.tr.i32.p1(ptr addrspace(1) %gep)
+ %val = call i32 @llvm.amdgcn.global.load.tr.b64.i32.p1(ptr addrspace(1) %gep)
store i32 %val, ptr addrspace(1) %use
ret void
}
-define amdgpu_kernel void @global_load_tr_b128_i16(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W64-LABEL: global_load_tr_b128_i16:
-; GFX12-SDAG-W64: ; %bb.0: ; %entry
-; GFX12-SDAG-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-SDAG-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-SDAG-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-SDAG-W64-NEXT: s_nop 0
-; GFX12-SDAG-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W64-NEXT: s_endpgm
-;
-; GFX12-GISEL-W64-LABEL: global_load_tr_b128_i16:
-; GFX12-GISEL-W64: ; %bb.0: ; %entry
-; GFX12-GISEL-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-GISEL-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-GISEL-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-GISEL-W64-NEXT: s_nop 0
-; GFX12-GISEL-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W64-NEXT: s_endpgm
+define amdgpu_kernel void @global_load_tr_b128(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX12-LABEL: global_load_tr_b128:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <4 x i16> @llvm.amdgcn.global.load.tr.v4i16.p1(ptr addrspace(1) %gep)
+ %val = call <4 x i16> @llvm.amdgcn.global.load.tr.b128.v4i16.p1(ptr addrspace(1) %gep)
store <4 x i16> %val, ptr addrspace(1) %use
ret void
}
-
-define amdgpu_kernel void @global_load_tr_b128_half(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W64-LABEL: global_load_tr_b128_half:
-; GFX12-SDAG-W64: ; %bb.0: ; %entry
-; GFX12-SDAG-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-SDAG-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-SDAG-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-SDAG-W64-NEXT: s_nop 0
-; GFX12-SDAG-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W64-NEXT: s_endpgm
-;
-; GFX12-GISEL-W64-LABEL: global_load_tr_b128_half:
-; GFX12-GISEL-W64: ; %bb.0: ; %entry
-; GFX12-GISEL-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-GISEL-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-GISEL-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-GISEL-W64-NEXT: s_nop 0
-; GFX12-GISEL-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W64-NEXT: s_endpgm
-entry:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <4 x half> @llvm.amdgcn.global.load.tr.v4f16.p1(ptr addrspace(1) %gep)
- store <4 x half> %val, ptr addrspace(1) %use
- ret void
-}
-
-define amdgpu_kernel void @global_load_tr_b128_bfloat(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W64-LABEL: global_load_tr_b128_bfloat:
-; GFX12-SDAG-W64: ; %bb.0: ; %entry
-; GFX12-SDAG-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-SDAG-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-SDAG-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-SDAG-W64-NEXT: s_nop 0
-; GFX12-SDAG-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W64-NEXT: s_endpgm
-;
-; GFX12-GISEL-W64-LABEL: global_load_tr_b128_bfloat:
-; GFX12-GISEL-W64: ; %bb.0: ; %entry
-; GFX12-GISEL-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-GISEL-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-GISEL-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-GISEL-W64-NEXT: s_nop 0
-; GFX12-GISEL-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W64-NEXT: s_endpgm
-entry:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <4 x bfloat> @llvm.amdgcn.global.load.tr.v4bf16.p1(ptr addrspace(1) %gep)
- store <4 x bfloat> %val, ptr addrspace(1) %use
- ret void
-}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.single.2b.mir b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.single.2b.mir
index 091b29c..e93595b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.single.2b.mir
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.single.2b.mir
@@ -4,6 +4,8 @@
--- |
define amdgpu_kernel void @single-wave-phase-2b(ptr addrspace(3) noalias %in0, ptr addrspace(3) noalias %in1, ptr addrspace(3) noalias %in2, ptr addrspace(3) noalias %in3, ptr addrspace(3) noalias %in4, ptr addrspace(3) noalias %in5, ptr addrspace(3) noalias %in6, ptr addrspace(3) noalias %in7, ptr addrspace(3) noalias %in8, ptr addrspace(3) noalias %in9, ptr addrspace(3) noalias %in10, ptr addrspace(3) noalias %in11, ptr addrspace(7) noalias %in12, ptr addrspace(7) noalias %in13, ptr addrspace(7) noalias %in14, ptr addrspace(7) noalias %in15, ptr addrspace(7) noalias %in16, ptr addrspace(7) noalias %in17, ptr addrspace(7) noalias %in18, ptr addrspace(7) noalias %in19, ptr addrspace(7) noalias %in20, ptr addrspace(7) noalias %in21, ptr addrspace(7) noalias %in22, ptr addrspace(7) noalias %in23, ptr addrspace(7) noalias %in24, ptr addrspace(7) noalias %in25, ptr addrspace(7) noalias %in26, ptr addrspace(7) noalias %in27, ptr addrspace(7) noalias %in28, ptr addrspace(7) noalias %in29) #0 { ret void }
+ attributes #0 = { nounwind "amdgpu-waves-per-eu"="1,1" "amdgpu-flat-work-group-size"="1,256" }
+
!0 = distinct !{!0}
!1 = !{!1, !0}
...
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll
index 1348315..7b1f55e 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll
@@ -22,18 +22,36 @@ main_body:
define amdgpu_ps <4 x float> @load_2dmsaa_both(<8 x i32> inreg %rsrc, ptr addrspace(1) inreg %out, i32 %s, i32 %t, i32 %fragid) {
; GFX11-LABEL: load_2dmsaa_both:
; GFX11: ; %bb.0: ; %main_body
-; GFX11-NEXT: image_msaa_load v[0:4], v[0:2], s[0:7] dmask:0x2 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe lwe ; encoding: [0x98,0x02,0x60,0xf0,0x00,0x00,0x60,0x00]
-; GFX11-NEXT: v_mov_b32_e32 v5, 0 ; encoding: [0x80,0x02,0x0a,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x08,0x05]
+; GFX11-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x06,0x07]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4) ; encoding: [0x42,0x02,0x87,0xbf]
+; GFX11-NEXT: v_mov_b32_e32 v9, v8 ; encoding: [0x08,0x03,0x12,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v10, v8 ; encoding: [0x08,0x03,0x14,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v11, v8 ; encoding: [0x08,0x03,0x16,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v12, v8 ; encoding: [0x08,0x03,0x18,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v0, v8 :: v_dual_mov_b32 v1, v9 ; encoding: [0x08,0x01,0x10,0xca,0x09,0x01,0x00,0x00]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; encoding: [0x93,0x01,0x87,0xbf]
+; GFX11-NEXT: v_dual_mov_b32 v2, v10 :: v_dual_mov_b32 v3, v11 ; encoding: [0x0a,0x01,0x10,0xca,0x0b,0x01,0x02,0x02]
+; GFX11-NEXT: v_mov_b32_e32 v4, v12 ; encoding: [0x0c,0x03,0x08,0x7e]
+; GFX11-NEXT: image_msaa_load v[0:4], v[5:7], s[0:7] dmask:0x2 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe lwe ; encoding: [0x98,0x02,0x60,0xf0,0x05,0x00,0x60,0x00]
; GFX11-NEXT: s_waitcnt vmcnt(0) ; encoding: [0xf7,0x03,0x89,0xbf]
-; GFX11-NEXT: global_store_b32 v5, v4, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x05,0x04,0x08,0x00]
+; GFX11-NEXT: global_store_b32 v8, v4, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x08,0x04,0x08,0x00]
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: load_2dmsaa_both:
; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: image_msaa_load v[0:4], [v0, v1, v2], s[0:7] dmask:0x2 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe lwe ; encoding: [0x0e,0x20,0x86,0xe4,0x00,0x01,0x00,0x00,0x00,0x01,0x02,0x00]
-; GFX12-NEXT: v_mov_b32_e32 v5, 0 ; encoding: [0x80,0x02,0x0a,0x7e]
+; GFX12-NEXT: v_dual_mov_b32 v7, v0 :: v_dual_mov_b32 v8, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x08,0x07]
+; GFX12-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v6, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x06,0x05]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; encoding: [0x22,0x01,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v9, v8 :: v_dual_mov_b32 v10, v8 ; encoding: [0x08,0x01,0x10,0xca,0x08,0x01,0x0a,0x09]
+; GFX12-NEXT: v_dual_mov_b32 v11, v8 :: v_dual_mov_b32 v12, v8 ; encoding: [0x08,0x01,0x10,0xca,0x08,0x01,0x0c,0x0b]
+; GFX12-NEXT: v_dual_mov_b32 v0, v8 :: v_dual_mov_b32 v1, v9 ; encoding: [0x08,0x01,0x10,0xca,0x09,0x01,0x00,0x00]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; encoding: [0x92,0x01,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v2, v10 :: v_dual_mov_b32 v3, v11 ; encoding: [0x0a,0x01,0x10,0xca,0x0b,0x01,0x02,0x02]
+; GFX12-NEXT: v_mov_b32_e32 v4, v12 ; encoding: [0x0c,0x03,0x08,0x7e]
+; GFX12-NEXT: image_msaa_load v[0:4], [v7, v6, v5], s[0:7] dmask:0x2 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe lwe ; encoding: [0x0e,0x20,0x86,0xe4,0x00,0x01,0x00,0x00,0x07,0x06,0x05,0x00]
; GFX12-NEXT: s_wait_loadcnt 0x0 ; encoding: [0x00,0x00,0xc0,0xbf]
-; GFX12-NEXT: global_store_b32 v5, v4, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x02,0x05,0x00,0x00,0x00]
+; GFX12-NEXT: global_store_b32 v8, v4, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x02,0x08,0x00,0x00,0x00]
; GFX12-NEXT: ; return to shader part epilog
main_body:
%v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.2dmsaa.v4f32i32.i32(i32 2, i32 %s, i32 %t, i32 %fragid, <8 x i32> %rsrc, i32 3, i32 0)
@@ -63,18 +81,37 @@ main_body:
define amdgpu_ps <4 x float> @load_2darraymsaa_tfe(<8 x i32> inreg %rsrc, ptr addrspace(1) inreg %out, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
; GFX11-LABEL: load_2darraymsaa_tfe:
; GFX11: ; %bb.0: ; %main_body
-; GFX11-NEXT: image_msaa_load v[0:4], v[0:3], s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe ; encoding: [0x9c,0x08,0x60,0xf0,0x00,0x00,0x20,0x00]
-; GFX11-NEXT: v_mov_b32_e32 v5, 0 ; encoding: [0x80,0x02,0x0a,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v9, 0 :: v_dual_mov_b32 v8, v3 ; encoding: [0x80,0x00,0x10,0xca,0x03,0x01,0x08,0x09]
+; GFX11-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x06,0x07]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4) ; encoding: [0x42,0x02,0x87,0xbf]
+; GFX11-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v10, v9 ; encoding: [0x00,0x01,0x10,0xca,0x09,0x01,0x0a,0x05]
+; GFX11-NEXT: v_mov_b32_e32 v11, v9 ; encoding: [0x09,0x03,0x16,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v12, v9 ; encoding: [0x09,0x03,0x18,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v13, v9 ; encoding: [0x09,0x03,0x1a,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v0, v9 :: v_dual_mov_b32 v1, v10 ; encoding: [0x09,0x01,0x10,0xca,0x0a,0x01,0x00,0x00]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; encoding: [0x93,0x01,0x87,0xbf]
+; GFX11-NEXT: v_dual_mov_b32 v2, v11 :: v_dual_mov_b32 v3, v12 ; encoding: [0x0b,0x01,0x10,0xca,0x0c,0x01,0x02,0x02]
+; GFX11-NEXT: v_mov_b32_e32 v4, v13 ; encoding: [0x0d,0x03,0x08,0x7e]
+; GFX11-NEXT: image_msaa_load v[0:4], v[5:8], s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe ; encoding: [0x9c,0x08,0x60,0xf0,0x05,0x00,0x20,0x00]
; GFX11-NEXT: s_waitcnt vmcnt(0) ; encoding: [0xf7,0x03,0x89,0xbf]
-; GFX11-NEXT: global_store_b32 v5, v4, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x05,0x04,0x08,0x00]
+; GFX11-NEXT: global_store_b32 v9, v4, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x09,0x04,0x08,0x00]
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: load_2darraymsaa_tfe:
; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: image_msaa_load v[0:4], [v0, v1, v2, v3], s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe ; encoding: [0x0f,0x20,0x06,0xe6,0x00,0x00,0x00,0x00,0x00,0x01,0x02,0x03]
-; GFX12-NEXT: v_mov_b32_e32 v5, 0 ; encoding: [0x80,0x02,0x0a,0x7e]
+; GFX12-NEXT: v_mov_b32_e32 v9, 0 ; encoding: [0x80,0x02,0x12,0x7e]
+; GFX12-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v6, v2 ; encoding: [0x03,0x01,0x10,0xca,0x02,0x01,0x06,0x05]
+; GFX12-NEXT: v_dual_mov_b32 v7, v1 :: v_dual_mov_b32 v8, v0 ; encoding: [0x01,0x01,0x10,0xca,0x00,0x01,0x08,0x07]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; encoding: [0x23,0x01,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v10, v9 :: v_dual_mov_b32 v11, v9 ; encoding: [0x09,0x01,0x10,0xca,0x09,0x01,0x0a,0x0a]
+; GFX12-NEXT: v_dual_mov_b32 v12, v9 :: v_dual_mov_b32 v13, v9 ; encoding: [0x09,0x01,0x10,0xca,0x09,0x01,0x0c,0x0c]
+; GFX12-NEXT: v_dual_mov_b32 v0, v9 :: v_dual_mov_b32 v1, v10 ; encoding: [0x09,0x01,0x10,0xca,0x0a,0x01,0x00,0x00]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; encoding: [0x92,0x01,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v2, v11 :: v_dual_mov_b32 v3, v12 ; encoding: [0x0b,0x01,0x10,0xca,0x0c,0x01,0x02,0x02]
+; GFX12-NEXT: v_mov_b32_e32 v4, v13 ; encoding: [0x0d,0x03,0x08,0x7e]
+; GFX12-NEXT: image_msaa_load v[0:4], [v8, v7, v6, v5], s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe ; encoding: [0x0f,0x20,0x06,0xe6,0x00,0x00,0x00,0x00,0x08,0x07,0x06,0x05]
; GFX12-NEXT: s_wait_loadcnt 0x0 ; encoding: [0x00,0x00,0xc0,0xbf]
-; GFX12-NEXT: global_store_b32 v5, v4, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x02,0x05,0x00,0x00,0x00]
+; GFX12-NEXT: global_store_b32 v9, v4, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x02,0x09,0x00,0x00,0x00]
; GFX12-NEXT: ; return to shader part epilog
main_body:
%v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.2darraymsaa.v4f32i32.i32(i32 8, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
@@ -155,18 +192,31 @@ main_body:
define amdgpu_ps <4 x half> @load_2dmsaa_tfe_d16(<8 x i32> inreg %rsrc, ptr addrspace(1) inreg %out, i32 %s, i32 %t, i32 %fragid) {
; GFX11-LABEL: load_2dmsaa_tfe_d16:
; GFX11: ; %bb.0: ; %main_body
-; GFX11-NEXT: image_msaa_load v[0:2], v[0:2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe d16 ; encoding: [0x98,0x01,0x62,0xf0,0x00,0x00,0x20,0x00]
-; GFX11-NEXT: v_mov_b32_e32 v3, 0 ; encoding: [0x80,0x02,0x06,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v6, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x06,0x03]
+; GFX11-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v4, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x04,0x05]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; encoding: [0x22,0x01,0x87,0xbf]
+; GFX11-NEXT: v_mov_b32_e32 v7, v6 ; encoding: [0x06,0x03,0x0e,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v8, v6 ; encoding: [0x06,0x03,0x10,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 ; encoding: [0x06,0x01,0x10,0xca,0x07,0x01,0x00,0x00]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; encoding: [0x02,0x00,0x87,0xbf]
+; GFX11-NEXT: v_mov_b32_e32 v2, v8 ; encoding: [0x08,0x03,0x04,0x7e]
+; GFX11-NEXT: image_msaa_load v[0:2], v[3:5], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe d16 ; encoding: [0x98,0x01,0x62,0xf0,0x03,0x00,0x20,0x00]
; GFX11-NEXT: s_waitcnt vmcnt(0) ; encoding: [0xf7,0x03,0x89,0xbf]
-; GFX11-NEXT: global_store_b32 v3, v2, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x03,0x02,0x08,0x00]
+; GFX11-NEXT: global_store_b32 v6, v2, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x06,0x02,0x08,0x00]
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: load_2dmsaa_tfe_d16:
; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: image_msaa_load v[0:2], [v0, v1, v2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe d16 ; encoding: [0x2e,0x20,0x46,0xe4,0x00,0x00,0x00,0x00,0x00,0x01,0x02,0x00]
-; GFX12-NEXT: v_mov_b32_e32 v3, 0 ; encoding: [0x80,0x02,0x06,0x7e]
+; GFX12-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v6, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x06,0x05]
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x04,0x03]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; encoding: [0x92,0x00,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v8, v6 ; encoding: [0x06,0x01,0x10,0xca,0x06,0x01,0x08,0x07]
+; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 ; encoding: [0x06,0x01,0x10,0xca,0x07,0x01,0x00,0x00]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) ; encoding: [0x02,0x00,0x87,0xbf]
+; GFX12-NEXT: v_mov_b32_e32 v2, v8 ; encoding: [0x08,0x03,0x04,0x7e]
+; GFX12-NEXT: image_msaa_load v[0:2], [v5, v4, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe d16 ; encoding: [0x2e,0x20,0x46,0xe4,0x00,0x00,0x00,0x00,0x05,0x04,0x03,0x00]
; GFX12-NEXT: s_wait_loadcnt 0x0 ; encoding: [0x00,0x00,0xc0,0xbf]
-; GFX12-NEXT: global_store_b32 v3, v2, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x03,0x00,0x00,0x00]
+; GFX12-NEXT: global_store_b32 v6, v2, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x06,0x00,0x00,0x00]
; GFX12-NEXT: ; return to shader part epilog
main_body:
%v = call {<4 x half>,i32} @llvm.amdgcn.image.msaa.load.2dmsaa.v4f16i32.i32(i32 1, i32 %s, i32 %t, i32 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
@@ -196,18 +246,31 @@ main_body:
define amdgpu_ps <4 x half> @load_2darraymsaa_tfe_d16(<8 x i32> inreg %rsrc, ptr addrspace(1) inreg %out, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
; GFX11-LABEL: load_2darraymsaa_tfe_d16:
; GFX11: ; %bb.0: ; %main_body
-; GFX11-NEXT: image_msaa_load v[0:2], v[0:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe d16 ; encoding: [0x9c,0x01,0x62,0xf0,0x00,0x00,0x20,0x00]
-; GFX11-NEXT: v_mov_b32_e32 v3, 0 ; encoding: [0x80,0x02,0x06,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v6, v0 :: v_dual_mov_b32 v7, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x06,0x06]
+; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v5, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x04,0x04]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; encoding: [0x22,0x01,0x87,0xbf]
+; GFX11-NEXT: v_mov_b32_e32 v8, v7 ; encoding: [0x07,0x03,0x10,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v9, v7 ; encoding: [0x07,0x03,0x12,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 ; encoding: [0x07,0x01,0x10,0xca,0x08,0x01,0x00,0x00]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; encoding: [0x02,0x00,0x87,0xbf]
+; GFX11-NEXT: v_mov_b32_e32 v2, v9 ; encoding: [0x09,0x03,0x04,0x7e]
+; GFX11-NEXT: image_msaa_load v[0:2], [v6, v5, v4, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe d16 ; encoding: [0x9d,0x01,0x62,0xf0,0x06,0x00,0x20,0x00,0x05,0x04,0x03,0x00]
; GFX11-NEXT: s_waitcnt vmcnt(0) ; encoding: [0xf7,0x03,0x89,0xbf]
-; GFX11-NEXT: global_store_b32 v3, v2, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x03,0x02,0x08,0x00]
+; GFX11-NEXT: global_store_b32 v7, v2, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x07,0x02,0x08,0x00]
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: load_2darraymsaa_tfe_d16:
; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: image_msaa_load v[0:2], [v0, v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe d16 ; encoding: [0x2f,0x20,0x46,0xe4,0x00,0x00,0x00,0x00,0x00,0x01,0x02,0x03]
-; GFX12-NEXT: v_mov_b32_e32 v3, 0 ; encoding: [0x80,0x02,0x06,0x7e]
+; GFX12-NEXT: v_dual_mov_b32 v6, v0 :: v_dual_mov_b32 v7, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x06,0x06]
+; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v5, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x04,0x04]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; encoding: [0x92,0x00,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v8, v7 :: v_dual_mov_b32 v9, v7 ; encoding: [0x07,0x01,0x10,0xca,0x07,0x01,0x08,0x08]
+; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 ; encoding: [0x07,0x01,0x10,0xca,0x08,0x01,0x00,0x00]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) ; encoding: [0x02,0x00,0x87,0xbf]
+; GFX12-NEXT: v_mov_b32_e32 v2, v9 ; encoding: [0x09,0x03,0x04,0x7e]
+; GFX12-NEXT: image_msaa_load v[0:2], [v6, v5, v4, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe d16 ; encoding: [0x2f,0x20,0x46,0xe4,0x00,0x00,0x00,0x00,0x06,0x05,0x04,0x03]
; GFX12-NEXT: s_wait_loadcnt 0x0 ; encoding: [0x00,0x00,0xc0,0xbf]
-; GFX12-NEXT: global_store_b32 v3, v2, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x03,0x00,0x00,0x00]
+; GFX12-NEXT: global_store_b32 v7, v2, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x07,0x00,0x00,0x00]
; GFX12-NEXT: ; return to shader part epilog
main_body:
%v = call {<4 x half>,i32} @llvm.amdgcn.image.msaa.load.2darraymsaa.v4f16i32.i32(i32 1, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.inreg.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.inreg.ll
index 429528e..e3dd036 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.inreg.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.inreg.ll
@@ -147,6 +147,34 @@ main_body:
ret half %res
}
+define amdgpu_ps half @v_interp_rtz_f16(float inreg %i, float inreg %j, i32 inreg %m0) #0 {
+; GCN-LABEL: v_interp_rtz_f16:
+; GCN: ; %bb.0: ; %main_body
+; GCN-NEXT: s_mov_b32 s3, exec_lo
+; GCN-NEXT: s_wqm_b32 exec_lo, exec_lo
+; GCN-NEXT: s_mov_b32 m0, s2
+; GCN-NEXT: lds_param_load v1, attr0.x wait_vdst:15
+; GCN-NEXT: s_mov_b32 exec_lo, s3
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v2, s1
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GCN-NEXT: v_interp_p10_rtz_f16_f32 v3, v1, v0, v1 wait_exp:0
+; GCN-NEXT: v_interp_p10_rtz_f16_f32 v0, v1, v0, v1 op_sel:[1,0,1,0] wait_exp:7
+; GCN-NEXT: v_interp_p2_rtz_f16_f32 v3, v1, v2, v3 wait_exp:7
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GCN-NEXT: v_interp_p2_rtz_f16_f32 v0, v1, v2, v0 op_sel:[1,0,0,0] wait_exp:7
+; GCN-NEXT: v_add_f16_e32 v0, v3, v0
+; GCN-NEXT: ; return to shader part epilog
+main_body:
+ %p0 = call float @llvm.amdgcn.lds.param.load(i32 0, i32 0, i32 %m0)
+ %l_p0 = call float @llvm.amdgcn.interp.p10.rtz.f16(float %p0, float %i, float %p0, i1 0)
+ %l_p1 = call half @llvm.amdgcn.interp.p2.rtz.f16(float %p0, float %j, float %l_p0, i1 0)
+ %h_p0 = call float @llvm.amdgcn.interp.p10.rtz.f16(float %p0, float %i, float %p0, i1 1)
+ %h_p1 = call half @llvm.amdgcn.interp.p2.rtz.f16(float %p0, float %j, float %h_p0, i1 1)
+ %res = fadd half %l_p1, %h_p1
+ ret half %res
+}
+
define amdgpu_ps half @v_interp_f16_imm_params(float inreg %i, float inreg %j) #0 {
; GCN-LABEL: v_interp_f16_imm_params:
; GCN: ; %bb.0: ; %main_body
@@ -172,6 +200,8 @@ declare float @llvm.amdgcn.interp.inreg.p10(float, float, float) #0
declare float @llvm.amdgcn.interp.inreg.p2(float, float, float) #0
declare float @llvm.amdgcn.interp.inreg.p10.f16(float, float, float, i1) #0
declare half @llvm.amdgcn.interp.inreg.p2.f16(float, float, float, i1) #0
+declare float @llvm.amdgcn.interp.p10.rtz.f16(float, float, float, i1) #0
+declare half @llvm.amdgcn.interp.p2.rtz.f16(float, float, float, i1) #0
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare void @llvm.amdgcn.exp.f16(i32, i32, float, float, float, float, i1, i1) #0
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll
new file mode 100644
index 0000000..fdcb177
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll
@@ -0,0 +1,333 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -misched-cluster=0 < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -misched-cluster=0 -amdgpu-igrouplp-exact-solver-max-branches=250000 < %s | FileCheck -check-prefix=EXACTCUTOFF %s
+
+declare <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half>, <16 x half>, <8 x half>, i16)
+
+define amdgpu_kernel void @test_sched_group_barrier_pipeline_SWMMAC_cluster(ptr addrspace(3) noalias %in, ptr addrspace(3) noalias %out) #0 {
+; GCN-LABEL: test_sched_group_barrier_pipeline_SWMMAC_cluster:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GCN-NEXT: v_lshlrev_b32_e32 v28, 4, v0
+; GCN-NEXT: v_mov_b32_e32 v48, 0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v28
+; GCN-NEXT: v_dual_mov_b32 v50, s1 :: v_dual_add_nc_u32 v49, s1, v28
+; GCN-NEXT: ds_load_b128 v[8:11], v0
+; GCN-NEXT: ds_load_b128 v[12:15], v0 offset:512
+; GCN-NEXT: ds_load_b128 v[16:19], v0 offset:1536
+; GCN-NEXT: ds_load_b128 v[20:23], v0 offset:3072
+; GCN-NEXT: ds_load_b128 v[24:27], v0 offset:5120
+; GCN-NEXT: ds_load_b128 v[4:7], v0 offset:11280
+; GCN-NEXT: ds_load_b128 v[0:3], v0 offset:11264
+; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(7) SyncID(0)
+; GCN-NEXT: s_wait_dscnt 0x6
+; GCN-NEXT: v_mov_b32_e32 v31, v11
+; GCN-NEXT: s_wait_dscnt 0x5
+; GCN-NEXT: v_mov_b32_e32 v35, v15
+; GCN-NEXT: s_wait_dscnt 0x4
+; GCN-NEXT: v_mov_b32_e32 v39, v19
+; GCN-NEXT: s_wait_dscnt 0x3
+; GCN-NEXT: v_mov_b32_e32 v43, v23
+; GCN-NEXT: s_wait_dscnt 0x2
+; GCN-NEXT: v_dual_mov_b32 v47, v27 :: v_dual_mov_b32 v30, v10
+; GCN-NEXT: v_dual_mov_b32 v29, v9 :: v_dual_mov_b32 v28, v8
+; GCN-NEXT: v_dual_mov_b32 v34, v14 :: v_dual_mov_b32 v33, v13
+; GCN-NEXT: v_mov_b32_e32 v32, v12
+; GCN-NEXT: v_dual_mov_b32 v38, v18 :: v_dual_mov_b32 v37, v17
+; GCN-NEXT: v_mov_b32_e32 v36, v16
+; GCN-NEXT: v_dual_mov_b32 v42, v22 :: v_dual_mov_b32 v41, v21
+; GCN-NEXT: v_mov_b32_e32 v40, v20
+; GCN-NEXT: v_dual_mov_b32 v46, v26 :: v_dual_mov_b32 v45, v25
+; GCN-NEXT: v_mov_b32_e32 v44, v24
+; GCN-NEXT: s_wait_dscnt 0x0
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[28:31], v[8:11], v[0:7], v48
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[32:35], v[12:15], v[0:7], v48
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[36:39], v[16:19], v[0:7], v48
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[40:43], v[20:23], v[0:7], v48
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[44:47], v[24:27], v[0:7], v48
+; GCN-NEXT: ds_store_b128 v49, v[28:31]
+; GCN-NEXT: ds_store_b128 v50, v[32:35] offset:512
+; GCN-NEXT: ds_store_b128 v50, v[36:39] offset:1024
+; GCN-NEXT: ds_store_b128 v50, v[40:43] offset:1536
+; GCN-NEXT: ds_store_b128 v50, v[44:47] offset:2048
+; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(5) SyncID(0)
+; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(5) SyncID(0)
+; GCN-NEXT: s_endpgm
+;
+; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_SWMMAC_cluster:
+; EXACTCUTOFF: ; %bb.0: ; %entry
+; EXACTCUTOFF-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v28, 4, v0
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v48, 0
+; EXACTCUTOFF-NEXT: s_wait_kmcnt 0x0
+; EXACTCUTOFF-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; EXACTCUTOFF-NEXT: v_add_nc_u32_e32 v0, s0, v28
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v50, s1 :: v_dual_add_nc_u32 v49, s1, v28
+; EXACTCUTOFF-NEXT: ds_load_b128 v[8:11], v0
+; EXACTCUTOFF-NEXT: ds_load_b128 v[12:15], v0 offset:512
+; EXACTCUTOFF-NEXT: ds_load_b128 v[16:19], v0 offset:1536
+; EXACTCUTOFF-NEXT: ds_load_b128 v[20:23], v0 offset:3072
+; EXACTCUTOFF-NEXT: ds_load_b128 v[24:27], v0 offset:5120
+; EXACTCUTOFF-NEXT: ds_load_b128 v[4:7], v0 offset:11280
+; EXACTCUTOFF-NEXT: ds_load_b128 v[0:3], v0 offset:11264
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(7) SyncID(0)
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x6
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v31, v11
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x5
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v35, v15
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x4
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v39, v19
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x3
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v43, v23
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x2
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v47, v27 :: v_dual_mov_b32 v30, v10
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v29, v9 :: v_dual_mov_b32 v28, v8
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v34, v14 :: v_dual_mov_b32 v33, v13
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v32, v12
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v38, v18 :: v_dual_mov_b32 v37, v17
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v36, v16
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v42, v22 :: v_dual_mov_b32 v41, v21
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v40, v20
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v46, v26 :: v_dual_mov_b32 v45, v25
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v44, v24
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[28:31], v[8:11], v[0:7], v48
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[32:35], v[12:15], v[0:7], v48
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[36:39], v[16:19], v[0:7], v48
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[40:43], v[20:23], v[0:7], v48
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[44:47], v[24:27], v[0:7], v48
+; EXACTCUTOFF-NEXT: ds_store_b128 v49, v[28:31]
+; EXACTCUTOFF-NEXT: ds_store_b128 v50, v[32:35] offset:512
+; EXACTCUTOFF-NEXT: ds_store_b128 v50, v[36:39] offset:1024
+; EXACTCUTOFF-NEXT: ds_store_b128 v50, v[40:43] offset:1536
+; EXACTCUTOFF-NEXT: ds_store_b128 v50, v[44:47] offset:2048
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(5) SyncID(0)
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(5) SyncID(0)
+; EXACTCUTOFF-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %load.0.addr = getelementptr <8 x half>, ptr addrspace(3) %in, i32 %idx
+ %load.0 = load <8 x half>, ptr addrspace(3) %load.0.addr
+ %load.1.addr = getelementptr <8 x half>, ptr addrspace(3) %load.0.addr, i32 32
+ %load.1 = load <8 x half>, ptr addrspace(3) %load.1.addr
+ %load.2.addr = getelementptr <8 x half>, ptr addrspace(3) %load.1.addr, i32 64
+ %load.2 = load <8 x half>, ptr addrspace(3) %load.2.addr
+ %load.3.addr = getelementptr <8 x half>, ptr addrspace(3) %load.2.addr, i32 96
+ %load.3 = load <8 x half>, ptr addrspace(3) %load.3.addr
+ %load.4.addr = getelementptr <8 x half>, ptr addrspace(3) %load.3.addr, i32 128
+ %load.4 = load <8 x half>, ptr addrspace(3) %load.4.addr
+ %load.b.addr = getelementptr <16 x half>, ptr addrspace(3) %load.4.addr, i32 192
+ %load.b = load <16 x half>, ptr addrspace(3) %load.b.addr
+ %mai.0 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.0, <16 x half> %load.b, <8 x half> %load.0, i16 0)
+ %mai.1 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.1, <16 x half> %load.b, <8 x half> %load.1, i16 0)
+ %mai.2 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.2, <16 x half> %load.b, <8 x half> %load.2, i16 0)
+ %mai.3 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.3, <16 x half> %load.b, <8 x half> %load.3, i16 0)
+ %mai.4 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.4, <16 x half> %load.b, <8 x half> %load.4, i16 0)
+ %store.0.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 %idx
+ store <8 x half> %mai.0, ptr addrspace(3) %store.0.addr
+ %store.1.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 32
+ store <8 x half> %mai.1, ptr addrspace(3) %store.1.addr
+ %store.2.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 64
+ store <8 x half> %mai.2, ptr addrspace(3) %store.2.addr
+ %store.3.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 96
+ store <8 x half> %mai.3, ptr addrspace(3) %store.3.addr
+ %store.4.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 128
+ store <8 x half> %mai.4, ptr addrspace(3) %store.4.addr
+ ; 7 DS read
+ call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 7, i32 0)
+ ; 5 SWMMAC
+ call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 5, i32 0)
+ ; 5 DS write
+ call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 5, i32 0)
+ ret void
+}
+
+define amdgpu_kernel void @test_sched_group_barrier_pipeline_SWMMAC_interleaved(ptr addrspace(3) noalias %in, ptr addrspace(3) noalias %out) #0 {
+; GCN-LABEL: test_sched_group_barrier_pipeline_SWMMAC_interleaved:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GCN-NEXT: v_mov_b32_e32 v18, 0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_lshl_add_u32 v17, v0, 5, s0
+; GCN-NEXT: v_lshl_add_u32 v0, v0, 4, s1
+; GCN-NEXT: ds_load_b128 v[9:12], v17 offset:1024
+; GCN-NEXT: ds_load_b128 v[1:4], v17
+; GCN-NEXT: ds_load_b128 v[5:8], v17 offset:16
+; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(3) SyncID(0)
+; GCN-NEXT: s_wait_dscnt 0x2
+; GCN-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; GCN-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; GCN-NEXT: s_wait_dscnt 0x0
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT: ds_store_b128 v0, v[13:16]
+; GCN-NEXT: ds_load_b128 v[9:12], v17 offset:2560
+; GCN-NEXT: v_mov_b32_e32 v0, s1
+; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; GCN-NEXT: s_wait_dscnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; GCN-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT: ds_store_b128 v0, v[13:16] offset:512
+; GCN-NEXT: ds_load_b128 v[9:12], v17 offset:4608
+; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; GCN-NEXT: s_wait_dscnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; GCN-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT: ds_store_b128 v0, v[13:16] offset:1024
+; GCN-NEXT: ds_load_b128 v[9:12], v17 offset:7168
+; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; GCN-NEXT: s_wait_dscnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; GCN-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT: ds_store_b128 v0, v[13:16] offset:1536
+; GCN-NEXT: ds_load_b128 v[9:12], v17 offset:10240
+; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; GCN-NEXT: s_wait_dscnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; GCN-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT: ds_store_b128 v0, v[13:16] offset:2048
+; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; GCN-NEXT: s_endpgm
+;
+; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_SWMMAC_interleaved:
+; EXACTCUTOFF: ; %bb.0: ; %entry
+; EXACTCUTOFF-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v18, 0
+; EXACTCUTOFF-NEXT: s_wait_kmcnt 0x0
+; EXACTCUTOFF-NEXT: v_lshl_add_u32 v17, v0, 5, s0
+; EXACTCUTOFF-NEXT: v_lshl_add_u32 v0, v0, 4, s1
+; EXACTCUTOFF-NEXT: ds_load_b128 v[9:12], v17 offset:1024
+; EXACTCUTOFF-NEXT: ds_load_b128 v[1:4], v17
+; EXACTCUTOFF-NEXT: ds_load_b128 v[5:8], v17 offset:16
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(3) SyncID(0)
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x2
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ds_store_b128 v0, v[13:16]
+; EXACTCUTOFF-NEXT: ds_load_b128 v[9:12], v17 offset:2560
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v0, s1
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; EXACTCUTOFF-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ds_store_b128 v0, v[13:16] offset:512
+; EXACTCUTOFF-NEXT: ds_load_b128 v[9:12], v17 offset:4608
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; EXACTCUTOFF-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ds_store_b128 v0, v[13:16] offset:1024
+; EXACTCUTOFF-NEXT: ds_load_b128 v[9:12], v17 offset:7168
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; EXACTCUTOFF-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ds_store_b128 v0, v[13:16] offset:1536
+; EXACTCUTOFF-NEXT: ds_load_b128 v[9:12], v17 offset:10240
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; EXACTCUTOFF-NEXT: v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; EXACTCUTOFF-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT: v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: ds_store_b128 v0, v[13:16] offset:2048
+; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT: s_endpgm
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %load.b.addr = getelementptr <16 x half>, ptr addrspace(3) %in, i32 %idx
+ %load.b = load <16 x half>, ptr addrspace(3) %load.b.addr
+ %load.0.addr = getelementptr <8 x half>, ptr addrspace(3) %load.b.addr, i32 64
+ %load.0 = load <8 x half>, ptr addrspace(3) %load.0.addr
+ %load.1.addr = getelementptr <8 x half>, ptr addrspace(3) %load.0.addr, i32 96
+ %load.1 = load <8 x half>, ptr addrspace(3) %load.1.addr
+ %load.2.addr = getelementptr <8 x half>, ptr addrspace(3) %load.1.addr, i32 128
+ %load.2 = load <8 x half>, ptr addrspace(3) %load.2.addr
+ %load.3.addr = getelementptr <8 x half>, ptr addrspace(3) %load.2.addr, i32 160
+ %load.3 = load <8 x half>, ptr addrspace(3) %load.3.addr
+ %load.4.addr = getelementptr <8 x half>, ptr addrspace(3) %load.3.addr, i32 192
+ %load.4 = load <8 x half>, ptr addrspace(3) %load.4.addr
+ %mai.0 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.0, <16 x half> %load.b, <8 x half> %load.0, i16 0)
+ %mai.1 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.1, <16 x half> %load.b, <8 x half> %load.1, i16 0)
+ %mai.2 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.2, <16 x half> %load.b, <8 x half> %load.2, i16 0)
+ %mai.3 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.3, <16 x half> %load.b, <8 x half> %load.3, i16 0)
+ %mai.4 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.4, <16 x half> %load.b, <8 x half> %load.4, i16 0)
+ %store.0.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 %idx
+ store <8 x half> %mai.0, ptr addrspace(3) %store.0.addr
+ %store.1.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 32
+ store <8 x half> %mai.1, ptr addrspace(3) %store.1.addr
+ %store.2.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 64
+ store <8 x half> %mai.2, ptr addrspace(3) %store.2.addr
+ %store.3.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 96
+ store <8 x half> %mai.3, ptr addrspace(3) %store.3.addr
+ %store.4.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 128
+ store <8 x half> %mai.4, ptr addrspace(3) %store.4.addr
+ ; 3 DS read
+ call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 3, i32 0)
+ ; 1 SWMMAC
+ call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+ ; 1 DS write
+ call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
+ ; 1 DS read
+ call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
+ ; 1 SWMMAC
+ call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+ ; 1 DS write
+ call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
+ ; 1 DS read
+ call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
+ ; 1 SWMMAC
+ call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+ ; 1 DS write
+ call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
+ ; 1 DS read
+ call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
+ ; 1 SWMMAC
+ call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+ ; 1 DS write
+ call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
+ ; 1 DS read
+ call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
+ ; 1 SWMMAC
+ call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+ ; 1 DS write
+ call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.format.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.format.ll
index 00be32b..ba3d306 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.format.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.format.ll
@@ -2,6 +2,7 @@
;RUN: llc < %s -mtriple=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefixes=GFX6 %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefixes=GFX8PLUS %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs | FileCheck --check-prefixes=GFX11 %s
+;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-enable-prt-strict-null -verify-machineinstrs | FileCheck --check-prefixes=NOPRT %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs | FileCheck --check-prefixes=GFX12,GFX12-SDAG %s
;RUN: llc < %s -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs | FileCheck --check-prefixes=GFX12,GFX12-GISEL %s
@@ -34,6 +35,16 @@ define amdgpu_ps {<4 x float>, <4 x float>, <4 x float>} @buffer_load(<4 x i32>
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v8, 0
+; NOPRT-NEXT: s_clause 0x2
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v8, s[0:3], 0 idxen
+; NOPRT-NEXT: buffer_load_format_xyzw v[4:7], v8, s[0:3], 0 idxen glc
+; NOPRT-NEXT: buffer_load_format_xyzw v[8:11], v8, s[0:3], 0 idxen slc
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v8, 0
@@ -75,6 +86,13 @@ define amdgpu_ps <4 x float> @buffer_load_immoffs(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_immoffs:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:42
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_immoffs:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -146,6 +164,25 @@ define amdgpu_ps <4 x float> @buffer_load_immoffs_large(<4 x i32> inreg) {
; GFX11-NEXT: v_add_f32_e32 v2, v10, v2
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_immoffs_large:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v8, 0
+; NOPRT-NEXT: s_movk_i32 s4, 0x7ffc
+; NOPRT-NEXT: s_clause 0x1
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v8, s[0:3], 60 idxen offset:4092
+; NOPRT-NEXT: buffer_load_format_xyzw v[4:7], v8, s[0:3], s4 idxen offset:4092
+; NOPRT-NEXT: s_mov_b32 s4, 0x8ffc
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: v_add_f32_e32 v1, v1, v5
+; NOPRT-NEXT: buffer_load_format_xyzw v[8:11], v8, s[0:3], s4 idxen offset:4
+; NOPRT-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v3, v3, v7
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v1, v9, v1
+; NOPRT-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; NOPRT-NEXT: v_dual_add_f32 v0, v8, v0 :: v_dual_add_f32 v3, v11, v3
+; NOPRT-NEXT: v_add_f32_e32 v2, v10, v2
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_immoffs_large:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v8, 0
@@ -196,6 +233,13 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_12bit(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_voffset_large_12bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_voffset_large_12bit:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -235,6 +279,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_13bit(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_voffset_large_13bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0x1000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_voffset_large_13bit:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -274,6 +327,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_16bit(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_voffset_large_16bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0xf000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_voffset_large_16bit:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -313,6 +375,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_23bit(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_voffset_large_23bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0x7ff000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_voffset_large_23bit:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -352,6 +423,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_24bit(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_voffset_large_24bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0xfff000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-SDAG-LABEL: buffer_load_voffset_large_24bit:
; GFX12-SDAG: ; %bb.0: ; %main_body
; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 0x800000 :: v_dual_mov_b32 v0, 0
@@ -389,6 +469,12 @@ define amdgpu_ps <4 x float> @buffer_load_idx(<4 x i32> inreg, i32) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_idx:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_idx:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], null idxen
@@ -427,6 +513,15 @@ define amdgpu_ps <4 x float> @buffer_load_ofs(<4 x i32> inreg, i32) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_ofs:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_ofs:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, 0
@@ -466,6 +561,15 @@ define amdgpu_ps <4 x float> @buffer_load_ofs_imm(<4 x i32> inreg, i32) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_ofs_imm:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:60
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_ofs_imm:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, 0
@@ -497,6 +601,12 @@ define amdgpu_ps <4 x float> @buffer_load_both(<4 x i32> inreg, i32, i32) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_both:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_both:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], null idxen offen
@@ -529,6 +639,13 @@ define amdgpu_ps <4 x float> @buffer_load_both_reversed(<4 x i32> inreg, i32, i3
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_both_reversed:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v2, v0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[1:2], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_both_reversed:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v2, v0
@@ -562,6 +679,13 @@ define amdgpu_ps float @buffer_load_x(<4 x i32> inreg %rsrc) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_x:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_x:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -595,6 +719,13 @@ define amdgpu_ps float @buffer_load_x_i32(<4 x i32> inreg %rsrc) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_x_i32:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_x_i32:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -629,6 +760,13 @@ define amdgpu_ps <2 x float> @buffer_load_xy(<4 x i32> inreg %rsrc) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_xy:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xy v[0:1], v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_xy:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -644,7 +782,12 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v4i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
-; GFX6-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
+; GFX6-NEXT: v_mov_b32_e32 v7, 2
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
+; GFX6-NEXT: v_mov_b32_e32 v6, v2
+; GFX6-NEXT: buffer_load_format_xyzw v[2:6], v7, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: s_mov_b32 s0, s2
@@ -658,7 +801,12 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v4i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
-; GFX8PLUS-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
+; GFX8PLUS-NEXT: v_mov_b32_e32 v7, 2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v6, v2
+; GFX8PLUS-NEXT: buffer_load_format_xyzw v[2:6], v7, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
; GFX8PLUS-NEXT: v_mov_b32_e32 v0, v6
@@ -667,22 +815,40 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
;
; GFX11-LABEL: buffer_load_v4i32_tfe:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mov_b32_e32 v2, 0
-; GFX11-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
+; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v7, 2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
+; GFX11-NEXT: v_mov_b32_e32 v6, v2
+; GFX11-NEXT: buffer_load_format_xyzw v[2:6], v7, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b128 v[0:1], v[2:5], off
; GFX11-NEXT: v_mov_b32_e32 v0, v6
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v4i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v2, 2
+; NOPRT-NEXT: v_mov_b32_e32 v6, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b128 v[0:1], v[2:5], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v6
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v4i32_tfe:
; GFX12: ; %bb.0:
-; GFX12-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], null idxen tfe
+; GFX12-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v7, 2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v6, v2
+; GFX12-NEXT: buffer_load_format_xyzw v[2:6], v7, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b128 v[0:1], v[2:5], off
; GFX12-NEXT: v_mov_b32_e32 v0, v6
; GFX12-NEXT: ; return to shader part epilog
- %load = call { <4 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.format.sl_v4i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %load = call { <4 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.format.sl_v4i32i32s(<4 x i32> %rsrc, i32 2, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x i32>, i32 } %load, 0
store <4 x i32> %data, ptr addrspace(1) %out
%status = extractvalue { <4 x i32>, i32 } %load, 1
@@ -694,6 +860,10 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v4f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
+; GFX6-NEXT: v_mov_b32_e32 v6, v2
; GFX6-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -708,6 +878,10 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v4f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v6, v2
; GFX8PLUS-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -718,15 +892,32 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-LABEL: buffer_load_v4f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
+; GFX11-NEXT: v_mov_b32_e32 v6, v2
; GFX11-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b128 v[0:1], v[2:5], off
; GFX11-NEXT: v_mov_b32_e32 v0, v6
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v4f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v6, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[2:6], v6, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b128 v[0:1], v[2:5], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v6
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v4f32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v6, v2
; GFX12-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b128 v[0:1], v[2:5], off
@@ -744,6 +935,9 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v3i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -759,6 +953,9 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v3i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
; GFX8PLUS-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx3 v[0:1], v[2:4]
@@ -769,15 +966,31 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-LABEL: buffer_load_v3i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
; GFX11-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b96 v[0:1], v[2:4], off
; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v3i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v5, 0
+; NOPRT-NEXT: buffer_load_format_xyz v[2:5], v5, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b96 v[0:1], v[2:4], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v5
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v3i32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: v_mov_b32_e32 v5, v2
; GFX12-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b96 v[0:1], v[2:4], off
@@ -795,6 +1008,9 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v3f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -810,6 +1026,9 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v3f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
; GFX8PLUS-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx3 v[0:1], v[2:4]
@@ -820,15 +1039,31 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-LABEL: buffer_load_v3f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
; GFX11-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b96 v[0:1], v[2:4], off
; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v3f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v5, 0
+; NOPRT-NEXT: buffer_load_format_xyz v[2:5], v5, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b96 v[0:1], v[2:4], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v5
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v3f32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: v_mov_b32_e32 v5, v2
; GFX12-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b96 v[0:1], v[2:4], off
@@ -846,6 +1081,9 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v2i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -860,6 +1098,8 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v2i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
; GFX8PLUS-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
@@ -870,15 +1110,29 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-LABEL: buffer_load_v2i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
; GFX11-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: v_mov_b32_e32 v0, v4
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v2i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v4, 0
+; NOPRT-NEXT: buffer_load_format_xy v[2:4], v4, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b64 v[0:1], v[2:3], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v4
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v2i32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
; GFX12-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
@@ -896,6 +1150,9 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v2f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -910,6 +1167,8 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v2f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
; GFX8PLUS-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
@@ -920,15 +1179,29 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-LABEL: buffer_load_v2f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
; GFX11-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: v_mov_b32_e32 v0, v4
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v2f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v4, 0
+; NOPRT-NEXT: buffer_load_format_xy v[2:4], v4, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b64 v[0:1], v[2:3], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v4
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v2f32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
; GFX12-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
@@ -946,6 +1219,7 @@ define amdgpu_cs float @buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX6-LABEL: buffer_load_i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
; GFX6-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -960,6 +1234,7 @@ define amdgpu_cs float @buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX8PLUS-LABEL: buffer_load_i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
; GFX8PLUS-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dword v[0:1], v2
@@ -970,15 +1245,28 @@ define amdgpu_cs float @buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX11-LABEL: buffer_load_i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
; GFX11-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: v_mov_b32_e32 v0, v3
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v3, 0
+; NOPRT-NEXT: buffer_load_format_x v[2:3], v3, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b32 v[0:1], v2, off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v3
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_i32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
; GFX12-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b32 v[0:1], v2, off
@@ -996,6 +1284,7 @@ define amdgpu_cs float @buffer_load_f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX6-LABEL: buffer_load_f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
; GFX6-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -1010,6 +1299,7 @@ define amdgpu_cs float @buffer_load_f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX8PLUS-LABEL: buffer_load_f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
; GFX8PLUS-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dword v[0:1], v2
@@ -1020,15 +1310,28 @@ define amdgpu_cs float @buffer_load_f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX11-LABEL: buffer_load_f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
; GFX11-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: v_mov_b32_e32 v0, v3
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v3, 0
+; NOPRT-NEXT: buffer_load_format_x v[2:3], v3, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b32 v[0:1], v2, off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v3
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_f32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
; GFX12-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b32 v[0:1], v2, off
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.format.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.format.ll
index b0bd4e4..c5202b8 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.format.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.format.ll
@@ -2,6 +2,7 @@
;RUN: llc < %s -mtriple=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefixes=GFX6 %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefixes=GFX8PLUS %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs | FileCheck --check-prefixes=GFX11 %s
+;RUN: llc < %s -mtriple=amdgcn -mattr=-enable-prt-strict-null -mcpu=gfx1100 -verify-machineinstrs | FileCheck --check-prefixes=NOPRT %s
define amdgpu_ps {<4 x float>, <4 x float>, <4 x float>} @buffer_load(ptr addrspace(8) inreg) {
; GFX6-LABEL: buffer_load:
@@ -31,6 +32,16 @@ define amdgpu_ps {<4 x float>, <4 x float>, <4 x float>} @buffer_load(ptr addrsp
; GFX11-NEXT: buffer_load_format_xyzw v[8:11], v8, s[0:3], 0 idxen slc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v8, 0
+; NOPRT-NEXT: s_clause 0x2
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v8, s[0:3], 0 idxen
+; NOPRT-NEXT: buffer_load_format_xyzw v[4:7], v8, s[0:3], 0 idxen glc
+; NOPRT-NEXT: buffer_load_format_xyzw v[8:11], v8, s[0:3], 0 idxen slc
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 0, i32 0, i32 0)
%data_glc = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 0, i32 0, i32 1)
@@ -62,6 +73,13 @@ define amdgpu_ps <4 x float> @buffer_load_immoffs(ptr addrspace(8) inreg) {
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:42
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_immoffs:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:42
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 42, i32 0, i32 0)
ret <4 x float> %data
@@ -126,6 +144,25 @@ define amdgpu_ps <4 x float> @buffer_load_immoffs_large(ptr addrspace(8) inreg)
; GFX11-NEXT: v_dual_add_f32 v0, v8, v0 :: v_dual_add_f32 v3, v11, v3
; GFX11-NEXT: v_add_f32_e32 v2, v10, v2
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_immoffs_large:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v8, 0
+; NOPRT-NEXT: s_movk_i32 s4, 0x7ffc
+; NOPRT-NEXT: s_clause 0x1
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v8, s[0:3], 60 idxen offset:4092
+; NOPRT-NEXT: buffer_load_format_xyzw v[4:7], v8, s[0:3], s4 idxen offset:4092
+; NOPRT-NEXT: s_mov_b32 s4, 0x8ffc
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: v_add_f32_e32 v1, v1, v5
+; NOPRT-NEXT: buffer_load_format_xyzw v[8:11], v8, s[0:3], s4 idxen offset:4
+; NOPRT-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v3, v3, v7
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v1, v9, v1
+; NOPRT-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; NOPRT-NEXT: v_dual_add_f32 v0, v8, v0 :: v_dual_add_f32 v3, v11, v3
+; NOPRT-NEXT: v_add_f32_e32 v2, v10, v2
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%d.0 = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 4092, i32 60, i32 0)
%d.1 = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 4092, i32 32764, i32 0)
@@ -156,6 +193,13 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_12bit(ptr addrspace(8) i
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:4092
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_voffset_large_12bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 4092, i32 0, i32 0)
ret <4 x float> %data
@@ -188,6 +232,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_13bit(ptr addrspace(8) i
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_voffset_large_13bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0x1000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 8188, i32 0, i32 0)
ret <4 x float> %data
@@ -220,6 +273,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_16bit(ptr addrspace(8) i
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_voffset_large_16bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0xf000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 65532, i32 0, i32 0)
ret <4 x float> %data
@@ -252,6 +314,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_23bit(ptr addrspace(8) i
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_voffset_large_23bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0x7ff000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 8388604, i32 0, i32 0)
ret <4 x float> %data
@@ -284,6 +355,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_24bit(ptr addrspace(8) i
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_voffset_large_24bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0xfff000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 16777212, i32 0, i32 0)
ret <4 x float> %data
@@ -307,6 +387,12 @@ define amdgpu_ps <4 x float> @buffer_load_idx(ptr addrspace(8) inreg, i32) {
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_idx:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 %1, i32 0, i32 0, i32 0)
ret <4 x float> %data
@@ -339,6 +425,15 @@ define amdgpu_ps <4 x float> @buffer_load_ofs(ptr addrspace(8) inreg, i32) {
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_ofs:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 %1, i32 0, i32 0)
ret <4 x float> %data
@@ -371,6 +466,15 @@ define amdgpu_ps <4 x float> @buffer_load_ofs_imm(ptr addrspace(8) inreg, i32) {
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:60
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_ofs_imm:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:60
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%ofs = add i32 %1, 60
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 %ofs, i32 0, i32 0)
@@ -395,6 +499,12 @@ define amdgpu_ps <4 x float> @buffer_load_both(ptr addrspace(8) inreg, i32, i32)
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_both:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 %1, i32 %2, i32 0, i32 0)
ret <4 x float> %data
@@ -421,6 +531,13 @@ define amdgpu_ps <4 x float> @buffer_load_both_reversed(ptr addrspace(8) inreg,
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[1:2], s[0:3], 0 idxen offen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_both_reversed:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v2, v0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[1:2], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 %2, i32 %1, i32 0, i32 0)
ret <4 x float> %data
@@ -447,6 +564,13 @@ define amdgpu_ps float @buffer_load_x(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_x:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call float @llvm.amdgcn.struct.ptr.buffer.load.format.f32(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
ret float %data
@@ -473,6 +597,13 @@ define amdgpu_ps float @buffer_load_x_i32(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_x_i32:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call i32 @llvm.amdgcn.struct.ptr.buffer.load.format.i32(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%fdata = bitcast i32 %data to float
@@ -500,6 +631,13 @@ define amdgpu_ps <2 x float> @buffer_load_xy(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_format_xy v[0:1], v0, s[0:3], 0 idxen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_xy:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xy v[0:1], v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <2 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v2f32(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
ret <2 x float> %data
@@ -509,6 +647,10 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v4i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
+; GFX6-NEXT: v_mov_b32_e32 v6, v2
; GFX6-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -523,6 +665,10 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v4i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v6, v2
; GFX8PLUS-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -533,11 +679,25 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v4i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
+; GFX11-NEXT: v_mov_b32_e32 v6, v2
; GFX11-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b128 v[0:1], v[2:5], off
; GFX11-NEXT: v_mov_b32_e32 v0, v6
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v4i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v6, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[2:6], v6, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b128 v[0:1], v[2:5], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v6
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <4 x i32>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v4i32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x i32>, i32 } %load, 0
store <4 x i32> %data, ptr addrspace(1) %out
@@ -550,6 +710,10 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v4f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
+; GFX6-NEXT: v_mov_b32_e32 v6, v2
; GFX6-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -564,6 +728,10 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v4f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v6, v2
; GFX8PLUS-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -574,11 +742,25 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v4f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
+; GFX11-NEXT: v_mov_b32_e32 v6, v2
; GFX11-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b128 v[0:1], v[2:5], off
; GFX11-NEXT: v_mov_b32_e32 v0, v6
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v4f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v6, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[2:6], v6, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b128 v[0:1], v[2:5], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v6
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <4 x float>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v4f32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x float>, i32 } %load, 0
store <4 x float> %data, ptr addrspace(1) %out
@@ -591,6 +773,9 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v3i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -606,6 +791,9 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v3i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
; GFX8PLUS-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx3 v[0:1], v[2:4]
@@ -616,11 +804,24 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v3i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
; GFX11-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b96 v[0:1], v[2:4], off
; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v3i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v5, 0
+; NOPRT-NEXT: buffer_load_format_xyz v[2:5], v5, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b96 v[0:1], v[2:4], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v5
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <3 x i32>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v3i32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <3 x i32>, i32 } %load, 0
store <3 x i32> %data, ptr addrspace(1) %out
@@ -633,6 +834,9 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v3f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -648,6 +852,9 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v3f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
; GFX8PLUS-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx3 v[0:1], v[2:4]
@@ -658,11 +865,24 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v3f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
; GFX11-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b96 v[0:1], v[2:4], off
; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v3f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v5, 0
+; NOPRT-NEXT: buffer_load_format_xyz v[2:5], v5, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b96 v[0:1], v[2:4], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v5
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <3 x float>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v3f32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <3 x float>, i32 } %load, 0
store <3 x float> %data, ptr addrspace(1) %out
@@ -675,6 +895,9 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v2i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -689,6 +912,8 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v2i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
; GFX8PLUS-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
@@ -699,11 +924,23 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v2i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
; GFX11-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: v_mov_b32_e32 v0, v4
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v2i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v4, 0
+; NOPRT-NEXT: buffer_load_format_xy v[2:4], v4, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b64 v[0:1], v[2:3], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v4
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <2 x i32>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v2i32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <2 x i32>, i32 } %load, 0
store <2 x i32> %data, ptr addrspace(1) %out
@@ -716,6 +953,9 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v2f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -730,6 +970,8 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v2f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
; GFX8PLUS-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
@@ -740,11 +982,23 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v2f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
; GFX11-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: v_mov_b32_e32 v0, v4
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v2f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v4, 0
+; NOPRT-NEXT: buffer_load_format_xy v[2:4], v4, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b64 v[0:1], v[2:3], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v4
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <2 x float>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v2f32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <2 x float>, i32 } %load, 0
store <2 x float> %data, ptr addrspace(1) %out
@@ -757,6 +1011,7 @@ define amdgpu_cs float @buffer_load_i32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX6-LABEL: buffer_load_i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
; GFX6-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -771,6 +1026,7 @@ define amdgpu_cs float @buffer_load_i32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX8PLUS-LABEL: buffer_load_i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
; GFX8PLUS-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dword v[0:1], v2
@@ -781,11 +1037,22 @@ define amdgpu_cs float @buffer_load_i32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX11-LABEL: buffer_load_i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
; GFX11-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: v_mov_b32_e32 v0, v3
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v3, 0
+; NOPRT-NEXT: buffer_load_format_x v[2:3], v3, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b32 v[0:1], v2, off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v3
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { i32, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_i32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { i32, i32 } %load, 0
store i32 %data, ptr addrspace(1) %out
@@ -798,6 +1065,7 @@ define amdgpu_cs float @buffer_load_f32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX6-LABEL: buffer_load_f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
; GFX6-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -812,6 +1080,7 @@ define amdgpu_cs float @buffer_load_f32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX8PLUS-LABEL: buffer_load_f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
; GFX8PLUS-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dword v[0:1], v2
@@ -822,11 +1091,22 @@ define amdgpu_cs float @buffer_load_f32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX11-LABEL: buffer_load_f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
; GFX11-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: v_mov_b32_e32 v0, v3
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v3, 0
+; NOPRT-NEXT: buffer_load_format_x v[2:3], v3, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b32 v[0:1], v2, off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v3
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { float, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_f32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { float, i32 } %load, 0
store float %data, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
index ab7ab4d..d056a97 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
@@ -32,8 +32,6 @@ define amdgpu_kernel void @maxnum_f16(
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_max_f32_e32 v0, v0, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
@@ -170,7 +168,6 @@ define amdgpu_kernel void @maxnum_f16_imm_a(
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 0x40400000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
@@ -279,7 +276,6 @@ define amdgpu_kernel void @maxnum_f16_imm_b(
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_max_f32_e32 v0, 4.0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
@@ -384,21 +380,17 @@ define amdgpu_kernel void @maxnum_v2f16(
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s1, s2, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s0
-; SI-NEXT: s_lshr_b32 s0, s0, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s0
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s1
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_max_f32_e32 v2, v3, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: s_lshr_b32 s3, s0, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s1
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s3
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s2
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s0
; SI-NEXT: v_max_f32_e32 v0, v0, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_max_f32_e32 v1, v2, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
@@ -497,20 +489,18 @@ define amdgpu_kernel void @maxnum_v2f16_imm_a(
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s2, s[2:3], 0x0
-; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
-; SI-NEXT: s_lshr_b32 s2, s2, 16
+; SI-NEXT: s_lshr_b32 s3, s2, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s3
; SI-NEXT: v_cvt_f32_f16_e32 v1, s2
+; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_max_f32_e32 v0, 0x40400000, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_max_f32_e32 v1, 4.0, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_max_f32_e32 v0, 4.0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_max_f32_e32 v1, 0x40400000, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@@ -589,20 +579,18 @@ define amdgpu_kernel void @maxnum_v2f16_imm_b(
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s2, s[2:3], 0x0
-; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
-; SI-NEXT: s_lshr_b32 s2, s2, 16
+; SI-NEXT: s_lshr_b32 s3, s2, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s3
; SI-NEXT: v_cvt_f32_f16_e32 v1, s2
+; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_max_f32_e32 v0, 4.0, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_max_f32_e32 v1, 0x40400000, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_max_f32_e32 v0, 0x40400000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_max_f32_e32 v1, 4.0, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@@ -688,27 +676,21 @@ define amdgpu_kernel void @maxnum_v3f16(
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, s3
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s2
-; SI-NEXT: s_lshr_b32 s2, s2, 16
-; SI-NEXT: s_lshr_b32 s3, s0, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s3
+; SI-NEXT: s_lshr_b32 s3, s2, 16
+; SI-NEXT: s_lshr_b32 s8, s0, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s3
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s8
; SI-NEXT: v_cvt_f32_f16_e32 v3, s2
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s0
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; SI-NEXT: v_max_f32_e32 v2, v3, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v5
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_max_f32_e32 v1, v1, v3
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v4
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NEXT: v_max_f32_e32 v0, v0, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s0
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s1
+; SI-NEXT: v_max_f32_e32 v1, v1, v2
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_max_f32_e32 v2, v3, v4
+; SI-NEXT: v_max_f32_e32 v0, v0, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_or_b32_e32 v1, v1, v2
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0 offset:4
; SI-NEXT: buffer_store_dword v1, off, s[4:7], 0
; SI-NEXT: s_endpgm
@@ -837,25 +819,17 @@ define amdgpu_kernel void @maxnum_v4f16(
; SI-NEXT: v_cvt_f32_f16_e32 v2, s6
; SI-NEXT: s_lshr_b32 s6, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v3, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
; SI-NEXT: s_lshr_b32 s6, s5, 16
+; SI-NEXT: s_lshr_b32 s4, s4, 16
; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
; SI-NEXT: v_cvt_f32_f16_e32 v1, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
-; SI-NEXT: s_lshr_b32 s4, s4, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s5
-; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
-; SI-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v6, s5
; SI-NEXT: v_max_f32_e32 v3, v3, v5
-; SI-NEXT: v_mul_f32_e32 v5, 1.0, v7
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_max_f32_e32 v1, v1, v5
-; SI-NEXT: v_mul_f32_e32 v5, 1.0, v6
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_max_f32_e32 v2, v2, v5
-; SI-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; SI-NEXT: v_max_f32_e32 v2, v2, v7
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_max_f32_e32 v1, v1, v6
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_max_f32_e32 v0, v0, v4
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
@@ -986,20 +960,16 @@ define amdgpu_kernel void @fmax_v4f16_imm_a(
; SI-NEXT: v_cvt_f32_f16_e32 v1, s5
; SI-NEXT: s_lshr_b32 s5, s5, 16
; SI-NEXT: v_cvt_f32_f16_e32 v0, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s5
; SI-NEXT: s_lshr_b32 s4, s4, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s5
; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_max_f32_e32 v2, 4.0, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
; SI-NEXT: v_max_f32_e32 v1, 0x40400000, v1
+; SI-NEXT: v_max_f32_e32 v0, 0x41000000, v0
+; SI-NEXT: v_max_f32_e32 v2, 4.0, v2
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_max_f32_e32 v3, 2.0, v3
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_max_f32_e32 v0, 0x41000000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v1, v1, v2
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
index b7370ce..f934a2d 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
@@ -32,8 +32,6 @@ define amdgpu_kernel void @minnum_f16_ieee(
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_min_f32_e32 v0, v0, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
@@ -197,7 +195,6 @@ define amdgpu_kernel void @minnum_f16_imm_a(
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, 0x40400000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
@@ -305,7 +302,6 @@ define amdgpu_kernel void @minnum_f16_imm_b(
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_min_f32_e32 v0, 4.0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
@@ -409,21 +405,17 @@ define amdgpu_kernel void @minnum_v2f16_ieee(
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s1, s2, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s0
-; SI-NEXT: s_lshr_b32 s0, s0, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s0
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s1
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_min_f32_e32 v2, v3, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: s_lshr_b32 s3, s0, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s1
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s3
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s2
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s0
; SI-NEXT: v_min_f32_e32 v0, v0, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_min_f32_e32 v1, v2, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
@@ -556,20 +548,18 @@ define amdgpu_kernel void @minnum_v2f16_imm_a(
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s2, s[2:3], 0x0
-; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
-; SI-NEXT: s_lshr_b32 s2, s2, 16
+; SI-NEXT: s_lshr_b32 s3, s2, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s3
; SI-NEXT: v_cvt_f32_f16_e32 v1, s2
+; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_min_f32_e32 v0, 0x40400000, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_min_f32_e32 v1, 4.0, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_min_f32_e32 v0, 4.0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_min_f32_e32 v1, 0x40400000, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@@ -647,20 +637,18 @@ define amdgpu_kernel void @minnum_v2f16_imm_b(
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s2, s[2:3], 0x0
-; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
-; SI-NEXT: s_lshr_b32 s2, s2, 16
+; SI-NEXT: s_lshr_b32 s3, s2, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s3
; SI-NEXT: v_cvt_f32_f16_e32 v1, s2
+; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_min_f32_e32 v0, 4.0, v0
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_min_f32_e32 v1, 0x40400000, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_min_f32_e32 v0, 0x40400000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_min_f32_e32 v1, 4.0, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@@ -745,27 +733,21 @@ define amdgpu_kernel void @minnum_v3f16(
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, s3
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s2
-; SI-NEXT: s_lshr_b32 s2, s2, 16
-; SI-NEXT: s_lshr_b32 s3, s0, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s3
+; SI-NEXT: s_lshr_b32 s3, s2, 16
+; SI-NEXT: s_lshr_b32 s8, s0, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s3
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s8
; SI-NEXT: v_cvt_f32_f16_e32 v3, s2
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s0
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; SI-NEXT: v_min_f32_e32 v2, v3, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v5
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_min_f32_e32 v1, v1, v3
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v4
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NEXT: v_min_f32_e32 v0, v0, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s0
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s1
+; SI-NEXT: v_min_f32_e32 v1, v1, v2
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_min_f32_e32 v2, v3, v4
+; SI-NEXT: v_min_f32_e32 v0, v0, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_or_b32_e32 v1, v1, v2
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0 offset:4
; SI-NEXT: buffer_store_dword v1, off, s[4:7], 0
; SI-NEXT: s_endpgm
@@ -893,25 +875,17 @@ define amdgpu_kernel void @minnum_v4f16(
; SI-NEXT: v_cvt_f32_f16_e32 v2, s6
; SI-NEXT: s_lshr_b32 s6, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v3, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
; SI-NEXT: s_lshr_b32 s6, s5, 16
+; SI-NEXT: s_lshr_b32 s4, s4, 16
; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
; SI-NEXT: v_cvt_f32_f16_e32 v1, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
-; SI-NEXT: s_lshr_b32 s4, s4, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s5
-; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
-; SI-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v6, s5
; SI-NEXT: v_min_f32_e32 v3, v3, v5
-; SI-NEXT: v_mul_f32_e32 v5, 1.0, v7
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_min_f32_e32 v1, v1, v5
-; SI-NEXT: v_mul_f32_e32 v5, 1.0, v6
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_min_f32_e32 v2, v2, v5
-; SI-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; SI-NEXT: v_min_f32_e32 v2, v2, v7
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_min_f32_e32 v1, v1, v6
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_min_f32_e32 v0, v0, v4
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
@@ -1041,20 +1015,16 @@ define amdgpu_kernel void @fmin_v4f16_imm_a(
; SI-NEXT: v_cvt_f32_f16_e32 v1, s5
; SI-NEXT: s_lshr_b32 s5, s5, 16
; SI-NEXT: v_cvt_f32_f16_e32 v0, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s5
; SI-NEXT: s_lshr_b32 s4, s4, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s5
; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; SI-NEXT: v_min_f32_e32 v2, 4.0, v2
-; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
; SI-NEXT: v_min_f32_e32 v1, 0x40400000, v1
+; SI-NEXT: v_min_f32_e32 v0, 0x41000000, v0
+; SI-NEXT: v_min_f32_e32 v2, 4.0, v2
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_min_f32_e32 v3, 2.0, v3
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_min_f32_e32 v0, 0x41000000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v1, v1, v2
diff --git a/llvm/test/CodeGen/AMDGPU/lto-lower-module-lds.ll b/llvm/test/CodeGen/AMDGPU/lto-lower-module-lds.ll
new file mode 100644
index 0000000..f1d9463
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/lto-lower-module-lds.ll
@@ -0,0 +1,47 @@
+
+; Default O0
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -O0 -cg-opt-level 0 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Unified O0
+; RUN: opt -unified-lto -thinlto-split-lto-unit -thinlto-bc -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -unified-lto=full -O0 -cg-opt-level 0 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Default O1
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -O1 -cg-opt-level 1 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Unified O1
+; RUN: opt -unified-lto -thinlto-split-lto-unit -thinlto-bc -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -unified-lto=full -O1 -cg-opt-level 1 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Default O2
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -O2 -cg-opt-level 2 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Unified O2
+; RUN: opt -unified-lto -thinlto-split-lto-unit -thinlto-bc -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -unified-lto=full -O2 -cg-opt-level 2 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Default O3
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -O3 -cg-opt-level 3 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Unified O3
+; RUN: opt -unified-lto -thinlto-split-lto-unit -thinlto-bc -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -unified-lto=full -O3 -cg-opt-level 3 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; The first print comes from the new PM during the full LTO pipeline.
+; The second print comes from the legacy PM during the CG pipeline.
+
+; CHECK: Running pass: AMDGPULowerModuleLDSPass on [module]
+; CHECK: ModulePass Manager
+; CHECK: Lower uses of LDS variables from non-kernel functions
+
+@lds = internal unnamed_addr addrspace(3) global i32 poison, align 4
+
+define amdgpu_kernel void @test() {
+entry:
+ store i32 1, ptr addrspace(3) @lds
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-lo.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-lo.ll
index fb3e79b..5b7f0e7 100644
--- a/llvm/test/CodeGen/AMDGPU/mad-mix-lo.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad-mix-lo.ll
@@ -951,56 +951,70 @@ define <3 x half> @v_mad_mix_v3f32_clamp_postcvt(<3 x half> %src0, <3 x half> %s
; SDAG-GFX1100-LABEL: v_mad_mix_v3f32_clamp_postcvt:
; SDAG-GFX1100: ; %bb.0:
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX1100-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1]
; SDAG-GFX1100-NEXT: v_fma_mixlo_f16 v1, v1, v3, v5 op_sel_hi:[1,1,1]
-; SDAG-GFX1100-NEXT: v_fma_mixlo_f16 v3, v0, v2, v4 op_sel_hi:[1,1,1] clamp
; SDAG-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; SDAG-GFX1100-NEXT: v_pack_b32_f16 v1, v1, 0
-; SDAG-GFX1100-NEXT: v_fma_mixhi_f16 v3, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
+; SDAG-GFX1100-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX1100-NEXT: v_pack_b32_f16 v0, v1, 0
; SDAG-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; SDAG-GFX1100-NEXT: v_pk_max_f16 v1, v1, v1 clamp
-; SDAG-GFX1100-NEXT: v_mov_b32_e32 v0, v3
+; SDAG-GFX1100-NEXT: v_pk_max_f16 v1, v6, 0
+; SDAG-GFX1100-NEXT: v_pk_max_f16 v2, v0, 0
+; SDAG-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; SDAG-GFX1100-NEXT: v_pk_min_f16 v0, v1, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX1100-NEXT: v_pk_min_f16 v1, v2, 1.0 op_sel_hi:[1,0]
; SDAG-GFX1100-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-GFX900-LABEL: v_mad_mix_v3f32_clamp_postcvt:
; SDAG-GFX900: ; %bb.0:
; SDAG-GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX900-NEXT: v_mad_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1]
; SDAG-GFX900-NEXT: v_mad_mixlo_f16 v1, v1, v3, v5 op_sel_hi:[1,1,1]
-; SDAG-GFX900-NEXT: v_mad_mixlo_f16 v3, v0, v2, v4 op_sel_hi:[1,1,1] clamp
; SDAG-GFX900-NEXT: v_pack_b32_f16 v1, v1, 0
-; SDAG-GFX900-NEXT: v_mad_mixhi_f16 v3, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; SDAG-GFX900-NEXT: v_pk_max_f16 v1, v1, v1 clamp
-; SDAG-GFX900-NEXT: v_mov_b32_e32 v0, v3
+; SDAG-GFX900-NEXT: v_mad_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX900-NEXT: v_pk_max_f16 v1, v1, 0
+; SDAG-GFX900-NEXT: v_pk_max_f16 v0, v6, 0
+; SDAG-GFX900-NEXT: v_pk_min_f16 v0, v0, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX900-NEXT: v_pk_min_f16 v1, v1, 1.0 op_sel_hi:[1,0]
; SDAG-GFX900-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-GFX906-LABEL: v_mad_mix_v3f32_clamp_postcvt:
; SDAG-GFX906: ; %bb.0:
; SDAG-GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX906-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1]
; SDAG-GFX906-NEXT: v_fma_mixlo_f16 v1, v1, v3, v5 op_sel_hi:[1,1,1]
-; SDAG-GFX906-NEXT: v_fma_mixlo_f16 v3, v0, v2, v4 op_sel_hi:[1,1,1] clamp
; SDAG-GFX906-NEXT: v_pack_b32_f16 v1, v1, 0
-; SDAG-GFX906-NEXT: v_fma_mixhi_f16 v3, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; SDAG-GFX906-NEXT: v_pk_max_f16 v1, v1, v1 clamp
-; SDAG-GFX906-NEXT: v_mov_b32_e32 v0, v3
+; SDAG-GFX906-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX906-NEXT: v_pk_max_f16 v1, v1, 0
+; SDAG-GFX906-NEXT: v_pk_max_f16 v0, v6, 0
+; SDAG-GFX906-NEXT: v_pk_min_f16 v0, v0, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX906-NEXT: v_pk_min_f16 v1, v1, 1.0 op_sel_hi:[1,0]
; SDAG-GFX906-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-VI-LABEL: v_mad_mix_v3f32_clamp_postcvt:
; SDAG-VI: ; %bb.0:
; SDAG-VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v8, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SDAG-VI-NEXT: v_mac_f32_e32 v8, v6, v7
; SDAG-VI-NEXT: v_mac_f32_e32 v4, v0, v2
-; SDAG-VI-NEXT: v_cvt_f16_f32_sdwa v0, v8 clamp dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; SDAG-VI-NEXT: v_cvt_f16_f32_e64 v2, v4 clamp
; SDAG-VI-NEXT: v_mac_f32_e32 v5, v1, v3
-; SDAG-VI-NEXT: v_cvt_f16_f32_e64 v1, v5 clamp
+; SDAG-VI-NEXT: v_cvt_f16_f32_e32 v0, v8
+; SDAG-VI-NEXT: v_cvt_f16_f32_e32 v1, v4
+; SDAG-VI-NEXT: v_cvt_f16_f32_e32 v2, v5
+; SDAG-VI-NEXT: v_max_f16_e32 v0, 0, v0
+; SDAG-VI-NEXT: v_max_f16_e32 v3, 0, v1
+; SDAG-VI-NEXT: v_max_f16_e32 v1, 0, v2
+; SDAG-VI-NEXT: v_mov_b32_e32 v2, 0x3c00
+; SDAG-VI-NEXT: v_min_f16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; SDAG-VI-NEXT: v_min_f16_e32 v2, 1.0, v3
+; SDAG-VI-NEXT: v_min_f16_e32 v1, 1.0, v1
; SDAG-VI-NEXT: v_or_b32_e32 v0, v2, v0
; SDAG-VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1139,63 +1153,80 @@ define <3 x half> @v_mad_mix_v3f32_clamp_postcvt(<3 x half> %src0, <3 x half> %s
}
define <4 x half> @v_mad_mix_v4f32_clamp_postcvt(<4 x half> %src0, <4 x half> %src1, <4 x half> %src2) #0 {
-; GFX1100-LABEL: v_mad_mix_v4f32_clamp_postcvt:
-; GFX1100: ; %bb.0:
-; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp
-; GFX1100-NEXT: v_fma_mixlo_f16 v7, v1, v3, v5 op_sel_hi:[1,1,1] clamp
-; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1100-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; GFX1100-NEXT: v_fma_mixhi_f16 v7, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1100-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
-; GFX1100-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX1100-LABEL: v_mad_mix_v4f32_clamp_postcvt:
+; SDAG-GFX1100: ; %bb.0:
+; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX1100-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1]
+; SDAG-GFX1100-NEXT: v_fma_mixlo_f16 v7, v1, v3, v5 op_sel_hi:[1,1,1]
+; SDAG-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; SDAG-GFX1100-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX1100-NEXT: v_fma_mixhi_f16 v7, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; SDAG-GFX1100-NEXT: v_pk_max_f16 v0, v6, 0
+; SDAG-GFX1100-NEXT: v_pk_max_f16 v1, v7, 0
+; SDAG-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; SDAG-GFX1100-NEXT: v_pk_min_f16 v0, v0, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX1100-NEXT: v_pk_min_f16 v1, v1, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX1100-NEXT: s_setpc_b64 s[30:31]
;
-; GFX900-LABEL: v_mad_mix_v4f32_clamp_postcvt:
-; GFX900: ; %bb.0:
-; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-NEXT: v_mad_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp
-; GFX900-NEXT: v_mad_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; GFX900-NEXT: v_mad_mixlo_f16 v2, v1, v3, v5 op_sel_hi:[1,1,1] clamp
-; GFX900-NEXT: v_mad_mixhi_f16 v2, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; GFX900-NEXT: v_mov_b32_e32 v0, v6
-; GFX900-NEXT: v_mov_b32_e32 v1, v2
-; GFX900-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX900-LABEL: v_mad_mix_v4f32_clamp_postcvt:
+; SDAG-GFX900: ; %bb.0:
+; SDAG-GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX900-NEXT: v_mad_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1]
+; SDAG-GFX900-NEXT: v_mad_mixlo_f16 v7, v1, v3, v5 op_sel_hi:[1,1,1]
+; SDAG-GFX900-NEXT: v_mad_mixhi_f16 v7, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX900-NEXT: v_mad_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX900-NEXT: v_pk_max_f16 v1, v7, 0
+; SDAG-GFX900-NEXT: v_pk_max_f16 v0, v6, 0
+; SDAG-GFX900-NEXT: v_pk_min_f16 v0, v0, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX900-NEXT: v_pk_min_f16 v1, v1, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX900-NEXT: s_setpc_b64 s[30:31]
;
-; GFX906-LABEL: v_mad_mix_v4f32_clamp_postcvt:
-; GFX906: ; %bb.0:
-; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX906-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp
-; GFX906-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; GFX906-NEXT: v_fma_mixlo_f16 v2, v1, v3, v5 op_sel_hi:[1,1,1] clamp
-; GFX906-NEXT: v_fma_mixhi_f16 v2, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
-; GFX906-NEXT: v_mov_b32_e32 v0, v6
-; GFX906-NEXT: v_mov_b32_e32 v1, v2
-; GFX906-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX906-LABEL: v_mad_mix_v4f32_clamp_postcvt:
+; SDAG-GFX906: ; %bb.0:
+; SDAG-GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX906-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1]
+; SDAG-GFX906-NEXT: v_fma_mixlo_f16 v7, v1, v3, v5 op_sel_hi:[1,1,1]
+; SDAG-GFX906-NEXT: v_fma_mixhi_f16 v7, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX906-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1]
+; SDAG-GFX906-NEXT: v_pk_max_f16 v1, v7, 0
+; SDAG-GFX906-NEXT: v_pk_max_f16 v0, v6, 0
+; SDAG-GFX906-NEXT: v_pk_min_f16 v0, v0, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX906-NEXT: v_pk_min_f16 v1, v1, 1.0 op_sel_hi:[1,0]
+; SDAG-GFX906-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-VI-LABEL: v_mad_mix_v4f32_clamp_postcvt:
; SDAG-VI: ; %bb.0:
; SDAG-VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v7, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v9, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v9, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v10, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v11, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v10, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG-VI-NEXT: v_cvt_f32_f16_sdwa v11, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SDAG-VI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SDAG-VI-NEXT: v_mac_f32_e32 v10, v7, v9
; SDAG-VI-NEXT: v_mac_f32_e32 v11, v6, v8
-; SDAG-VI-NEXT: v_mac_f32_e32 v5, v1, v3
; SDAG-VI-NEXT: v_mac_f32_e32 v4, v0, v2
-; SDAG-VI-NEXT: v_cvt_f16_f32_sdwa v0, v11 clamp dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; SDAG-VI-NEXT: v_cvt_f16_f32_sdwa v1, v10 clamp dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; SDAG-VI-NEXT: v_cvt_f16_f32_e64 v2, v4 clamp
-; SDAG-VI-NEXT: v_cvt_f16_f32_e64 v3, v5 clamp
+; SDAG-VI-NEXT: v_mac_f32_e32 v5, v1, v3
+; SDAG-VI-NEXT: v_cvt_f16_f32_e32 v0, v10
+; SDAG-VI-NEXT: v_cvt_f16_f32_e32 v1, v11
+; SDAG-VI-NEXT: v_cvt_f16_f32_e32 v2, v4
+; SDAG-VI-NEXT: v_cvt_f16_f32_e32 v3, v5
+; SDAG-VI-NEXT: v_max_f16_e32 v0, 0, v0
+; SDAG-VI-NEXT: v_max_f16_e32 v1, 0, v1
+; SDAG-VI-NEXT: v_max_f16_e32 v2, 0, v2
+; SDAG-VI-NEXT: v_max_f16_e32 v3, 0, v3
+; SDAG-VI-NEXT: v_mov_b32_e32 v4, 0x3c00
+; SDAG-VI-NEXT: v_min_f16_sdwa v1, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; SDAG-VI-NEXT: v_min_f16_sdwa v0, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; SDAG-VI-NEXT: v_min_f16_e32 v3, 1.0, v3
+; SDAG-VI-NEXT: v_min_f16_e32 v2, 1.0, v2
; SDAG-VI-NEXT: v_or_b32_e32 v0, v2, v0
; SDAG-VI-NEXT: v_or_b32_e32 v1, v3, v1
; SDAG-VI-NEXT: s_setpc_b64 s[30:31]
@@ -1241,6 +1272,40 @@ define <4 x half> @v_mad_mix_v4f32_clamp_postcvt(<4 x half> %src0, <4 x half> %s
; SDAG-CI-NEXT: v_cvt_f32_f16_e64 v3, v3 clamp
; SDAG-CI-NEXT: s_setpc_b64 s[30:31]
;
+; GISEL-GFX1100-LABEL: v_mad_mix_v4f32_clamp_postcvt:
+; GISEL-GFX1100: ; %bb.0:
+; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX1100-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp
+; GISEL-GFX1100-NEXT: v_fma_mixlo_f16 v7, v1, v3, v5 op_sel_hi:[1,1,1] clamp
+; GISEL-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GISEL-GFX1100-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
+; GISEL-GFX1100-NEXT: v_fma_mixhi_f16 v7, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
+; GISEL-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-GFX1100-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GISEL-GFX1100-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX900-LABEL: v_mad_mix_v4f32_clamp_postcvt:
+; GISEL-GFX900: ; %bb.0:
+; GISEL-GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX900-NEXT: v_mad_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp
+; GISEL-GFX900-NEXT: v_mad_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
+; GISEL-GFX900-NEXT: v_mad_mixlo_f16 v2, v1, v3, v5 op_sel_hi:[1,1,1] clamp
+; GISEL-GFX900-NEXT: v_mad_mixhi_f16 v2, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
+; GISEL-GFX900-NEXT: v_mov_b32_e32 v0, v6
+; GISEL-GFX900-NEXT: v_mov_b32_e32 v1, v2
+; GISEL-GFX900-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX906-LABEL: v_mad_mix_v4f32_clamp_postcvt:
+; GISEL-GFX906: ; %bb.0:
+; GISEL-GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX906-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp
+; GISEL-GFX906-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
+; GISEL-GFX906-NEXT: v_fma_mixlo_f16 v2, v1, v3, v5 op_sel_hi:[1,1,1] clamp
+; GISEL-GFX906-NEXT: v_fma_mixhi_f16 v2, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp
+; GISEL-GFX906-NEXT: v_mov_b32_e32 v0, v6
+; GISEL-GFX906-NEXT: v_mov_b32_e32 v1, v2
+; GISEL-GFX906-NEXT: s_setpc_b64 s[30:31]
+;
; GISEL-VI-LABEL: v_mad_mix_v4f32_clamp_postcvt:
; GISEL-VI: ; %bb.0:
; GISEL-VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/merge-buffer-gfx12.mir b/llvm/test/CodeGen/AMDGPU/merge-buffer-gfx12.mir
new file mode 100644
index 0000000..d7f5d1a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/merge-buffer-gfx12.mir
@@ -0,0 +1,1154 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck -check-prefixes=GFX12 %s
+
+---
+name: buffer_load_dword_dwordx3
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx3
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx3_dword
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx3_dword
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_96 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub0_sub1_sub2
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub0_sub1
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx2
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dword
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dword
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub0_sub1
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+
+name: buffer_load_dword_dword
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_32
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_32
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub0_sub1_sub2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub3
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[COPY6]].sub0_sub1
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY killed [[COPY6]].sub2
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY8]].sub0
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY killed [[COPY8]].sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 36, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub0_sub1
+ ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub2
+ ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY12]].sub0
+ ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY killed [[COPY12]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %10:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 20, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %11:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 24, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %12:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 28, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %13:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 36, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %14:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 40, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %15:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 44, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+#
+# buffer_store_dword
+#
+
+name: buffer_store_dword_xyz
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dword_xyz
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[REG_SEQUENCE1]], %subreg.sub1_sub2_sub3
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_96 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1, %6:vgpr_32, %subreg.sub2
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORDX3_VBUFFER_OFFSET_exact %14:vreg_96, %13:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx3_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dwordx3_dword
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[COPY]], %subreg.sub3
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_96 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1, %6:vgpr_32, %subreg.sub2
+ BUFFER_STORE_DWORDX3_VBUFFER_OFFSET_exact %14:vreg_96, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx2_dwordx2
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dwordx2_dwordx2
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1, [[REG_SEQUENCE2]], %subreg.sub2_sub3
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE3]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ %15:vreg_64 = REG_SEQUENCE %6:vgpr_32, %subreg.sub0, %7:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact %14:vreg_64, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact %15:vreg_64, %13:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_dwordx2
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dword_dwordx2
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, %10:vreg_64, %subreg.sub1_sub2
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact %15:vreg_64, %13:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx2_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dwordx2_dword
+  ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1, [[COPY]], %subreg.sub2
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact %14:vreg_64, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dword_dword
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE1]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %6:vgpr_32, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_32
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
+ ; GFX12-LABEL: name: buffer_store_dword_32
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr8
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr7
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr6
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY11]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY9]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE1]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE killed [[REG_SEQUENCE2]], %subreg.sub0_sub1, [[COPY4]], %subreg.sub2
+ ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE killed [[REG_SEQUENCE3]], %subreg.sub0_sub1_sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE4]], [[REG_SEQUENCE]], $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ ; GFX12-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:vreg_96 = REG_SEQUENCE killed [[REG_SEQUENCE5]], %subreg.sub0_sub1, [[COPY]], %subreg.sub2
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE6]], [[REG_SEQUENCE]], $sgpr_null, 36, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %12:vgpr_32 = COPY $vgpr8
+ %11:vgpr_32 = COPY $vgpr7
+ %10:vgpr_32 = COPY $vgpr6
+ %9:vgpr_32 = COPY $vgpr5
+ %8:vgpr_32 = COPY $vgpr4
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %4:vgpr_32, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %5:vgpr_32, %13:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %6:vgpr_32, %13:sgpr_128, $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 20, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %8:vgpr_32, %13:sgpr_128, $sgpr_null, 24, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %9:vgpr_32, %13:sgpr_128, $sgpr_null, 28, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %10:vgpr_32, %13:sgpr_128, $sgpr_null, 36, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %11:vgpr_32, %13:sgpr_128, $sgpr_null, 40, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %12:vgpr_32, %13:sgpr_128, $sgpr_null, 44, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_not_merged_swizzled_0
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_not_merged_swizzled_0
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_not_merged_swizzled_1
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_not_merged_swizzled_1
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 8, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_merge_across_swizzle
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_merge_across_swizzle
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 12, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %5:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %4:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %6:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %4:sgpr_128, $sgpr_null, 12, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %4:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_not_merge_across_swizzled_store
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_not_merge_across_swizzled_store
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 6, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %6:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %4:vgpr_32, %5:sgpr_128, $sgpr_null, 6, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_merge_across_swizzled_store
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_merge_across_swizzled_store
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 12, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %6:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %4:vgpr_32, %5:sgpr_128, $sgpr_null, 12, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_idxen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_idxen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_idxen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx2_idxen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_idxen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_idxen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx3_idxen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_bothen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_bothen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_bothen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx2_bothen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_bothen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_bothen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx3_bothen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_idxen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_idxen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_idxen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx2_idxen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_idxen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx3_idxen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_idxen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_dword_idxen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact]].sub2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]].sub0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY killed [[COPY5]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_idxen_exact_swizzled_0
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_dword_idxen_exact_swizzled_0
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_bothen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_bothen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_bothen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx2_bothen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen_exact
+body: |
+ bb.0.entry:
+
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_bothen_exact
+body: |
+ bb.0.entry:
+
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx3_bothen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_bothen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_dword_bothen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact]].sub2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]].sub0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY killed [[COPY5]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_bothen_exact_swizzled_0
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_dword_bothen_exact_swizzled_0
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_vaddr
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_vaddr
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE]], $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:vreg_64 = COPY $vgpr1
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %4, %6:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %5, %6:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_srsrc
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_srsrc
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr4
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE1]], $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_32 = COPY $sgpr4
+ %5:vreg_64 = COPY $vgpr0
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:sgpr_128 = REG_SEQUENCE %1:sgpr_32, %subreg.sub0, %2:sgpr_32, %subreg.sub1, %3:sgpr_32, %subreg.sub2, %4:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %5, %6:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %5, %7:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_vaddr
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_vaddr
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY5]], [[REG_SEQUENCE]], $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:vgpr_32 = COPY $vgpr1
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %4, %6:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %5, %6:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_srsrc
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_srsrc
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr4
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY5]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY5]], [[REG_SEQUENCE1]], $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_32 = COPY $sgpr4
+ %5:vgpr_32 = COPY $vgpr0
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:sgpr_128 = REG_SEQUENCE %1:sgpr_32, %subreg.sub0, %2:sgpr_32, %subreg.sub1, %3:sgpr_32, %subreg.sub2, %4:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %5, %6:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %5, %7:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
diff --git a/llvm/test/CodeGen/AMDGPU/merge-buffer.mir b/llvm/test/CodeGen/AMDGPU/merge-buffer.mir
new file mode 100644
index 0000000..1c6d429
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/merge-buffer.mir
@@ -0,0 +1,1130 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck -check-prefixes=GCN %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck -check-prefixes=GCN %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck -check-prefixes=GCN %s
+
+---
+name: buffer_load_dword_dwordx3
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx3
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_OFFSET]].sub0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_OFFSET]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx3_dword
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx3_dword
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_96 = COPY [[BUFFER_LOAD_DWORDX4_OFFSET]].sub0_sub1_sub2
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX4_OFFSET]].sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_96 = BUFFER_LOAD_DWORDX3_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_OFFSET]].sub0_sub1
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_OFFSET]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx2
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_OFFSET]].sub0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_OFFSET]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dword
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dword
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_OFFSET]].sub0_sub1
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_OFFSET]].sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFSET]].sub0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_OFFSET]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_32
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_32
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFSET]].sub0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_OFFSET]].sub1
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE]], 0, 16, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY [[BUFFER_LOAD_DWORDX4_OFFSET]].sub0_sub1_sub2
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX4_OFFSET]].sub3
+ ; GCN-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[COPY6]].sub0_sub1
+ ; GCN-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY killed [[COPY6]].sub2
+ ; GCN-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY8]].sub0
+ ; GCN-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY killed [[COPY8]].sub1
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_OFFSET [[REG_SEQUENCE]], 0, 36, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_OFFSET]].sub0_sub1
+ ; GCN-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_OFFSET]].sub2
+ ; GCN-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY12]].sub0
+ ; GCN-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY killed [[COPY12]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %10:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 20, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %11:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 24, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %12:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 28, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %13:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 36, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %14:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 40, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %15:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 44, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+#
+# buffer_store_dword
+#
+
+name: buffer_store_dword_xyz
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dword_xyz
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[REG_SEQUENCE1]], %subreg.sub1_sub2_sub3
+ ; GCN-NEXT: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_96 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1, %6:vgpr_32, %subreg.sub2
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORDX3_OFFSET_exact %14:vreg_96, %13:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx3_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dwordx3_dword
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[COPY]], %subreg.sub3
+ ; GCN-NEXT: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_96 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1, %6:vgpr_32, %subreg.sub2
+ BUFFER_STORE_DWORDX3_OFFSET_exact %14:vreg_96, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx2_dwordx2
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dwordx2_dwordx2
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1, [[REG_SEQUENCE2]], %subreg.sub2_sub3
+ ; GCN-NEXT: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[REG_SEQUENCE3]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ %15:vreg_64 = REG_SEQUENCE %6:vgpr_32, %subreg.sub0, %7:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORDX2_OFFSET_exact %14:vreg_64, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ BUFFER_STORE_DWORDX2_OFFSET_exact %15:vreg_64, %13:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_dwordx2
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dword_dwordx2
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+    ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[REG_SEQUENCE1]], %subreg.sub1_sub2
+ ; GCN-NEXT: BUFFER_STORE_DWORDX3_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+    BUFFER_STORE_DWORDX2_OFFSET_exact %14:vreg_64, %13:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx2_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dwordx2_dword
+    ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1, [[COPY]], %subreg.sub2
+ ; GCN-NEXT: BUFFER_STORE_DWORDX3_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORDX2_OFFSET_exact %14:vreg_64, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dword_dword
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
+ ; GCN-NEXT: BUFFER_STORE_DWORDX2_OFFSET_exact killed [[REG_SEQUENCE1]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ BUFFER_STORE_DWORD_OFFSET_exact %6:vgpr_32, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_32
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
+ ; GCN-LABEL: name: buffer_store_dword_32
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr8
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr7
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr6
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY9:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY10:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY11:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY12:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY11]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY9]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GCN-NEXT: BUFFER_STORE_DWORDX2_OFFSET_exact killed [[REG_SEQUENCE1]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE killed [[REG_SEQUENCE2]], %subreg.sub0_sub1, [[COPY4]], %subreg.sub2
+ ; GCN-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE killed [[REG_SEQUENCE3]], %subreg.sub0_sub1_sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[REG_SEQUENCE4]], [[REG_SEQUENCE]], 0, 16, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ ; GCN-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:vreg_96 = REG_SEQUENCE killed [[REG_SEQUENCE5]], %subreg.sub0_sub1, [[COPY]], %subreg.sub2
+ ; GCN-NEXT: BUFFER_STORE_DWORDX3_OFFSET_exact killed [[REG_SEQUENCE6]], [[REG_SEQUENCE]], 0, 36, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %12:vgpr_32 = COPY $vgpr8
+ %11:vgpr_32 = COPY $vgpr7
+ %10:vgpr_32 = COPY $vgpr6
+ %9:vgpr_32 = COPY $vgpr5
+ %8:vgpr_32 = COPY $vgpr4
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ BUFFER_STORE_DWORD_OFFSET_exact %4:vgpr_32, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %5:vgpr_32, %13:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %6:vgpr_32, %13:sgpr_128, 0, 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 20, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %8:vgpr_32, %13:sgpr_128, 0, 24, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %9:vgpr_32, %13:sgpr_128, 0, 28, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %10:vgpr_32, %13:sgpr_128, 0, 36, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %11:vgpr_32, %13:sgpr_128, 0, 40, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %12:vgpr_32, %13:sgpr_128, 0, 44, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_not_merged_swizzled_0
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_not_merged_swizzled_0
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_not_merged_swizzled_1
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_not_merged_swizzled_1
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 8, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 8, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_merge_across_swizzle
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_merge_across_swizzle
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFSET]].sub0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_OFFSET]].sub1
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 12, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %5:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %4:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %6:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %4:sgpr_128, 0, 12, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %4:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_merge_across_swizzled_store
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_merge_across_swizzled_store
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: BUFFER_STORE_DWORD_OFFSET_exact [[COPY4]], [[REG_SEQUENCE]], 0, 6, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %6:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %4:vgpr_32, %5:sgpr_128, 0, 6, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_idxen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_idxen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_IDXEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_IDXEN]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_idxen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx2_idxen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_IDXEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_IDXEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_IDXEN]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_idxen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_IDXEN [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_IDXEN]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_IDXEN]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_idxen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx3_idxen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_IDXEN [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_IDXEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_IDXEN]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_IDXEN %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_bothen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_bothen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_BOTHEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_BOTHEN]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_bothen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx2_bothen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_BOTHEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_BOTHEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_BOTHEN]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_bothen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_BOTHEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_bothen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx3_bothen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_BOTHEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_idxen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_idxen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_IDXEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_IDXEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_idxen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx2_idxen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_IDXEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_IDXEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_IDXEN_exact]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_IDXEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_IDXEN_exact]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_IDXEN_exact]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_idxen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx3_idxen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_IDXEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_IDXEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_IDXEN_exact]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_IDXEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_idxen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_dword_idxen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_IDXEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_IDXEN_exact]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_IDXEN_exact]].sub2
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]].sub0
+ ; GCN-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY killed [[COPY5]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_idxen_exact_swizzled_0
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_dword_idxen_exact_swizzled_0
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_IDXEN_exact:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_IDXEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_IDXEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_bothen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_bothen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_BOTHEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_BOTHEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_bothen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx2_bothen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_BOTHEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_BOTHEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_BOTHEN_exact]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_BOTHEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN_exact]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_BOTHEN_exact]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_bothen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx3_bothen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_BOTHEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_BOTHEN_exact]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_bothen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_dword_bothen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_BOTHEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_BOTHEN_exact]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_BOTHEN_exact]].sub2
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]].sub0
+ ; GCN-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY killed [[COPY5]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_bothen_exact_swizzled_0
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_dword_bothen_exact_swizzled_0
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_BOTHEN_exact:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_BOTHEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_BOTHEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_vaddr
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_vaddr
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE]], 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:vreg_64 = COPY $vgpr1
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %4, %6:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %5, %6:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_srsrc
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_srsrc
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr4
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE1]], 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_32 = COPY $sgpr4
+ %5:vreg_64 = COPY $vgpr0
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:sgpr_128 = REG_SEQUENCE %1:sgpr_32, %subreg.sub0, %2:sgpr_32, %subreg.sub1, %3:sgpr_32, %subreg.sub2, %4:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %5, %6:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %5, %7:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_vaddr
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_vaddr
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY5]], [[REG_SEQUENCE]], 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:vgpr_32 = COPY $vgpr1
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %4, %6:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %5, %6:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_srsrc
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_srsrc
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr4
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY5]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY5]], [[REG_SEQUENCE1]], 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_32 = COPY $sgpr4
+ %5:vgpr_32 = COPY $vgpr0
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:sgpr_128 = REG_SEQUENCE %1:sgpr_32, %subreg.sub0, %2:sgpr_32, %subreg.sub1, %3:sgpr_32, %subreg.sub2, %4:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %5, %6:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %5, %7:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
diff --git a/llvm/test/CodeGen/AMDGPU/merge-tbuffer.mir b/llvm/test/CodeGen/AMDGPU/merge-tbuffer.mir
index c86b5ad..9766b42 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-tbuffer.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-tbuffer.mir
@@ -7,9 +7,37 @@
# GFX9 tests
#
+---
name: gfx9_tbuffer_load_x_xyz
body: |
bb.0.entry:
+ ; GFX9-LABEL: name: gfx9_tbuffer_load_x_xyz
+ ; GFX9: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX9-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX9-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX9-NEXT: [[TBUFFER_LOAD_FORMAT_XYZW_OFFSET:%[0-9]+]]:vreg_128 = TBUFFER_LOAD_FORMAT_XYZW_OFFSET [[REG_SEQUENCE]], 0, 4, 126, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GFX9-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[TBUFFER_LOAD_FORMAT_XYZW_OFFSET]].sub0
+ ; GFX9-NEXT: [[COPY5:%[0-9]+]]:vreg_96 = COPY killed [[TBUFFER_LOAD_FORMAT_XYZW_OFFSET]].sub1_sub2_sub3
+ ;
+ ; GFX10-LABEL: name: gfx9_tbuffer_load_x_xyz
+ ; GFX10: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX10-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX10-NEXT: [[TBUFFER_LOAD_FORMAT_X_OFFSET:%[0-9]+]]:vgpr_32 = TBUFFER_LOAD_FORMAT_X_OFFSET [[REG_SEQUENCE]], 0, 4, 116, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX10-NEXT: [[TBUFFER_LOAD_FORMAT_XYZ_OFFSET:%[0-9]+]]:vreg_96 = TBUFFER_LOAD_FORMAT_XYZ_OFFSET [[REG_SEQUENCE]], 0, 8, 125, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ;
+ ; GFX11-LABEL: name: gfx9_tbuffer_load_x_xyz
+ ; GFX11: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[TBUFFER_LOAD_FORMAT_X_OFFSET:%[0-9]+]]:vgpr_32 = TBUFFER_LOAD_FORMAT_X_OFFSET [[REG_SEQUENCE]], 0, 4, 116, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX11-NEXT: [[TBUFFER_LOAD_FORMAT_XYZ_OFFSET:%[0-9]+]]:vreg_96 = TBUFFER_LOAD_FORMAT_XYZ_OFFSET [[REG_SEQUENCE]], 0, 8, 125, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
%0:sgpr_32 = COPY $sgpr0
%1:sgpr_32 = COPY $sgpr1
%2:sgpr_32 = COPY $sgpr2
diff --git a/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll b/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
index cbdc7bb..69971bc 100644
--- a/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
+++ b/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
@@ -27,7 +27,7 @@ define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
; CHECK-LABEL: csr_vgpr_spill_fp_callee:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_mov_b32 s24, s33
+; CHECK-NEXT: s_mov_b32 s18, s33
; CHECK-NEXT: s_mov_b32 s33, s32
; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1
; CHECK-NEXT: buffer_store_dword v1, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
@@ -43,7 +43,6 @@ define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
; CHECK-NEXT: s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; clobber csr v40
@@ -55,7 +54,7 @@ define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
; CHECK-NEXT: buffer_load_dword v1, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
; CHECK-NEXT: s_mov_b64 exec, s[4:5]
; CHECK-NEXT: s_add_i32 s32, s32, 0xfffffc00
-; CHECK-NEXT: s_mov_b32 s33, s24
+; CHECK-NEXT: s_mov_b32 s33, s18
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
bb:
@@ -88,7 +87,6 @@ define amdgpu_kernel void @kernel_call() {
; CHECK-NEXT: ; implicit-def: $sgpr15
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: s_endpgm
bb:
@@ -148,7 +146,6 @@ define amdgpu_kernel void @kernel_tailcall() {
; CHECK-NEXT: ; implicit-def: $sgpr15
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: s_endpgm
bb:
@@ -173,7 +170,7 @@ define hidden i32 @caller_save_vgpr_spill_fp_tail_call() #0 {
; CHECK-LABEL: caller_save_vgpr_spill_fp_tail_call:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_mov_b32 s24, s33
+; CHECK-NEXT: s_mov_b32 s18, s33
; CHECK-NEXT: s_mov_b32 s33, s32
; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1
; CHECK-NEXT: buffer_store_dword v1, off, s[0:3], s33 ; 4-byte Folded Spill
@@ -188,7 +185,6 @@ define hidden i32 @caller_save_vgpr_spill_fp_tail_call() #0 {
; CHECK-NEXT: s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: v_readlane_b32 s31, v1, 1
; CHECK-NEXT: v_readlane_b32 s30, v1, 0
@@ -196,7 +192,7 @@ define hidden i32 @caller_save_vgpr_spill_fp_tail_call() #0 {
; CHECK-NEXT: buffer_load_dword v1, off, s[0:3], s33 ; 4-byte Folded Reload
; CHECK-NEXT: s_mov_b64 exec, s[4:5]
; CHECK-NEXT: s_add_i32 s32, s32, 0xfffffc00
-; CHECK-NEXT: s_mov_b32 s33, s24
+; CHECK-NEXT: s_mov_b32 s33, s18
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
entry:
@@ -208,7 +204,7 @@ define hidden i32 @caller_save_vgpr_spill_fp() #0 {
; CHECK-LABEL: caller_save_vgpr_spill_fp:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_mov_b32 s25, s33
+; CHECK-NEXT: s_mov_b32 s19, s33
; CHECK-NEXT: s_mov_b32 s33, s32
; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1
; CHECK-NEXT: buffer_store_dword v2, off, s[0:3], s33 ; 4-byte Folded Spill
@@ -223,7 +219,6 @@ define hidden i32 @caller_save_vgpr_spill_fp() #0 {
; CHECK-NEXT: s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: v_readlane_b32 s31, v2, 1
; CHECK-NEXT: v_readlane_b32 s30, v2, 0
@@ -231,7 +226,7 @@ define hidden i32 @caller_save_vgpr_spill_fp() #0 {
; CHECK-NEXT: buffer_load_dword v2, off, s[0:3], s33 ; 4-byte Folded Reload
; CHECK-NEXT: s_mov_b64 exec, s[4:5]
; CHECK-NEXT: s_add_i32 s32, s32, 0xfffffc00
-; CHECK-NEXT: s_mov_b32 s33, s25
+; CHECK-NEXT: s_mov_b32 s33, s19
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
entry:
@@ -263,7 +258,6 @@ define protected amdgpu_kernel void @kernel() {
; CHECK-NEXT: ; implicit-def: $sgpr15
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: s_endpgm
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/neighboring-mfma-padding.mir b/llvm/test/CodeGen/AMDGPU/neighboring-mfma-padding.mir
index 3de258b..bf2cf6a 100644
--- a/llvm/test/CodeGen/AMDGPU/neighboring-mfma-padding.mir
+++ b/llvm/test/CodeGen/AMDGPU/neighboring-mfma-padding.mir
@@ -5,6 +5,14 @@
# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -amdgpu-mfma-padding-ratio=75 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx908-PAD75 %s
# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -amdgpu-mfma-padding-ratio=100 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx908-PAD100 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx90a-DEFAULT %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -amdgpu-mfma-padding-ratio=50 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx90a-PAD50 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -amdgpu-mfma-padding-ratio=100 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx90a-PAD100 %s
+
+# RUN: llc -mtriple=amdgcn -mcpu=gfx940 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx940-DEFAULT %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx940 -amdgpu-mfma-padding-ratio=50 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx940-PAD50 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx940 -amdgpu-mfma-padding-ratio=100 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx940-PAD100 %s
+
---
name: mfma_padding_2_pass
body: |
@@ -31,6 +39,35 @@ body: |
; gfx908-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
; gfx908-PAD100-NEXT: S_NOP 1
; gfx908-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_2_pass
+ ; gfx90a-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_2_pass
+ ; gfx90a-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 0
+ ; gfx90a-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_2_pass
+ ; gfx90a-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 1
+ ; gfx90a-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_2_pass
+ ; gfx940-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: S_NOP 1
+ ; gfx940-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_2_pass
+ ; gfx940-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 1
+ ; gfx940-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_2_pass
+ ; gfx940-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 1
+ ; gfx940-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
...
@@ -64,6 +101,40 @@ body: |
; gfx908-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
; gfx908-PAD100-NEXT: S_NOP 0
; gfx908-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx90a-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx90a-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx90a-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 0
+ ; gfx90a-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx940-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: S_NOP 0
+ ; gfx940-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx940-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 0
+ ; gfx940-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx940-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 0
+ ; gfx940-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
$vgpr2 = V_MOV_B32_e32 1, implicit $exec
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
@@ -100,6 +171,41 @@ body: |
; gfx908-PAD100-NEXT: DBG_VALUE
; gfx908-PAD100-NEXT: S_NOP 1
; gfx908-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx90a-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: DBG_VALUE
+ ; gfx90a-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx90a-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: DBG_VALUE
+ ; gfx90a-PAD50-NEXT: S_NOP 0
+ ; gfx90a-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx90a-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: DBG_VALUE
+ ; gfx90a-PAD100-NEXT: S_NOP 1
+ ; gfx90a-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx940-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: DBG_VALUE
+ ; gfx940-DEFAULT-NEXT: S_NOP 1
+ ; gfx940-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx940-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: DBG_VALUE
+ ; gfx940-PAD50-NEXT: S_NOP 1
+ ; gfx940-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx940-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: DBG_VALUE
+ ; gfx940-PAD100-NEXT: S_NOP 1
+ ; gfx940-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
DBG_VALUE
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
@@ -132,6 +238,34 @@ body: |
; gfx908-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
; gfx908-PAD100-NEXT: S_NOP 7
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_8_pass
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_8_pass
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 3
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_8_pass
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 7
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_8_pass
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_8_pass
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 3
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_8_pass
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 7
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
...
@@ -172,6 +306,46 @@ body: |
; gfx908-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
; gfx908-PAD100-NEXT: S_NOP 5
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 1
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 5
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 1
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 5
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$vgpr2 = V_MOV_B32_e32 1, implicit $exec
$vgpr3 = V_MOV_B32_e32 1, implicit $exec
@@ -207,6 +381,36 @@ body: |
; gfx908-PAD100-NEXT: S_NOP 7
; gfx908-PAD100-NEXT: S_NOP 7
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_16_pass
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_16_pass
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 7
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_16_pass
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 7
+ ; gfx90a-PAD100-NEXT: S_NOP 7
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_16_pass
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_16_pass
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 7
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_16_pass
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 7
+ ; gfx940-PAD100-NEXT: S_NOP 7
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
...
@@ -258,6 +462,60 @@ body: |
; gfx908-PAD100-NEXT: S_NOP 7
; gfx908-PAD100-NEXT: S_NOP 3
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 3
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 7
+ ; gfx90a-PAD100-NEXT: S_NOP 3
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 3
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 7
+ ; gfx940-PAD100-NEXT: S_NOP 3
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$vgpr2 = V_MOV_B32_e32 1, implicit $exec
$vgpr3 = V_MOV_B32_e32 1, implicit $exec
@@ -369,6 +627,126 @@ body: |
; gfx908-PAD100-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
; gfx908-PAD100-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$vgpr2 = V_MOV_B32_e32 1, implicit $exec
$vgpr3 = V_MOV_B32_e32 1, implicit $exec
@@ -414,6 +792,30 @@ body: |
; gfx908-PAD100-LABEL: name: mfma_padding_16_pass_occ_1
; gfx908-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
...
@@ -506,6 +908,108 @@ body: |
; gfx908-PAD100-NEXT: S_NOP 7
; gfx908-PAD100-NEXT: S_NOP 5
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx90a-DEFAULT: bb.0:
+ ; gfx90a-DEFAULT-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx90a-DEFAULT-NEXT: {{ $}}
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx90a-DEFAULT-NEXT: {{ $}}
+ ; gfx90a-DEFAULT-NEXT: bb.1:
+ ; gfx90a-DEFAULT-NEXT: successors: %bb.2(0x80000000)
+ ; gfx90a-DEFAULT-NEXT: {{ $}}
+ ; gfx90a-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: {{ $}}
+ ; gfx90a-DEFAULT-NEXT: bb.2:
+ ; gfx90a-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx90a-PAD50: bb.0:
+ ; gfx90a-PAD50-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx90a-PAD50-NEXT: {{ $}}
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx90a-PAD50-NEXT: {{ $}}
+ ; gfx90a-PAD50-NEXT: bb.1:
+ ; gfx90a-PAD50-NEXT: successors: %bb.2(0x80000000)
+ ; gfx90a-PAD50-NEXT: {{ $}}
+ ; gfx90a-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: {{ $}}
+ ; gfx90a-PAD50-NEXT: bb.2:
+ ; gfx90a-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 5
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx90a-PAD100: bb.0:
+ ; gfx90a-PAD100-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx90a-PAD100-NEXT: {{ $}}
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx90a-PAD100-NEXT: {{ $}}
+ ; gfx90a-PAD100-NEXT: bb.1:
+ ; gfx90a-PAD100-NEXT: successors: %bb.2(0x80000000)
+ ; gfx90a-PAD100-NEXT: {{ $}}
+ ; gfx90a-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: {{ $}}
+ ; gfx90a-PAD100-NEXT: bb.2:
+ ; gfx90a-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 7
+ ; gfx90a-PAD100-NEXT: S_NOP 5
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx940-DEFAULT: bb.0:
+ ; gfx940-DEFAULT-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx940-DEFAULT-NEXT: {{ $}}
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx940-DEFAULT-NEXT: {{ $}}
+ ; gfx940-DEFAULT-NEXT: bb.1:
+ ; gfx940-DEFAULT-NEXT: successors: %bb.2(0x80000000)
+ ; gfx940-DEFAULT-NEXT: {{ $}}
+ ; gfx940-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: {{ $}}
+ ; gfx940-DEFAULT-NEXT: bb.2:
+ ; gfx940-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx940-PAD50: bb.0:
+ ; gfx940-PAD50-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx940-PAD50-NEXT: {{ $}}
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx940-PAD50-NEXT: {{ $}}
+ ; gfx940-PAD50-NEXT: bb.1:
+ ; gfx940-PAD50-NEXT: successors: %bb.2(0x80000000)
+ ; gfx940-PAD50-NEXT: {{ $}}
+ ; gfx940-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: {{ $}}
+ ; gfx940-PAD50-NEXT: bb.2:
+ ; gfx940-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 5
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx940-PAD100: bb.0:
+ ; gfx940-PAD100-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx940-PAD100-NEXT: {{ $}}
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx940-PAD100-NEXT: {{ $}}
+ ; gfx940-PAD100-NEXT: bb.1:
+ ; gfx940-PAD100-NEXT: successors: %bb.2(0x80000000)
+ ; gfx940-PAD100-NEXT: {{ $}}
+ ; gfx940-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: {{ $}}
+ ; gfx940-PAD100-NEXT: bb.2:
+ ; gfx940-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 7
+ ; gfx940-PAD100-NEXT: S_NOP 5
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
bb.0:
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
diff --git a/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll b/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
index 34e67d0..9999cb9 100644
--- a/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
+++ b/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
@@ -32,7 +32,6 @@ define hidden void @_ZL3barv() #0 !dbg !1644 {
; CHECK-NEXT: s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: .Ltmp1:
diff --git a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll
new file mode 100644
index 0000000..538ce15
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll
@@ -0,0 +1,305 @@
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck %s
+
+; CHECK: .amdgpu_pal_metadata
+; CHECK-NEXT: ---
+; CHECK-NEXT: amdpal.pipelines:
+; CHECK-NEXT: - .api: Vulkan
+; CHECK-NEXT: .compute_registers:
+; CHECK-NEXT: .tg_size_en: true
+; CHECK-NEXT: .tgid_x_en: false
+; CHECK-NEXT: .tgid_y_en: false
+; CHECK-NEXT: .tgid_z_en: false
+; CHECK-NEXT: .tidig_comp_cnt: 0x1
+; CHECK-NEXT: .hardware_stages:
+; CHECK-NEXT: .cs:
+; CHECK-NEXT: .checksum_value: 0x9444d7d0
+; CHECK-NEXT: .debug_mode: 0
+; CHECK-NEXT: .excp_en: 0
+; CHECK-NEXT: .float_mode: 0xc0
+; CHECK-NEXT: .ieee_mode: true
+; CHECK-NEXT: .image_op: false
+; CHECK-NEXT: .lds_size: 0x200
+; CHECK-NEXT: .mem_ordered: true
+; CHECK-NEXT: .sgpr_limit: 0x6a
+; CHECK-NEXT: .threadgroup_dimensions:
+; CHECK-NEXT: - 0x1
+; CHECK-NEXT: - 0x400
+; CHECK-NEXT: - 0x1
+; CHECK-NEXT: .trap_present: false
+; CHECK-NEXT: .user_data_reg_map:
+; CHECK-NEXT: - 0x10000000
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: .user_sgprs: 0x3
+; CHECK-NEXT: .vgpr_limit: 0x100
+; CHECK-NEXT: .wavefront_size: 0x40
+; CHECK-NEXT: .wgp_mode: true
+; CHECK: .registers: {}
+; CHECK-NEXT: .shader_functions:
+; CHECK-NEXT: dynamic_stack:
+; CHECK-NEXT: .backend_stack_size: 0x10
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x22
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x10
+; CHECK-NEXT: .vgpr_count: 0x2
+; CHECK-NEXT: dynamic_stack_loop:
+; CHECK-NEXT: .backend_stack_size: 0x10
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x22
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x10
+; CHECK-NEXT: .vgpr_count: 0x3
+; CHECK-NEXT: multiple_stack:
+; CHECK-NEXT: .backend_stack_size: 0x24
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x21
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x24
+; CHECK-NEXT: .vgpr_count: 0x3
+; CHECK-NEXT: no_stack:
+; CHECK-NEXT: .backend_stack_size: 0
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x20
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0
+; CHECK-NEXT: .vgpr_count: 0x1
+; CHECK-NEXT: no_stack_call:
+; CHECK-NEXT: .backend_stack_size: 0x10
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x22
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x10
+; CHECK-NEXT: .vgpr_count: 0x3
+; CHECK-NEXT: no_stack_extern_call:
+; CHECK-NEXT: .backend_stack_size: 0x10
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x29
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x10
+; CHECK-NEXT: .vgpr_count: 0x58
+; CHECK-NEXT: no_stack_extern_call_many_args:
+; CHECK-NEXT: .backend_stack_size: 0x90
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x29
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x90
+; CHECK-NEXT: .vgpr_count: 0x58
+; CHECK-NEXT: no_stack_indirect_call:
+; CHECK-NEXT: .backend_stack_size: 0x10
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x29
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x10
+; CHECK-NEXT: .vgpr_count: 0x58
+; CHECK-NEXT: simple_lds:
+; CHECK-NEXT: .backend_stack_size: 0
+; CHECK-NEXT: .lds_size: 0x100
+; CHECK-NEXT: .sgpr_count: 0x20
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0
+; CHECK-NEXT: .vgpr_count: 0x1
+; CHECK-NEXT: simple_lds_recurse:
+; CHECK-NEXT: .backend_stack_size: 0x10
+; CHECK-NEXT: .lds_size: 0x100
+; CHECK-NEXT: .sgpr_count: 0x24
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x10
+; CHECK-NEXT: .vgpr_count: 0x29
+; CHECK-NEXT: simple_stack:
+; CHECK-NEXT: .backend_stack_size: 0x14
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x21
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x14
+; CHECK-NEXT: .vgpr_count: 0x2
+; CHECK-NEXT: simple_stack_call:
+; CHECK-NEXT: .backend_stack_size: 0x20
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x22
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x20
+; CHECK-NEXT: .vgpr_count: 0x4
+; CHECK-NEXT: simple_stack_extern_call:
+; CHECK-NEXT: .backend_stack_size: 0x20
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x29
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x20
+; CHECK-NEXT: .vgpr_count: 0x58
+; CHECK-NEXT: simple_stack_indirect_call:
+; CHECK-NEXT: .backend_stack_size: 0x20
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x29
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x20
+; CHECK-NEXT: .vgpr_count: 0x58
+; CHECK-NEXT: simple_stack_recurse:
+; CHECK-NEXT: .backend_stack_size: 0x20
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .sgpr_count: 0x24
+; CHECK-NEXT: .stack_frame_size_in_bytes: 0x20
+; CHECK-NEXT: .vgpr_count: 0x2a
+; CHECK:amdpal.version:
+; CHECK-NEXT: - 0x3
+; CHECK-NEXT: - 0
+; CHECK-NEXT:...
+; CHECK-NEXT: .end_amdgpu_pal_metadata
+
+declare amdgpu_gfx float @extern_func(float) #0
+declare amdgpu_gfx float @extern_func_many_args(<64 x float>) #0
+
+@funcptr = external hidden unnamed_addr addrspace(4) constant ptr, align 4
+
+define amdgpu_gfx float @no_stack(float %arg0) #0 {
+ %add = fadd float %arg0, 1.0
+ ret float %add
+}
+
+define amdgpu_gfx float @simple_stack(float %arg0) #0 {
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %add = fadd float %arg0, %val
+ ret float %add
+}
+
+define amdgpu_gfx float @multiple_stack(float %arg0) #0 {
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %add = fadd float %arg0, %val
+ %stack2 = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack2
+ %val2 = load volatile float, ptr addrspace(5) %stack2
+ %add2 = fadd float %add, %val2
+ ret float %add2
+}
+
+define amdgpu_gfx float @dynamic_stack(float %arg0) #0 {
+bb0:
+ %cmp = fcmp ogt float %arg0, 0.0
+ br i1 %cmp, label %bb1, label %bb2
+
+bb1:
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %add = fadd float %arg0, %val
+ br label %bb2
+
+bb2:
+ %res = phi float [ 0.0, %bb0 ], [ %add, %bb1 ]
+ ret float %res
+}
+
+define amdgpu_gfx float @dynamic_stack_loop(float %arg0) #0 {
+bb0:
+ br label %bb1
+
+bb1:
+ %ctr = phi i32 [ 0, %bb0 ], [ %newctr, %bb1 ]
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %add = fadd float %arg0, %val
+ %cmp = icmp sgt i32 %ctr, 0
+ %newctr = sub i32 %ctr, 1
+ br i1 %cmp, label %bb1, label %bb2
+
+bb2:
+ ret float %add
+}
+
+define amdgpu_gfx float @no_stack_call(float %arg0) #0 {
+ %res = call amdgpu_gfx float @simple_stack(float %arg0)
+ ret float %res
+}
+
+define amdgpu_gfx float @simple_stack_call(float %arg0) #0 {
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %res = call amdgpu_gfx float @simple_stack(float %arg0)
+ %add = fadd float %res, %val
+ ret float %add
+}
+
+define amdgpu_gfx float @no_stack_extern_call(float %arg0) #0 {
+ %res = call amdgpu_gfx float @extern_func(float %arg0)
+ ret float %res
+}
+
+define amdgpu_gfx float @simple_stack_extern_call(float %arg0) #0 {
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %res = call amdgpu_gfx float @extern_func(float %arg0)
+ %add = fadd float %res, %val
+ ret float %add
+}
+
+define amdgpu_gfx float @no_stack_extern_call_many_args(<64 x float> %arg0) #0 {
+ %res = call amdgpu_gfx float @extern_func_many_args(<64 x float> %arg0)
+ ret float %res
+}
+
+define amdgpu_gfx float @no_stack_indirect_call(float %arg0) #0 {
+ %fptr = load ptr, ptr addrspace(4) @funcptr
+ call amdgpu_gfx void %fptr()
+ ret float %arg0
+}
+
+define amdgpu_gfx float @simple_stack_indirect_call(float %arg0) #0 {
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %fptr = load ptr, ptr addrspace(4) @funcptr
+ call amdgpu_gfx void %fptr()
+ %add = fadd float %arg0, %val
+ ret float %add
+}
+
+define amdgpu_gfx float @simple_stack_recurse(float %arg0) #0 {
+ %stack = alloca float, i32 4, align 4, addrspace(5)
+ store volatile float 2.0, ptr addrspace(5) %stack
+ %val = load volatile float, ptr addrspace(5) %stack
+ %res = call amdgpu_gfx float @simple_stack_recurse(float %arg0)
+ %add = fadd float %res, %val
+ ret float %add
+}
+
+@lds = internal addrspace(3) global [64 x float] undef
+
+define amdgpu_gfx float @simple_lds(float %arg0) #0 {
+ %val = load float, ptr addrspace(3) @lds
+ ret float %val
+}
+
+define amdgpu_gfx float @simple_lds_recurse(float %arg0) #0 {
+ %val = load float, ptr addrspace(3) @lds
+ %res = call amdgpu_gfx float @simple_lds_recurse(float %val)
+ ret float %res
+}
+
+attributes #0 = { nounwind }
+
+!amdgpu.pal.metadata.msgpack = !{!0}
+
+!0 = !{!"\82\B0amdpal.pipelines\91\8A\A4.api\A6Vulkan\B2.compute_registers\85\AB.tg_size_en\C3\AA.tgid_x_en\C2\AA.tgid_y_en\C2\AA.tgid_z_en\C2\AF.tidig_comp_cnt\01\B0.hardware_stages\81\A3.cs\8C\AF.checksum_value\CE\94D\D7\D0\AB.debug_mode\00\AB.float_mode\CC\C0\A9.image_op\C2\AC.mem_ordered\C3\AB.sgpr_limitj\B7.threadgroup_dimensions\93\01\CD\04\00\01\AD.trap_present\00\B2.user_data_reg_map\DC\00 \CE\10\00\00\00\CE\FF\FF\FF\FF\00\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\AB.user_sgprs\03\AB.vgpr_limit\CD\01\00\AF.wavefront_size@\B7.internal_pipeline_hash\92\CF\E7\10k\A6:\A6%\F7\CF\B2\1F\1A\D4{\DA\E1T\AA.registers\80\A8.shaders\81\A8.compute\82\B0.api_shader_hash\92\CF\E9Zn7}\1E\B9\E7\00\B1.hardware_mapping\91\A3.cs\B0.spill_threshold\CE\FF\FF\FF\FF\A5.type\A2Cs\B0.user_data_limit\01\AF.xgl_cache_info\82\B3.128_bit_cache_hash\92\CF\B4X\B8\11[\A4\88P\CF\A0;\B0\AF\FF\B4\BE\C0\AD.llpc_version\A461.1\AEamdpal.version\92\03\00"}
+!1 = !{i32 7}
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll b/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll
index a70488a..a030f86 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll
@@ -1,17 +1,20 @@
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -asm-verbose=0 < %s | FileCheck -check-prefixes=GCN,HSA %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -filetype=obj < %s | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,HSA %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -filetype=obj < %s | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,NON-HSA %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -asm-verbose=0 < %s | FileCheck -check-prefixes=GCN,HSA,ASM %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -filetype=obj < %s | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,HSA,OBJ %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -filetype=obj < %s | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,NON-HSA,OBJ %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -asm-verbose=0 < %s | llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx940 -filetype=obj | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,HSA,OBJ %s
; GCN: preload_kernarg_header
; HSA: s_trap 2
; NON-HSA: s_endpgm
-; GCN-COUNT-63: s_nop 0
+; ASM: .fill 63, 4, 0xbf800000 ; s_nop 0
+; OBJ-COUNT-63: s_nop 0
define amdgpu_kernel void @preload_kernarg_header(ptr %arg) {
store ptr %arg, ptr %arg
ret void
}
; GCN: non_kernel_function
+; GCN-NOT: s_trap 2
; GCN-NOT: s_nop 0
; GCN: flat_store
define void @non_kernel_function(ptr %arg) {
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll
index e7488e0..20edbd6 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll
@@ -157,27 +157,27 @@ define amdgpu_kernel void @test_preload_hint_kernel_1_call_func(ptr %0) #0 {
define amdgpu_kernel void @test_preload_hint_kernel_1_call_intrinsic(i16 %0) #0 {
; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; NO-PRELOAD-SAME: (i16 [[TMP0:%.*]]) #[[ATTR2]] {
+; NO-PRELOAD-SAME: (i16 [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
; NO-PRELOAD-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
; NO-PRELOAD-NEXT: ret void
;
; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-1-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR2]] {
+; PRELOAD-1-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
; PRELOAD-1-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
; PRELOAD-1-NEXT: ret void
;
; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-3-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR2]] {
+; PRELOAD-3-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
; PRELOAD-3-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
; PRELOAD-3-NEXT: ret void
;
; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-16-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR2]] {
+; PRELOAD-16-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
; PRELOAD-16-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
; PRELOAD-16-NEXT: ret void
;
; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-20-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR2]] {
+; PRELOAD-20-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
; PRELOAD-20-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
; PRELOAD-20-NEXT: ret void
;
@@ -235,23 +235,23 @@ define amdgpu_kernel void @test_preload_hint_kernel_2_preexisting(i32 inreg %0,
define amdgpu_kernel void @test_preload_hint_kernel_incompatible_attributes(ptr addrspace(4) byref(i32) %0, ptr nest %1) {
; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; NO-PRELOAD-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
+; NO-PRELOAD-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
; NO-PRELOAD-NEXT: ret void
;
; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-1-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
+; PRELOAD-1-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
; PRELOAD-1-NEXT: ret void
;
; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-3-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
+; PRELOAD-3-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
; PRELOAD-3-NEXT: ret void
;
; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-16-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
+; PRELOAD-16-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
; PRELOAD-16-NEXT: ret void
;
; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-20-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
+; PRELOAD-20-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
; PRELOAD-20-NEXT: ret void
;
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
index d20c3a4..f0e709b 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
@@ -24,70 +24,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_i8:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -98,70 +36,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_i8:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_and_b32 s0, s4, 0xff
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -170,70 +46,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_i8:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_and_b32 s0, s4, 0xff
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -242,70 +56,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_i8:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_and_b32 s0, s4, 0xff
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -325,70 +77,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_i8:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -399,70 +89,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_i8:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_and_b32 s0, s8, 0xff
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -471,70 +99,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_i8:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_and_b32 s0, s8, 0xff
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -543,70 +109,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_i8:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_and_b32 s0, s8, 0xff
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -631,70 +135,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_i8_zext_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -705,70 +147,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_i8_zext_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_mov_b32 s0, 0xffff
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
@@ -778,70 +158,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_i8_zext_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_mov_b32 s0, 0xffff
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s4
@@ -851,70 +169,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_i8_zext_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_mov_b32 s0, 0xffff
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
@@ -935,70 +191,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_i8_zext_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -1009,70 +203,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_i8_zext_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_mov_b32 s0, 0xffff
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
@@ -1082,70 +214,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_i8_zext_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_mov_b32 s0, 0xffff
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s8
@@ -1155,70 +225,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_i8_zext_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_mov_b32 s0, 0xffff
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s8
@@ -1244,70 +252,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_i16_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -1318,70 +264,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_i16_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_and_b32 s0, s4, 0xffff
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -1390,70 +274,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_i16_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_and_b32 s0, s4, 0xffff
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -1462,70 +284,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_i16_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_and_b32 s0, s4, 0xffff
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -1545,70 +305,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_i16_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -1619,70 +317,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_i16_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_and_b32 s0, s8, 0xffff
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -1691,70 +327,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_i16_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_and_b32 s0, s8, 0xffff
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -1763,70 +337,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_i16_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_and_b32 s0, s8, 0xffff
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -1850,70 +362,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_i32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -1923,70 +373,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_i32_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
@@ -1994,70 +382,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_i32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s4
@@ -2065,70 +391,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_i32_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
@@ -2146,70 +410,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -2219,70 +421,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
@@ -2290,70 +430,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s8
@@ -2361,70 +439,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s8
@@ -2449,70 +465,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: i32_ptr1_i32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s3, s[0:1], 0x10
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x8
@@ -2524,70 +478,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: i32_ptr1_i32_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_load_dword s0, s[0:1], 0x10
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -2598,70 +490,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: i32_ptr1_i32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_add_i32 s0, s2, s6
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -2670,70 +500,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: i32_ptr1_i32_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_add_i32 s0, s2, s6
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -2754,70 +522,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: i32_ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s2, s[4:5], 0x10
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
@@ -2829,70 +535,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: i32_ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_load_dword s0, s[4:5], 0x10
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -2903,70 +547,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: i32_ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_add_i32 s0, s6, s10
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -2975,70 +557,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: i32_ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_add_i32 s0, s6, s10
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -3065,70 +585,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_i16_i16_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -3141,70 +599,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_i16_i16_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-2-NEXT: s_and_b32 s1, s4, 0xffff
@@ -3217,70 +613,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_i16_i16_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 16
; GFX940-PRELOAD-4-NEXT: s_and_b32 s1, s4, 0xffff
@@ -3291,70 +625,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_i16_i16_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 16
; GFX940-PRELOAD-8-NEXT: s_and_b32 s1, s4, 0xffff
@@ -3378,70 +650,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_i16_i16_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -3454,70 +664,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_i16_i16_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-2-NEXT: s_and_b32 s1, s8, 0xffff
@@ -3530,70 +678,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_i16_i16_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 16
; GFX90a-PRELOAD-4-NEXT: s_and_b32 s1, s8, 0xffff
@@ -3604,70 +690,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_i16_i16_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 16
; GFX90a-PRELOAD-8-NEXT: s_and_b32 s1, s8, 0xffff
@@ -3695,70 +719,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_v2i8_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -3768,70 +730,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_v2i8_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -3841,70 +741,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_v2i8_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -3914,70 +752,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_v2i8_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -3997,70 +773,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_v2i8_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -4070,70 +784,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_v2i8_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -4143,70 +795,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_v2i8_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -4216,70 +806,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_v2i8_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -4308,70 +836,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: byref_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -4385,70 +851,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: byref_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -4462,70 +866,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: byref_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -4539,70 +881,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: byref_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -4630,70 +910,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: byref_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -4707,70 +925,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: byref_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -4784,70 +940,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: byref_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -4861,70 +955,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: byref_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -4964,70 +996,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v8i32_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
@@ -5046,70 +1016,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v8i32_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v4, 0
@@ -5128,70 +1036,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v8i32_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
@@ -5210,70 +1056,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v8i32_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v4, 0
@@ -5311,70 +1095,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v8i32_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
@@ -5393,70 +1115,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v8i32_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v4, 0
@@ -5475,70 +1135,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v8i32_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
@@ -5557,70 +1155,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v8i32_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v4, 0
@@ -5654,70 +1190,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v3i16_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -5729,70 +1203,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v3i16_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s5
@@ -5802,70 +1214,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v3i16_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s5
@@ -5875,70 +1225,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v3i16_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s5
@@ -5959,70 +1247,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v3i16_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -6034,70 +1260,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v3i16_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s9
@@ -6107,70 +1271,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v3i16_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s9
@@ -6180,70 +1282,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v3i16_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s9
@@ -6269,70 +1309,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v3i32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
@@ -6344,70 +1322,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v3i32_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s6
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s7
@@ -6417,70 +1333,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v3i32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s6
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s7
@@ -6490,70 +1344,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v3i32_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s6
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s7
@@ -6575,70 +1367,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v3i32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
@@ -6650,70 +1380,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v3i32_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s10
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s11
@@ -6723,70 +1391,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v3i32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s10
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s11
@@ -6796,70 +1402,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v3i32_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s10
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s11
@@ -6885,70 +1429,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v3f32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
@@ -6960,70 +1442,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v3f32_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s6
@@ -7033,70 +1453,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v3f32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s6
@@ -7106,70 +1464,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v3f32_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s6
@@ -7191,70 +1487,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v3f32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
@@ -7266,70 +1500,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v3f32_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s10
@@ -7339,70 +1511,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v3f32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s10
@@ -7412,70 +1522,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v3f32_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s10
@@ -7500,70 +1548,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v5i8_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -7575,70 +1561,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v5i8_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -7655,70 +1579,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v5i8_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -7735,70 +1597,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v5i8_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -7826,70 +1626,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v5i8_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -7901,70 +1639,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v5i8_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -7981,70 +1657,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v5i8_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -8061,70 +1675,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v5i8_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -8167,70 +1719,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v5f64_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
; GFX940-PRELOAD-1-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
@@ -8252,70 +1742,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v5f64_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
; GFX940-PRELOAD-2-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
@@ -8337,70 +1765,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v5f64_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
; GFX940-PRELOAD-4-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
@@ -8422,70 +1788,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v5f64_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
; GFX940-PRELOAD-8-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
@@ -8529,70 +1833,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v5f64_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
@@ -8614,70 +1856,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v5f64_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
; GFX90a-PRELOAD-2-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
@@ -8699,70 +1879,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v5f64_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
; GFX90a-PRELOAD-4-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
@@ -8784,70 +1902,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v5f64_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
; GFX90a-PRELOAD-8-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
@@ -8882,70 +1938,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v8i8_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -8955,70 +1949,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v8i8_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s5, 8
; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9042,70 +1974,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v8i8_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s5, 8
; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9129,70 +1999,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v8i8_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s5, 8
; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9225,70 +2033,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v8i8_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -9298,70 +2044,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v8i8_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s9, 8
; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9384,70 +2068,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v8i8_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s9, 8
; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9470,70 +2092,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v8i8_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s9, 8
; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9570,70 +2130,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: i64_kernel_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -9643,70 +2141,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: i64_kernel_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-2-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -9714,70 +2150,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: i64_kernel_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-4-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -9785,70 +2159,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: i64_kernel_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-8-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -9866,70 +2178,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: i64_kernel_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -9939,70 +2189,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: i64_kernel_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-2-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
@@ -10010,70 +2198,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: i64_kernel_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-4-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
@@ -10081,70 +2207,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: i64_kernel_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-8-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
@@ -10166,70 +2230,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: f64_kernel_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -10239,70 +2241,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: f64_kernel_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-2-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -10310,70 +2250,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: f64_kernel_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-4-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -10381,70 +2259,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: f64_kernel_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-8-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -10462,70 +2278,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: f64_kernel_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -10535,70 +2289,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: f64_kernel_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-2-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
@@ -10606,70 +2298,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: f64_kernel_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-4-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
@@ -10677,70 +2307,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: f64_kernel_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-8-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-scoring.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-scoring.ll
new file mode 100644
index 0000000..ab03177
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/promote-alloca-scoring.ll
@@ -0,0 +1,69 @@
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -debug-only=amdgpu-promote-alloca -amdgpu-promote-alloca-to-vector-limit=512 -passes=amdgpu-promote-alloca %s -o - 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+; CHECK: Scoring: %simpleuser = alloca [4 x i64], align 4, addrspace(5)
+; CHECK-NEXT: [+1]: store i32 42, ptr addrspace(5) %simpleuser, align 4
+; CHECK-NEXT: => Final Score:1
+; CHECK-NEXT: Scoring: %manyusers = alloca [4 x i64], align 4, addrspace(5)
+; CHECK-NEXT: [+1]: store i32 %v0.ext, ptr addrspace(5) %manyusers.1, align 4
+; CHECK-NEXT: [+1]: %v0 = load i8, ptr addrspace(5) %manyusers.1, align 1
+; CHECK-NEXT: [+1]: store i32 %v1.ext, ptr addrspace(5) %manyusers.2, align 4
+; CHECK-NEXT: [+1]: %v1 = load i8, ptr addrspace(5) %manyusers.2, align 1
+; CHECK-NEXT: => Final Score:4
+; CHECK-NEXT: Sorted Worklist:
+; CHECK-NEXT: %manyusers = alloca [4 x i64], align 4, addrspace(5)
+; CHECK-NEXT: %simpleuser = alloca [4 x i64], align 4, addrspace(5)
+define amdgpu_kernel void @simple_users_scores() #0 {
+entry:
+ ; should get a score of 1
+ %simpleuser = alloca [4 x i64], align 4, addrspace(5)
+ ; should get a score of 4
+ %manyusers = alloca [4 x i64], align 4, addrspace(5)
+
+ store i32 42, ptr addrspace(5) %simpleuser
+
+ %manyusers.1 = getelementptr i8, ptr addrspace(5) %manyusers, i64 2
+ %v0 = load i8, ptr addrspace(5) %manyusers.1
+ %v0.ext = zext i8 %v0 to i32
+ store i32 %v0.ext, ptr addrspace(5) %manyusers.1
+
+ %manyusers.2 = getelementptr i8, ptr addrspace(5) %manyusers, i64 1
+ %v1 = load i8, ptr addrspace(5) %manyusers.2
+ %v1.ext = zext i8 %v1 to i32
+ store i32 %v1.ext, ptr addrspace(5) %manyusers.2
+
+ ret void
+}
+
+; CHECK: Scoring: %stack = alloca [4 x i64], align 4, addrspace(5)
+; CHECK-NEXT: [+5]: store i32 32, ptr addrspace(5) %stack, align 4
+; CHECK-NEXT: [+1]: store i32 42, ptr addrspace(5) %stack, align 4
+; CHECK-NEXT: [+9]: store i32 32, ptr addrspace(5) %stack.1, align 4
+; CHECK-NEXT: [+5]: %outer.cmp = load i1, ptr addrspace(5) %stack.1, align 1
+; CHECK-NEXT: [+1]: store i32 64, ptr addrspace(5) %stack.2, align 4
+; CHECK-NEXT: [+9]: %inner.cmp = load i1, ptr addrspace(5) %stack.2, align 1
+; CHECK-NEXT: => Final Score:30
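+; Note: per the checks above, users inside loops score higher than the +1 given
+; to straight-line users (+5 in the outer loop, +9 in the inner loop), so the
+; six users below total 30 rather than 6.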
+define amdgpu_kernel void @loop_users_alloca(i1 %x, i2) #0 {
+entry:
+ ; should get a score of 30
+ %stack = alloca [4 x i64], align 4, addrspace(5)
+ %stack.1 = getelementptr i8, ptr addrspace(5) %stack, i64 4
+ %stack.2 = getelementptr i8, ptr addrspace(5) %stack, i64 8
+
+ store i32 42, ptr addrspace(5) %stack
+ br label %loop.outer
+
+loop.outer:
+ store i32 32, ptr addrspace(5) %stack
+ %outer.cmp = load i1, ptr addrspace(5) %stack.1
+ br label %loop.inner
+
+loop.inner:
+ store i32 32, ptr addrspace(5) %stack.1
+ %inner.cmp = load i1, ptr addrspace(5) %stack.2
+ br i1 %inner.cmp, label %loop.inner, label %loop.outer
+
+exit:
+ store i32 64, ptr addrspace(5) %stack.2
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll b/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
index d92ba77..d070dc3 100644
--- a/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
+++ b/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
@@ -203,13 +203,13 @@ attributes #5 = { "amdgpu-flat-work-group-size"="128,512" }
attributes #6 = { "amdgpu-flat-work-group-size"="512,512" }
attributes #7 = { "amdgpu-flat-work-group-size"="64,256" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-flat-work-group-size"="1,256" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-flat-work-group-size"="64,128" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-flat-work-group-size"="128,512" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR3]] = { "amdgpu-flat-work-group-size"="64,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR4]] = { "amdgpu-flat-work-group-size"="128,128" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR5]] = { "amdgpu-flat-work-group-size"="512,512" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR6]] = { "amdgpu-flat-work-group-size"="64,256" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR7]] = { "amdgpu-flat-work-group-size"="128,256" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR8]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-flat-work-group-size"="1,256" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-flat-work-group-size"="64,128" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-flat-work-group-size"="128,512" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR3]] = { "amdgpu-flat-work-group-size"="64,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR4]] = { "amdgpu-flat-work-group-size"="128,128" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR5]] = { "amdgpu-flat-work-group-size"="512,512" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR6]] = { "amdgpu-flat-work-group-size"="64,256" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR7]] = { "amdgpu-flat-work-group-size"="128,256" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR8]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/propagate-waves-per-eu.ll b/llvm/test/CodeGen/AMDGPU/propagate-waves-per-eu.ll
index 2df219b..f62f1d5 100644
--- a/llvm/test/CodeGen/AMDGPU/propagate-waves-per-eu.ll
+++ b/llvm/test/CodeGen/AMDGPU/propagate-waves-per-eu.ll
@@ -399,26 +399,26 @@ attributes #17 = { "amdgpu-waves-per-eu"="5,8" }
attributes #18 = { "amdgpu-waves-per-eu"="9,10" }
attributes #19 = { "amdgpu-waves-per-eu"="8,9" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,2" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,4" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR3]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,9" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR4]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,1" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR5]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,2" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR6]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,9" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR7]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR8]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="3,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR9]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR10]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR11]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="0,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR12]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,123" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR13]] = { "amdgpu-flat-work-group-size"="1,512" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR14]] = { "amdgpu-flat-work-group-size"="1,512" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="3,6" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR15]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="6,9" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR16]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="6,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR17]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="5,5" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR18]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="8,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR19]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR20]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,9" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR21]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="8,9" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,2" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,4" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR3]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,9" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR4]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,1" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR5]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,2" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR6]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,9" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR7]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR8]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="3,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR9]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR10]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR11]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="0,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR12]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,123" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR13]] = { "amdgpu-flat-work-group-size"="1,512" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR14]] = { "amdgpu-flat-work-group-size"="1,512" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="3,6" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR15]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="6,9" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR16]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="6,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR17]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="5,5" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR18]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="8,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR19]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR20]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,9" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR21]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="8,9" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir b/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir
index 2ccc241..fdfc9b0 100644
--- a/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir
+++ b/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir
@@ -24,6 +24,7 @@ registers:
- { id: 10, class: sreg_64_xexec, preferred-register: '$vcc' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo:
maxKernArgAlign: 1
diff --git a/llvm/test/CodeGen/AMDGPU/recursive_global_initializer.ll b/llvm/test/CodeGen/AMDGPU/recursive_global_initializer.ll
index eaef63b..c1d647c 100644
--- a/llvm/test/CodeGen/AMDGPU/recursive_global_initializer.ll
+++ b/llvm/test/CodeGen/AMDGPU/recursive_global_initializer.ll
@@ -19,5 +19,5 @@ define void @hoge() {
ret void
}
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/remove-no-kernel-id-attribute.ll b/llvm/test/CodeGen/AMDGPU/remove-no-kernel-id-attribute.ll
index 297a056..384a9c4 100644
--- a/llvm/test/CodeGen/AMDGPU/remove-no-kernel-id-attribute.ll
+++ b/llvm/test/CodeGen/AMDGPU/remove-no-kernel-id-attribute.ll
@@ -191,11 +191,11 @@ define amdgpu_kernel void @kernel_lds_recursion() {
!1 = !{i32 1, !"amdhsa_code_object_version", i32 400}
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-lds-size"="2" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-lds-size"="2" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR3]] = { "amdgpu-lds-size"="4" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR4]] = { "amdgpu-lds-size"="2" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR4]] = { "amdgpu-lds-size"="2" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR5:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
; CHECK: attributes #[[ATTR6:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir b/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
index c0d1999..0903770 100644
--- a/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
@@ -181,6 +181,8 @@ legalized: false
regBankSelected: false
selected: false
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
liveins:
- { reg: '$vgpr0', virtual-reg: '%0' }
- { reg: '$vgpr1', virtual-reg: '%1' }
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir b/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir
index efbdbca..c6ccbd9 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir
@@ -78,6 +78,7 @@
name: sgpr_spill_wrong_stack_id
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
machineFunctionInfo:
scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll b/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
index 764f494..f523b4a 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
@@ -16,7 +16,7 @@ define void @spill_sgpr_with_no_lower_vgpr_available() #0 {
; GCN-LABEL: spill_sgpr_with_no_lower_vgpr_available:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s24, s33
+; GCN-NEXT: s_mov_b32 s18, s33
; GCN-NEXT: s_mov_b32 s33, s32
; GCN-NEXT: s_or_saveexec_b64 s[16:17], -1
; GCN-NEXT: buffer_store_dword v255, off, s[0:3], s33 offset:448 ; 4-byte Folded Spill
@@ -150,7 +150,6 @@ define void @spill_sgpr_with_no_lower_vgpr_available() #0 {
; GCN-NEXT: s_mov_b64 s[20:21], s[0:1]
; GCN-NEXT: s_mov_b64 s[0:1], s[20:21]
; GCN-NEXT: s_mov_b64 s[2:3], s[22:23]
-; GCN-NEXT: ; implicit-def: $sgpr18_sgpr19
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GCN-NEXT: v_readlane_b32 s31, v255, 1
@@ -270,7 +269,7 @@ define void @spill_sgpr_with_no_lower_vgpr_available() #0 {
; GCN-NEXT: buffer_load_dword v255, off, s[0:3], s33 offset:448 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, s[4:5]
; GCN-NEXT: s_add_i32 s32, s32, 0xffff8c00
-; GCN-NEXT: s_mov_b32 s33, s24
+; GCN-NEXT: s_mov_b32 s33, s18
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca i32, align 4, addrspace(5)
@@ -311,7 +310,7 @@ define void @spill_to_lowest_available_vgpr() #0 {
; GCN-LABEL: spill_to_lowest_available_vgpr:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s24, s33
+; GCN-NEXT: s_mov_b32 s18, s33
; GCN-NEXT: s_mov_b32 s33, s32
; GCN-NEXT: s_or_saveexec_b64 s[16:17], -1
; GCN-NEXT: buffer_store_dword v254, off, s[0:3], s33 offset:444 ; 4-byte Folded Spill
@@ -444,7 +443,6 @@ define void @spill_to_lowest_available_vgpr() #0 {
; GCN-NEXT: s_mov_b64 s[20:21], s[0:1]
; GCN-NEXT: s_mov_b64 s[0:1], s[20:21]
; GCN-NEXT: s_mov_b64 s[2:3], s[22:23]
-; GCN-NEXT: ; implicit-def: $sgpr18_sgpr19
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GCN-NEXT: v_readlane_b32 s31, v254, 1
@@ -563,7 +561,7 @@ define void @spill_to_lowest_available_vgpr() #0 {
; GCN-NEXT: buffer_load_dword v254, off, s[0:3], s33 offset:444 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, s[4:5]
; GCN-NEXT: s_add_i32 s32, s32, 0xffff8c00
-; GCN-NEXT: s_mov_b32 s33, s24
+; GCN-NEXT: s_mov_b32 s33, s18
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca i32, align 4, addrspace(5)
@@ -1530,7 +1528,7 @@ define void @spill_sgpr_no_free_vgpr_ipra() #0 {
; GCN-LABEL: spill_sgpr_no_free_vgpr_ipra:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s24, s33
+; GCN-NEXT: s_mov_b32 s18, s33
; GCN-NEXT: s_mov_b32 s33, s32
; GCN-NEXT: s_add_i32 s32, s32, 0x7400
; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:444 ; 4-byte Folded Spill
@@ -1668,7 +1666,6 @@ define void @spill_sgpr_no_free_vgpr_ipra() #0 {
; GCN-NEXT: s_mov_b64 s[20:21], s[0:1]
; GCN-NEXT: s_mov_b64 s[0:1], s[20:21]
; GCN-NEXT: s_mov_b64 s[2:3], s[22:23]
-; GCN-NEXT: ; implicit-def: $sgpr18_sgpr19
; GCN-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GCN-NEXT: s_mov_b64 s[4:5], exec
; GCN-NEXT: s_mov_b64 exec, 1
@@ -1801,7 +1798,7 @@ define void @spill_sgpr_no_free_vgpr_ipra() #0 {
; GCN-NEXT: buffer_load_dword v41, off, s[0:3], s33 offset:440 ; 4-byte Folded Reload
; GCN-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:444 ; 4-byte Folded Reload
; GCN-NEXT: s_add_i32 s32, s32, 0xffff8c00
-; GCN-NEXT: s_mov_b32 s33, s24
+; GCN-NEXT: s_mov_b32 s33, s18
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
call void @child_function_ipra()
diff --git a/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll b/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
index f229f33..539cfc7 100644
--- a/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
@@ -73,7 +73,7 @@ define amdgpu_kernel void @test_simple_indirect_call() {
;.
; AKF_GCN: attributes #[[ATTR0]] = { "amdgpu-calls" "amdgpu-stack-objects" }
;.
-; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_GCN: attributes #[[ATTR1]] = { "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/snippet-copy-bundle-regression.mir b/llvm/test/CodeGen/AMDGPU/snippet-copy-bundle-regression.mir
index 3558298..f8ec6bb 100644
--- a/llvm/test/CodeGen/AMDGPU/snippet-copy-bundle-regression.mir
+++ b/llvm/test/CodeGen/AMDGPU/snippet-copy-bundle-regression.mir
@@ -21,6 +21,7 @@
name: kernel
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
machineFunctionInfo:
isEntryFunction: true
diff --git a/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll b/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll
index b8bc01e..c6a5990 100644
--- a/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll
+++ b/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll
@@ -916,13 +916,13 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-O0-LABEL: kernel_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_mov_b32 s32, 0x1200
-; WAVE32-O0-NEXT: s_getpc_b64 s[24:25]
-; WAVE32-O0-NEXT: s_mov_b32 s24, s0
-; WAVE32-O0-NEXT: s_load_dwordx4 s[24:27], s[24:25], 0x0
+; WAVE32-O0-NEXT: s_getpc_b64 s[20:21]
+; WAVE32-O0-NEXT: s_mov_b32 s20, s0
+; WAVE32-O0-NEXT: s_load_dwordx4 s[20:23], s[20:21], 0x0
; WAVE32-O0-NEXT: s_waitcnt lgkmcnt(0)
-; WAVE32-O0-NEXT: s_bitset0_b32 s27, 21
-; WAVE32-O0-NEXT: s_add_u32 s24, s24, s9
-; WAVE32-O0-NEXT: s_addc_u32 s25, s25, 0
+; WAVE32-O0-NEXT: s_bitset0_b32 s23, 21
+; WAVE32-O0-NEXT: s_add_u32 s20, s20, s9
+; WAVE32-O0-NEXT: s_addc_u32 s21, s21, 0
; WAVE32-O0-NEXT: ; implicit-def: $vgpr3 : SGPR spill to VGPR lane
; WAVE32-O0-NEXT: s_mov_b32 s14, s8
; WAVE32-O0-NEXT: s_mov_b32 s13, s7
@@ -934,17 +934,17 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-O0-NEXT: v_writelane_b32 v3, s0, 0
; WAVE32-O0-NEXT: s_lshr_b32 s0, s0, 5
; WAVE32-O0-NEXT: v_writelane_b32 v3, s0, 1
-; WAVE32-O0-NEXT: s_or_saveexec_b32 s20, -1
-; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[24:27], 0 offset:128 ; 4-byte Folded Spill
-; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s20
+; WAVE32-O0-NEXT: s_or_saveexec_b32 s19, -1
+; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[20:23], 0 offset:128 ; 4-byte Folded Spill
+; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s19
; WAVE32-O0-NEXT: v_mov_b32_e32 v3, 42
-; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[24:27], 0
+; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[20:23], 0
; WAVE32-O0-NEXT: s_waitcnt_vscnt null, 0x0
-; WAVE32-O0-NEXT: s_mov_b64 s[0:1], s[24:25]
-; WAVE32-O0-NEXT: s_mov_b64 s[2:3], s[26:27]
+; WAVE32-O0-NEXT: s_mov_b64 s[0:1], s[20:21]
+; WAVE32-O0-NEXT: s_mov_b64 s[2:3], s[22:23]
; WAVE32-O0-NEXT: s_mov_b32 s6, s32
; WAVE32-O0-NEXT: v_mov_b32_e32 v3, 17
-; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[24:27], s6 offset:4
+; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[20:23], s6 offset:4
; WAVE32-O0-NEXT: s_mov_b32 s6, stack_passed_argument@abs32@hi
; WAVE32-O0-NEXT: s_mov_b32 s16, stack_passed_argument@abs32@lo
; WAVE32-O0-NEXT: ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17
@@ -1018,11 +1018,10 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v30, s18
-; WAVE32-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE32-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
-; WAVE32-O0-NEXT: s_or_saveexec_b32 s20, -1
-; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[24:27], 0 offset:128 ; 4-byte Folded Reload
-; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s20
+; WAVE32-O0-NEXT: s_or_saveexec_b32 s19, -1
+; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[20:23], 0 offset:128 ; 4-byte Folded Reload
+; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s19
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE32-O0-NEXT: v_readlane_b32 s1, v0, 1
; WAVE32-O0-NEXT: v_readlane_b32 s0, v0, 0
@@ -1137,7 +1136,6 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE64-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v30, s18
-; WAVE64-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE64-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE64-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; WAVE64-O0-NEXT: buffer_load_dword v0, off, s[24:27], 0 offset:128 ; 4-byte Folded Reload
@@ -1155,13 +1153,13 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-WWM-PREALLOC-LABEL: kernel_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-WWM-PREALLOC: ; %bb.0:
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s32, 0x1200
-; WAVE32-WWM-PREALLOC-NEXT: s_getpc_b64 s[24:25]
-; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s24, s0
-; WAVE32-WWM-PREALLOC-NEXT: s_load_dwordx4 s[24:27], s[24:25], 0x0
+; WAVE32-WWM-PREALLOC-NEXT: s_getpc_b64 s[20:21]
+; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s20, s0
+; WAVE32-WWM-PREALLOC-NEXT: s_load_dwordx4 s[20:23], s[20:21], 0x0
; WAVE32-WWM-PREALLOC-NEXT: s_waitcnt lgkmcnt(0)
-; WAVE32-WWM-PREALLOC-NEXT: s_bitset0_b32 s27, 21
-; WAVE32-WWM-PREALLOC-NEXT: s_add_u32 s24, s24, s9
-; WAVE32-WWM-PREALLOC-NEXT: s_addc_u32 s25, s25, 0
+; WAVE32-WWM-PREALLOC-NEXT: s_bitset0_b32 s23, 21
+; WAVE32-WWM-PREALLOC-NEXT: s_add_u32 s20, s20, s9
+; WAVE32-WWM-PREALLOC-NEXT: s_addc_u32 s21, s21, 0
; WAVE32-WWM-PREALLOC-NEXT: ; implicit-def: $vgpr32 : SGPR spill to VGPR lane
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s14, s8
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s13, s7
@@ -1174,13 +1172,13 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-WWM-PREALLOC-NEXT: s_lshr_b32 s0, s0, 5
; WAVE32-WWM-PREALLOC-NEXT: v_writelane_b32 v32, s0, 1
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v3, 42
-; WAVE32-WWM-PREALLOC-NEXT: buffer_store_dword v3, off, s[24:27], 0
+; WAVE32-WWM-PREALLOC-NEXT: buffer_store_dword v3, off, s[20:23], 0
; WAVE32-WWM-PREALLOC-NEXT: s_waitcnt_vscnt null, 0x0
-; WAVE32-WWM-PREALLOC-NEXT: s_mov_b64 s[0:1], s[24:25]
-; WAVE32-WWM-PREALLOC-NEXT: s_mov_b64 s[2:3], s[26:27]
+; WAVE32-WWM-PREALLOC-NEXT: s_mov_b64 s[0:1], s[20:21]
+; WAVE32-WWM-PREALLOC-NEXT: s_mov_b64 s[2:3], s[22:23]
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s6, s32
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v3, 17
-; WAVE32-WWM-PREALLOC-NEXT: buffer_store_dword v3, off, s[24:27], s6 offset:4
+; WAVE32-WWM-PREALLOC-NEXT: buffer_store_dword v3, off, s[20:23], s6 offset:4
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s6, stack_passed_argument@abs32@hi
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s16, stack_passed_argument@abs32@lo
; WAVE32-WWM-PREALLOC-NEXT: ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17
@@ -1254,7 +1252,6 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v29, s18
; WAVE32-WWM-PREALLOC-NEXT: ; implicit-def: $sgpr18
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v30, s18
-; WAVE32-WWM-PREALLOC-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE32-WWM-PREALLOC-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE32-WWM-PREALLOC-NEXT: v_readlane_b32 s1, v32, 1
; WAVE32-WWM-PREALLOC-NEXT: v_readlane_b32 s0, v32, 0
@@ -1347,7 +1344,7 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-O0-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; WAVE32-O0-NEXT: s_mov_b32 s26, s33
+; WAVE32-O0-NEXT: s_mov_b32 s25, s33
; WAVE32-O0-NEXT: s_mov_b32 s33, s32
; WAVE32-O0-NEXT: s_xor_saveexec_b32 s16, -1
; WAVE32-O0-NEXT: buffer_store_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
@@ -1361,9 +1358,9 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-O0-NEXT: v_writelane_b32 v0, s16, 0
; WAVE32-O0-NEXT: s_lshr_b32 s16, s16, 5
; WAVE32-O0-NEXT: v_writelane_b32 v0, s16, 1
-; WAVE32-O0-NEXT: s_or_saveexec_b32 s25, -1
+; WAVE32-O0-NEXT: s_or_saveexec_b32 s24, -1
; WAVE32-O0-NEXT: buffer_store_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Spill
-; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s25
+; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s24
; WAVE32-O0-NEXT: v_mov_b32_e32 v0, 42
; WAVE32-O0-NEXT: buffer_store_dword v0, off, s[0:3], s33
; WAVE32-O0-NEXT: s_waitcnt_vscnt null, 0x0
@@ -1440,11 +1437,10 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v30, s18
-; WAVE32-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE32-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
-; WAVE32-O0-NEXT: s_or_saveexec_b32 s25, -1
+; WAVE32-O0-NEXT: s_or_saveexec_b32 s24, -1
; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Reload
-; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s25
+; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s24
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE32-O0-NEXT: v_readlane_b32 s5, v0, 1
; WAVE32-O0-NEXT: v_readlane_b32 s4, v0, 0
@@ -1460,14 +1456,14 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Reload
; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s4
; WAVE32-O0-NEXT: s_add_i32 s32, s32, 0xffffee00
-; WAVE32-O0-NEXT: s_mov_b32 s33, s26
+; WAVE32-O0-NEXT: s_mov_b32 s33, s25
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE32-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-O0-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; WAVE64-O0-NEXT: s_mov_b32 s28, s33
+; WAVE64-O0-NEXT: s_mov_b32 s19, s33
; WAVE64-O0-NEXT: s_mov_b32 s33, s32
; WAVE64-O0-NEXT: s_xor_saveexec_b64 s[16:17], -1
; WAVE64-O0-NEXT: buffer_store_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
@@ -1560,7 +1556,6 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE64-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v30, s18
-; WAVE64-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE64-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE64-O0-NEXT: s_or_saveexec_b64 s[26:27], -1
; WAVE64-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Reload
@@ -1580,14 +1575,14 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE64-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Reload
; WAVE64-O0-NEXT: s_mov_b64 exec, s[4:5]
; WAVE64-O0-NEXT: s_add_i32 s32, s32, 0xffffdc00
-; WAVE64-O0-NEXT: s_mov_b32 s33, s28
+; WAVE64-O0-NEXT: s_mov_b32 s33, s19
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE64-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE32-WWM-PREALLOC-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-WWM-PREALLOC: ; %bb.0:
; WAVE32-WWM-PREALLOC-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s25, s33
+; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s24, s33
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s33, s32
; WAVE32-WWM-PREALLOC-NEXT: s_xor_saveexec_b32 s16, -1
; WAVE32-WWM-PREALLOC-NEXT: buffer_store_dword v33, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
@@ -1677,7 +1672,6 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v29, s18
; WAVE32-WWM-PREALLOC-NEXT: ; implicit-def: $sgpr18
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v30, s18
-; WAVE32-WWM-PREALLOC-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE32-WWM-PREALLOC-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE32-WWM-PREALLOC-NEXT: v_readlane_b32 s5, v32, 1
; WAVE32-WWM-PREALLOC-NEXT: v_readlane_b32 s4, v32, 0
@@ -1693,7 +1687,7 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-WWM-PREALLOC-NEXT: buffer_load_dword v32, off, s[0:3], s33 offset:132 ; 4-byte Folded Reload
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 exec_lo, s4
; WAVE32-WWM-PREALLOC-NEXT: s_add_i32 s32, s32, 0xffffee00
-; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s33, s25
+; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s33, s24
; WAVE32-WWM-PREALLOC-NEXT: s_waitcnt vmcnt(0)
; WAVE32-WWM-PREALLOC-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca [32 x i32], addrspace(5)
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
index 8d5dc79..049db01 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
@@ -31,6 +31,6 @@ define amdgpu_kernel void @kernel1() #1 {
attributes #0 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
index 7a6f82d..c9387f1 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
@@ -98,7 +98,7 @@ define amdgpu_kernel void @kernel2() #0 {
attributes #0 = { "uniform-work-group-size"="true" }
;.
; CHECK: attributes #[[ATTR0]] = { "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR2]] = { "uniform-work-group-size"="true" }
-; CHECK: attributes #[[ATTR3]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR3]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll
index c04154c..7183da2 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll
@@ -41,6 +41,6 @@ define amdgpu_kernel void @kernel3() #2 {
attributes #2 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
index 2d5ff04..6ed04cf 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
@@ -41,7 +41,7 @@ define amdgpu_kernel void @kernel2() #2 {
attributes #1 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-propagate-attribute.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-propagate-attribute.ll
index e8bf6fc..d5ba2fd 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-propagate-attribute.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-propagate-attribute.ll
@@ -52,8 +52,8 @@ attributes #0 = { nounwind }
attributes #1 = { "uniform-work-group-size"="false" }
attributes #2 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR3]] = { "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
index 473eea4..7f0dfea 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
@@ -101,7 +101,7 @@ define amdgpu_kernel void @kernel(ptr addrspace(1) %m) #1 {
attributes #0 = { nounwind readnone }
attributes #1 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { nounwind memory(none) "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { nounwind memory(none) "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR0]] = { nounwind memory(none) "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { nounwind memory(none) "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
index 221f1a1..8616c73 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
@@ -61,6 +61,6 @@ define amdgpu_kernel void @kernel3() #0 {
attributes #0 = { "uniform-work-group-size"="false" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-agpr-limit-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/vgpr-agpr-limit-gfx90a.ll
index 717d3d9..0407994 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-agpr-limit-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-agpr-limit-gfx90a.ll
@@ -540,6 +540,7 @@ define internal void @use512vgprs() {
}
define void @foo() #0 {
+ call void asm sideeffect "; use $0", "a"(i32 0)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll b/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
index d2364a6..bfc249e 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
@@ -233,10 +233,10 @@ define amdgpu_ps float @loop(i32 %z, float %v, i32 inreg %bound, ptr %extern_fun
; SI-NEXT: bb.1.Flow:
; SI-NEXT: successors: %bb.2(0x40000000), %bb.10(0x40000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %49:vgpr_32, %bb.0, %4, %bb.9
- ; SI-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY4]], %bb.0, undef %51:vgpr_32, %bb.9
- ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %53:vgpr_32, %bb.9
- ; SI-NEXT: [[PHI3:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %55:vgpr_32, %bb.9
+ ; SI-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %47:vgpr_32, %bb.0, %4, %bb.9
+ ; SI-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY4]], %bb.0, undef %49:vgpr_32, %bb.9
+ ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %51:vgpr_32, %bb.9
+ ; SI-NEXT: [[PHI3:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %53:vgpr_32, %bb.9
; SI-NEXT: [[SI_ELSE:%[0-9]+]]:sreg_32 = SI_ELSE killed [[SI_IF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
; SI-NEXT: S_BRANCH %bb.2
; SI-NEXT: {{ $}}
@@ -249,8 +249,8 @@ define amdgpu_ps float @loop(i32 %z, float %v, i32 inreg %bound, ptr %extern_fun
; SI-NEXT: bb.3:
; SI-NEXT: successors: %bb.4(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %57:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
- ; SI-NEXT: [[PHI5:%[0-9]+]]:vgpr_32 = PHI undef %59:vgpr_32, %bb.4, [[PHI1]], %bb.2
+ ; SI-NEXT: [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %55:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
+ ; SI-NEXT: [[PHI5:%[0-9]+]]:vgpr_32 = PHI undef %57:vgpr_32, %bb.4, [[PHI1]], %bb.2
; SI-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub0, implicit $exec
; SI-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub1, implicit $exec
; SI-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_1]], %subreg.sub1
@@ -286,8 +286,8 @@ define amdgpu_ps float @loop(i32 %z, float %v, i32 inreg %bound, ptr %extern_fun
; SI-NEXT: bb.7:
; SI-NEXT: successors: %bb.8(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI6:%[0-9]+]]:vreg_64 = PHI undef %61:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
- ; SI-NEXT: [[PHI7:%[0-9]+]]:vgpr_32 = PHI undef %63:vgpr_32, %bb.8, [[COPY4]], %bb.6
+ ; SI-NEXT: [[PHI6:%[0-9]+]]:vreg_64 = PHI undef %59:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
+ ; SI-NEXT: [[PHI7:%[0-9]+]]:vgpr_32 = PHI undef %61:vgpr_32, %bb.8, [[COPY4]], %bb.6
; SI-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI6]].sub0, implicit $exec
; SI-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI6]].sub1, implicit $exec
; SI-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_2]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_3]], %subreg.sub1
@@ -356,9 +356,9 @@ define amdgpu_ps float @loop_with_use(i32 %z, float %v, i32 inreg %bound, ptr %e
; SI-NEXT: bb.1.Flow:
; SI-NEXT: successors: %bb.2(0x40000000), %bb.10(0x40000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %50:vgpr_32, %bb.0, %4, %bb.9
- ; SI-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %52:vgpr_32, %bb.9
- ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %54:vgpr_32, %bb.9
+ ; SI-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %48:vgpr_32, %bb.0, %4, %bb.9
+ ; SI-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %50:vgpr_32, %bb.9
+ ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %52:vgpr_32, %bb.9
; SI-NEXT: [[SI_ELSE:%[0-9]+]]:sreg_32 = SI_ELSE killed [[SI_IF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
; SI-NEXT: S_BRANCH %bb.2
; SI-NEXT: {{ $}}
@@ -371,7 +371,7 @@ define amdgpu_ps float @loop_with_use(i32 %z, float %v, i32 inreg %bound, ptr %e
; SI-NEXT: bb.3:
; SI-NEXT: successors: %bb.4(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI3:%[0-9]+]]:vreg_64 = PHI undef %56:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
+ ; SI-NEXT: [[PHI3:%[0-9]+]]:vreg_64 = PHI undef %54:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
; SI-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI3]].sub0, implicit $exec
; SI-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI3]].sub1, implicit $exec
; SI-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_1]], %subreg.sub1
@@ -407,7 +407,7 @@ define amdgpu_ps float @loop_with_use(i32 %z, float %v, i32 inreg %bound, ptr %e
; SI-NEXT: bb.7:
; SI-NEXT: successors: %bb.8(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %58:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
+ ; SI-NEXT: [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %56:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
; SI-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub0, implicit $exec
; SI-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub1, implicit $exec
; SI-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_2]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_3]], %subreg.sub1
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll b/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll
index 37f207f..4939d526 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll
@@ -47,7 +47,6 @@ define protected amdgpu_kernel void @kern(ptr %addr) !llvm.amdgcn.lds.kernel.id
; CHECK-NEXT: s_mov_b32 s15, 42
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/virtregrewrite-undef-identity-copy.mir b/llvm/test/CodeGen/AMDGPU/virtregrewrite-undef-identity-copy.mir
index 3d9db68..6659e95 100644
--- a/llvm/test/CodeGen/AMDGPU/virtregrewrite-undef-identity-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/virtregrewrite-undef-identity-copy.mir
@@ -20,6 +20,7 @@ name: undef_identity_copy
tracksRegLiveness: true
frameInfo:
maxAlignment: 4
+ adjustsStack: true
hasCalls: true
machineFunctionInfo:
isEntryFunction: true
diff --git a/llvm/test/CodeGen/AMDGPU/wave32.ll b/llvm/test/CodeGen/AMDGPU/wave32.ll
index 82816b4..901e88a 100644
--- a/llvm/test/CodeGen/AMDGPU/wave32.ll
+++ b/llvm/test/CodeGen/AMDGPU/wave32.ll
@@ -2479,8 +2479,7 @@ define amdgpu_kernel void @icmp64(i32 %n, i32 %s) {
; GFX1032-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX1032-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX1032-NEXT: v_mul_lo_u32 v2, s1, v1
-; GFX1032-NEXT: s_ff1_i32_b32 s1, 0x80000000
-; GFX1032-NEXT: s_add_i32 s1, s1, 32
+; GFX1032-NEXT: s_brev_b32 s1, 1
; GFX1032-NEXT: v_mul_hi_u32 v2, v1, v2
; GFX1032-NEXT: v_add_nc_u32_e32 v1, v1, v2
; GFX1032-NEXT: v_mul_hi_u32 v1, v0, v1
@@ -2494,8 +2493,7 @@ define amdgpu_kernel void @icmp64(i32 %n, i32 %s) {
; GFX1032-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_lshr_b32 s0, vcc_lo, 1
-; GFX1032-NEXT: s_ff1_i32_b32 s0, s0
-; GFX1032-NEXT: s_min_u32 s0, s0, s1
+; GFX1032-NEXT: s_ff1_i32_b64 s0, s[0:1]
; GFX1032-NEXT: s_cmp_gt_u32 s0, 9
; GFX1032-NEXT: s_cselect_b32 s0, -1, 0
; GFX1032-NEXT: s_and_b32 s0, vcc_lo, s0
@@ -2529,10 +2527,7 @@ define amdgpu_kernel void @icmp64(i32 %n, i32 %s) {
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX1064-NEXT: s_lshr_b64 s[0:1], vcc, 1
; GFX1064-NEXT: s_bitset1_b32 s1, 31
-; GFX1064-NEXT: s_ff1_i32_b32 s0, s0
-; GFX1064-NEXT: s_ff1_i32_b32 s1, s1
-; GFX1064-NEXT: s_add_i32 s1, s1, 32
-; GFX1064-NEXT: s_min_u32 s0, s0, s1
+; GFX1064-NEXT: s_ff1_i32_b64 s0, s[0:1]
; GFX1064-NEXT: s_cmp_gt_u32 s0, 9
; GFX1064-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX1064-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
@@ -2576,9 +2571,8 @@ define amdgpu_kernel void @fcmp64(float %n, float %s) {
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032-NEXT: v_div_scale_f32 v1, s1, s0, s0, v0
; GFX1032-NEXT: v_div_scale_f32 v4, vcc_lo, v0, s0, v0
-; GFX1032-NEXT: s_ff1_i32_b32 s1, 0x80000000
+; GFX1032-NEXT: s_brev_b32 s1, 1
; GFX1032-NEXT: v_rcp_f32_e32 v2, v1
-; GFX1032-NEXT: s_add_i32 s1, s1, 32
; GFX1032-NEXT: v_fma_f32 v3, -v1, v2, 1.0
; GFX1032-NEXT: v_fmac_f32_e32 v2, v3, v2
; GFX1032-NEXT: v_mul_f32_e32 v3, v4, v2
@@ -2592,8 +2586,7 @@ define amdgpu_kernel void @fcmp64(float %n, float %s) {
; GFX1032-NEXT: v_cmp_eq_f32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_lshr_b32 s0, vcc_lo, 1
; GFX1032-NEXT: v_cmp_nlg_f32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_ff1_i32_b32 s0, s0
-; GFX1032-NEXT: s_min_u32 s0, s0, s1
+; GFX1032-NEXT: s_ff1_i32_b64 s0, s[0:1]
; GFX1032-NEXT: s_cmp_gt_u32 s0, 9
; GFX1032-NEXT: s_cselect_b32 s0, -1, 0
; GFX1032-NEXT: s_and_b32 s0, vcc_lo, s0
@@ -2609,15 +2602,15 @@ define amdgpu_kernel void @fcmp64(float %n, float %s) {
; GFX1064-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064-NEXT: v_div_scale_f32 v1, s[0:1], s2, s2, v0
-; GFX1064-NEXT: v_div_scale_f32 v4, vcc, v0, s2, v0
; GFX1064-NEXT: v_rcp_f32_e32 v2, v1
; GFX1064-NEXT: v_fma_f32 v3, -v1, v2, 1.0
; GFX1064-NEXT: v_fmac_f32_e32 v2, v3, v2
-; GFX1064-NEXT: v_mul_f32_e32 v3, v4, v2
-; GFX1064-NEXT: v_fma_f32 v5, -v1, v3, v4
-; GFX1064-NEXT: v_fmac_f32_e32 v3, v5, v2
-; GFX1064-NEXT: v_fma_f32 v1, -v1, v3, v4
-; GFX1064-NEXT: v_div_fmas_f32 v1, v1, v2, v3
+; GFX1064-NEXT: v_div_scale_f32 v3, vcc, v0, s2, v0
+; GFX1064-NEXT: v_mul_f32_e32 v4, v3, v2
+; GFX1064-NEXT: v_fma_f32 v5, -v1, v4, v3
+; GFX1064-NEXT: v_fmac_f32_e32 v4, v5, v2
+; GFX1064-NEXT: v_fma_f32 v1, -v1, v4, v3
+; GFX1064-NEXT: v_div_fmas_f32 v1, v1, v2, v4
; GFX1064-NEXT: v_div_fixup_f32 v1, v1, s2, v0
; GFX1064-NEXT: v_trunc_f32_e32 v1, v1
; GFX1064-NEXT: v_fma_f32 v0, -v1, s2, v0
@@ -2625,10 +2618,7 @@ define amdgpu_kernel void @fcmp64(float %n, float %s) {
; GFX1064-NEXT: s_lshr_b64 s[0:1], vcc, 1
; GFX1064-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0
; GFX1064-NEXT: s_bitset1_b32 s1, 31
-; GFX1064-NEXT: s_ff1_i32_b32 s0, s0
-; GFX1064-NEXT: s_ff1_i32_b32 s1, s1
-; GFX1064-NEXT: s_add_i32 s1, s1, 32
-; GFX1064-NEXT: s_min_u32 s0, s0, s1
+; GFX1064-NEXT: s_ff1_i32_b64 s0, s[0:1]
; GFX1064-NEXT: s_cmp_gt_u32 s0, 9
; GFX1064-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX1064-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll b/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll
index 3a33194..7eabe98 100644
--- a/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll
@@ -101,7 +101,6 @@ define void @test() #0 {
; GCN-O0-NEXT: s_mov_b64 s[20:21], s[0:1]
; GCN-O0-NEXT: s_mov_b64 s[0:1], s[20:21]
; GCN-O0-NEXT: s_mov_b64 s[2:3], s[22:23]
-; GCN-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; GCN-O0-NEXT: s_waitcnt lgkmcnt(0)
; GCN-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GCN-O0-NEXT: s_or_saveexec_b64 s[28:29], -1
diff --git a/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
index 11f6a29..e79cb66 100644
--- a/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
@@ -406,7 +406,6 @@ define amdgpu_gfx void @strict_wwm_call(ptr addrspace(8) inreg %tmp14, i32 inreg
; GFX9-O0-NEXT: s_mov_b64 s[0:1], s[44:45]
; GFX9-O0-NEXT: s_mov_b64 s[2:3], s[46:47]
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v2
-; GFX9-O0-NEXT: ; implicit-def: $sgpr44_sgpr45
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[42:43]
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
; GFX9-O0-NEXT: v_add_u32_e64 v1, v1, v2
@@ -633,7 +632,6 @@ define amdgpu_gfx void @strict_wwm_call_i64(ptr addrspace(8) inreg %tmp14, i64 i
; GFX9-O0-NEXT: s_mov_b64 s[2:3], s[38:39]
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
-; GFX9-O0-NEXT: ; implicit-def: $sgpr36_sgpr37
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[34:35]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[46:47], -1
diff --git a/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll b/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
index e5cebc1..def51f2 100644
--- a/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
+++ b/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
@@ -413,7 +413,6 @@ define amdgpu_kernel void @call(ptr addrspace(8) inreg %tmp14, i32 inreg %arg) {
; GFX9-O0-NEXT: ; implicit-def: $sgpr15
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_load_dword v1, off, s[24:27], 0 ; 4-byte Folded Reload
@@ -657,7 +656,6 @@ define amdgpu_kernel void @call_i64(ptr addrspace(8) inreg %tmp14, i64 inreg %ar
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
@@ -1285,7 +1283,6 @@ define amdgpu_kernel void @strict_wwm_call(ptr addrspace(8) inreg %tmp14, i32 in
; GFX9-O0-NEXT: ; implicit-def: $sgpr15
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_load_dword v1, off, s[24:27], 0 ; 4-byte Folded Reload
@@ -1529,7 +1526,6 @@ define amdgpu_kernel void @strict_wwm_call_i64(ptr addrspace(8) inreg %tmp14, i6
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
diff --git a/llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll b/llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll
index 365727c..0795525 100644
--- a/llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll
+++ b/llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll
@@ -8,10 +8,8 @@
%struct.Foo = type { ptr }
-; ARM-LABEL: foo:
-; THUMB-LABEL: foo:
-; T2-LABEL: foo:
define ptr @foo(ptr %this, i32 %acc) nounwind readonly align 2 {
+; ARM-LABEL: foo:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: add r2, r0, #4
; ARM-NEXT: mov r12, #1
@@ -44,6 +42,7 @@ define ptr @foo(ptr %this, i32 %acc) nounwind readonly align 2 {
; ARM-NEXT: add r0, r0, r1, lsl #2
; ARM-NEXT: mov pc, lr
;
+; THUMB-LABEL: foo:
; THUMB: @ %bb.0: @ %entry
; THUMB-NEXT: .save {r4, r5, r7, lr}
; THUMB-NEXT: push {r4, r5, r7, lr}
@@ -91,6 +90,7 @@ define ptr @foo(ptr %this, i32 %acc) nounwind readonly align 2 {
; THUMB-NEXT: pop {r0}
; THUMB-NEXT: bx r0
;
+; T2-LABEL: foo:
; T2: @ %bb.0: @ %entry
; T2-NEXT: adds r2, r0, #4
; T2-NEXT: mov.w r12, #1
@@ -125,6 +125,7 @@ define ptr @foo(ptr %this, i32 %acc) nounwind readonly align 2 {
; T2-NEXT: add.w r0, r0, r1, lsl #2
; T2-NEXT: bx lr
;
+; V8-LABEL: foo:
; V8: @ %bb.0: @ %entry
; V8-NEXT: adds r2, r0, #4
; V8-NEXT: mov.w r12, #1
@@ -210,11 +211,8 @@ sw.epilog: ; preds = %tailrecurse.switch
%struct.S = type { ptr, [1 x i8] }
-; ARM-LABEL: bar:
-; THUMB-LABEL: bar:
-; T2-LABEL: bar:
-; V8-LABEL: bar:
define internal zeroext i8 @bar(ptr %x, ptr nocapture %y) nounwind readonly {
+; ARM-LABEL: bar:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrb r2, [r0, #4]
; ARM-NEXT: ands r2, r2, #112
@@ -230,6 +228,7 @@ define internal zeroext i8 @bar(ptr %x, ptr nocapture %y) nounwind readonly {
; ARM-NEXT: mov r0, #1
; ARM-NEXT: mov pc, lr
;
+; THUMB-LABEL: bar:
; THUMB: @ %bb.0: @ %entry
; THUMB-NEXT: ldrb r2, [r0, #4]
; THUMB-NEXT: movs r3, #112
@@ -253,6 +252,7 @@ define internal zeroext i8 @bar(ptr %x, ptr nocapture %y) nounwind readonly {
; THUMB-NEXT: ands r0, r1
; THUMB-NEXT: bx lr
;
+; T2-LABEL: bar:
; T2: @ %bb.0: @ %entry
; T2-NEXT: ldrb r2, [r0, #4]
; T2-NEXT: ands r2, r2, #112
@@ -270,6 +270,7 @@ define internal zeroext i8 @bar(ptr %x, ptr nocapture %y) nounwind readonly {
; T2-NEXT: movs r0, #1
; T2-NEXT: bx lr
;
+; V8-LABEL: bar:
; V8: @ %bb.0: @ %entry
; V8-NEXT: ldrb r2, [r0, #4]
; V8-NEXT: ands r2, r2, #112
diff --git a/llvm/test/CodeGen/ARM/no-register-coalescing-in-returnsTwice.mir b/llvm/test/CodeGen/ARM/no-register-coalescing-in-returnsTwice.mir
index 5c59566..b4bbb9b 100644
--- a/llvm/test/CodeGen/ARM/no-register-coalescing-in-returnsTwice.mir
+++ b/llvm/test/CodeGen/ARM/no-register-coalescing-in-returnsTwice.mir
@@ -86,6 +86,8 @@
---
name: main
exposesReturnsTwice: true
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, name: P0, size: 80, alignment: 8, local-offset: -80 }
- { id: 1, name: jb1, size: 160, alignment: 8, local-offset: -240 }
diff --git a/llvm/test/CodeGen/ARM/select.ll b/llvm/test/CodeGen/ARM/select.ll
index 4bb7965..24ca9ae 100644
--- a/llvm/test/CodeGen/ARM/select.ll
+++ b/llvm/test/CodeGen/ARM/select.ll
@@ -1,14 +1,25 @@
-; RUN: llc -mtriple=arm-apple-darwin %s -o - | FileCheck %s
-
-; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - \
-; RUN: | FileCheck %s --check-prefix=CHECK-VFP
-
-; RUN: llc -mtriple=thumbv7-apple-darwin -mattr=+neon,+thumb2 %s -o - \
-; RUN: | FileCheck %s --check-prefix=CHECK-NEON
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=armv7-eabi -mattr=-fpregs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-ARM
+; RUN: llc -mtriple=armv7-eabi -mattr=+vfp2 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-VFP
+; RUN: llc -mtriple=thumbv7-apple-darwin -mattr=+neon,+thumb2 %s -o - | FileCheck %s --check-prefix=CHECK-NEON
define i32 @f1(i32 %a.s) {
-;CHECK-LABEL: f1:
-;CHECK: moveq
+; CHECK-LABEL: f1:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r1, #3
+; CHECK-NEXT: cmp r0, #4
+; CHECK-NEXT: movweq r1, #2
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f1:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r1, #3
+; CHECK-NEON-NEXT: cmp r0, #4
+; CHECK-NEON-NEXT: it eq
+; CHECK-NEON-NEXT: moveq r1, #2
+; CHECK-NEON-NEXT: mov r0, r1
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp eq i32 %a.s, 4
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -16,8 +27,22 @@ entry:
}
define i32 @f2(i32 %a.s) {
-;CHECK-LABEL: f2:
-;CHECK: movgt
+; CHECK-LABEL: f2:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r1, #3
+; CHECK-NEXT: cmp r0, #4
+; CHECK-NEXT: movwgt r1, #2
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f2:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r1, #3
+; CHECK-NEON-NEXT: cmp r0, #4
+; CHECK-NEON-NEXT: it gt
+; CHECK-NEON-NEXT: movgt r1, #2
+; CHECK-NEON-NEXT: mov r0, r1
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp sgt i32 %a.s, 4
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -25,8 +50,22 @@ entry:
}
define i32 @f3(i32 %a.s, i32 %b.s) {
-;CHECK-LABEL: f3:
-;CHECK: movlt
+; CHECK-LABEL: f3:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: cmp r0, r1
+; CHECK-NEXT: movwlt r2, #2
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f3:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r2, #3
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it lt
+; CHECK-NEON-NEXT: movlt r2, #2
+; CHECK-NEON-NEXT: mov r0, r2
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp slt i32 %a.s, %b.s
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -34,8 +73,22 @@ entry:
}
define i32 @f4(i32 %a.s, i32 %b.s) {
-;CHECK-LABEL: f4:
-;CHECK: movle
+; CHECK-LABEL: f4:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: cmp r0, r1
+; CHECK-NEXT: movwle r2, #2
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f4:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r2, #3
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it le
+; CHECK-NEON-NEXT: movle r2, #2
+; CHECK-NEON-NEXT: mov r0, r2
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp sle i32 %a.s, %b.s
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -43,8 +96,22 @@ entry:
}
define i32 @f5(i32 %a.u, i32 %b.u) {
-;CHECK-LABEL: f5:
-;CHECK: movls
+; CHECK-LABEL: f5:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: cmp r0, r1
+; CHECK-NEXT: movwls r2, #2
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f5:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r2, #3
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it ls
+; CHECK-NEON-NEXT: movls r2, #2
+; CHECK-NEON-NEXT: mov r0, r2
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp ule i32 %a.u, %b.u
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -52,8 +119,22 @@ entry:
}
define i32 @f6(i32 %a.u, i32 %b.u) {
-;CHECK-LABEL: f6:
-;CHECK: movhi
+; CHECK-LABEL: f6:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: cmp r0, r1
+; CHECK-NEXT: movwhi r2, #2
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f6:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r2, #3
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it hi
+; CHECK-NEON-NEXT: movhi r2, #2
+; CHECK-NEON-NEXT: mov r0, r2
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp ugt i32 %a.u, %b.u
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -61,11 +142,61 @@ entry:
}
define double @f7(double %a, double %b) {
-;CHECK-LABEL: f7:
-;CHECK: movmi
-;CHECK: movpl
-;CHECK-VFP-LABEL: f7:
-;CHECK-VFP: vmovmi
+; CHECK-ARM-LABEL: f7:
+; CHECK-ARM: @ %bb.0:
+; CHECK-ARM-NEXT: .save {r4, r5, r11, lr}
+; CHECK-ARM-NEXT: push {r4, r5, r11, lr}
+; CHECK-ARM-NEXT: mov r4, r3
+; CHECK-ARM-NEXT: movw r3, #48758
+; CHECK-ARM-NEXT: mov r5, r2
+; CHECK-ARM-NEXT: movw r2, #14680
+; CHECK-ARM-NEXT: movt r2, #51380
+; CHECK-ARM-NEXT: movt r3, #16371
+; CHECK-ARM-NEXT: bl __aeabi_dcmplt
+; CHECK-ARM-NEXT: cmp r0, #0
+; CHECK-ARM-NEXT: movwne r4, #0
+; CHECK-ARM-NEXT: movwne r5, #0
+; CHECK-ARM-NEXT: movtne r4, #49136
+; CHECK-ARM-NEXT: mov r0, r5
+; CHECK-ARM-NEXT: mov r1, r4
+; CHECK-ARM-NEXT: pop {r4, r5, r11, pc}
+;
+; CHECK-VFP-LABEL: f7:
+; CHECK-VFP: @ %bb.0:
+; CHECK-VFP-NEXT: vldr d17, .LCPI6_0
+; CHECK-VFP-NEXT: vmov d19, r0, r1
+; CHECK-VFP-NEXT: vmov.f64 d16, #-1.000000e+00
+; CHECK-VFP-NEXT: vcmp.f64 d19, d17
+; CHECK-VFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-VFP-NEXT: vmov d18, r2, r3
+; CHECK-VFP-NEXT: vmovmi.f64 d18, d16
+; CHECK-VFP-NEXT: vmov r0, r1, d18
+; CHECK-VFP-NEXT: bx lr
+; CHECK-VFP-NEXT: .p2align 3
+; CHECK-VFP-NEXT: @ %bb.1:
+; CHECK-VFP-NEXT: .LCPI6_0:
+; CHECK-VFP-NEXT: .long 3367254360 @ double 1.234
+; CHECK-VFP-NEXT: .long 1072938614
+;
+; CHECK-NEON-LABEL: f7:
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: vldr d17, LCPI6_0
+; CHECK-NEON-NEXT: vmov d19, r0, r1
+; CHECK-NEON-NEXT: vmov d18, r2, r3
+; CHECK-NEON-NEXT: vcmp.f64 d19, d17
+; CHECK-NEON-NEXT: vmov.f64 d16, #-1.000000e+00
+; CHECK-NEON-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEON-NEXT: it mi
+; CHECK-NEON-NEXT: vmovmi.f64 d18, d16
+; CHECK-NEON-NEXT: vmov r0, r1, d18
+; CHECK-NEON-NEXT: bx lr
+; CHECK-NEON-NEXT: .p2align 3
+; CHECK-NEON-NEXT: @ %bb.1:
+; CHECK-NEON-NEXT: .data_region
+; CHECK-NEON-NEXT: LCPI6_0:
+; CHECK-NEON-NEXT: .long 3367254360 @ double 1.234
+; CHECK-NEON-NEXT: .long 1072938614
+; CHECK-NEON-NEXT: .end_data_region
%tmp = fcmp olt double %a, 1.234e+00
%tmp1 = select i1 %tmp, double -1.000e+00, double %b
ret double %tmp1
@@ -77,18 +208,49 @@ define double @f7(double %a, double %b) {
; a lack of a custom lowering routine for an ISD::SELECT. This would result in
; two "it" blocks in the code: one for the "icmp" and another to move the index
; into the constant pool based on the value of the "icmp". If we have one "it"
-; block generated, odds are good that we have close to the ideal code for this:
+; block generated, odds are good that we have close to the ideal code for this.
+define arm_apcscc float @f8(i32 %a) nounwind {
+; CHECK-ARM-LABEL: f8:
+; CHECK-ARM: @ %bb.0:
+; CHECK-ARM-NEXT: movw r1, #29905
+; CHECK-ARM-NEXT: movw r2, #1123
+; CHECK-ARM-NEXT: movt r1, #16408
+; CHECK-ARM-NEXT: cmp r0, r2
+; CHECK-ARM-NEXT: movweq r1, #62390
+; CHECK-ARM-NEXT: movteq r1, #16285
+; CHECK-ARM-NEXT: mov r0, r1
+; CHECK-ARM-NEXT: bx lr
+;
+; CHECK-VFP-LABEL: f8:
+; CHECK-VFP: @ %bb.0:
+; CHECK-VFP-NEXT: movw r2, #1123
+; CHECK-VFP-NEXT: adr r1, .LCPI7_0
+; CHECK-VFP-NEXT: cmp r0, r2
+; CHECK-VFP-NEXT: addeq r1, r1, #4
+; CHECK-VFP-NEXT: ldr r0, [r1]
+; CHECK-VFP-NEXT: bx lr
+; CHECK-VFP-NEXT: .p2align 2
+; CHECK-VFP-NEXT: @ %bb.1:
+; CHECK-VFP-NEXT: .LCPI7_0:
+; CHECK-VFP-NEXT: .long 0x401874d1 @ float 2.38212991
+; CHECK-VFP-NEXT: .long 0x3f9df3b6 @ float 1.23399997
;
; CHECK-NEON-LABEL: f8:
-; CHECK-NEON: adr [[R2:r[0-9]+]], LCPI7_0
-; CHECK-NEON: movw [[R3:r[0-9]+]], #1123
-; CHECK-NEON-NEXT: cmp r0, [[R3]]
-; CHECK-NEON-NEXT: it eq
-; CHECK-NEON-NEXT: addeq{{.*}} [[R2]], #4
-; CHECK-NEON-NEXT: ldr
-; CHECK-NEON: bx
-
-define arm_apcscc float @f8(i32 %a) nounwind {
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: adr r1, LCPI7_0
+; CHECK-NEON-NEXT: movw r2, #1123
+; CHECK-NEON-NEXT: cmp r0, r2
+; CHECK-NEON-NEXT: it eq
+; CHECK-NEON-NEXT: addeq r1, #4
+; CHECK-NEON-NEXT: ldr r0, [r1]
+; CHECK-NEON-NEXT: bx lr
+; CHECK-NEON-NEXT: .p2align 2
+; CHECK-NEON-NEXT: @ %bb.1:
+; CHECK-NEON-NEXT: .data_region
+; CHECK-NEON-NEXT: LCPI7_0:
+; CHECK-NEON-NEXT: .long 0x401874d1 @ float 2.38212991
+; CHECK-NEON-NEXT: .long 0x3f9df3b6 @ float 1.23399997
+; CHECK-NEON-NEXT: .end_data_region
%tmp = icmp eq i32 %a, 1123
%tmp1 = select i1 %tmp, float 0x3FF3BE76C0000000, float 0x40030E9A20000000
ret float %tmp1
@@ -98,10 +260,40 @@ define arm_apcscc float @f8(i32 %a) nounwind {
; Glue values can only have a single use, but the following test exposed a
; case where a SELECT was lowered with 2 uses of a comparison, causing the
; scheduler to assert.
-; CHECK-VFP-LABEL: f9:
-
declare ptr @objc_msgSend(ptr, ptr, ...)
define void @f9() optsize {
+; CHECK-LABEL: f9:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: .pad #8
+; CHECK-NEXT: sub sp, sp, #8
+; CHECK-NEXT: movw r2, #0
+; CHECK-NEXT: movw r3, #0
+; CHECK-NEXT: mov r1, #1065353216
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: movt r2, #16672
+; CHECK-NEXT: movt r3, #32704
+; CHECK-NEXT: strd r0, r1, [sp]
+; CHECK-NEXT: bl objc_msgSend
+; CHECK-NEXT: add sp, sp, #8
+; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NEON-LABEL: f9:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: str lr, [sp, #-4]!
+; CHECK-NEON-NEXT: sub sp, #8
+; CHECK-NEON-NEXT: movs r2, #0
+; CHECK-NEON-NEXT: movs r3, #0
+; CHECK-NEON-NEXT: mov.w r0, #1065353216
+; CHECK-NEON-NEXT: movs r1, #0
+; CHECK-NEON-NEXT: movt r2, #16672
+; CHECK-NEON-NEXT: movt r3, #32704
+; CHECK-NEON-NEXT: strd r1, r0, [sp]
+; CHECK-NEON-NEXT: bl _objc_msgSend
+; CHECK-NEON-NEXT: add sp, #8
+; CHECK-NEON-NEXT: ldr lr, [sp], #4
+; CHECK-NEON-NEXT: bx lr
entry:
%cmp = icmp eq ptr undef, inttoptr (i32 4 to ptr)
%conv191 = select i1 %cmp, float -3.000000e+00, float 0.000000e+00
@@ -117,36 +309,151 @@ entry:
ret void
}
-; CHECK-LABEL: f10:
define float @f10(i32 %a, i32 %b) nounwind uwtable readnone ssp {
-; CHECK-NOT: floatsisf
+; CHECK-ARM-LABEL: f10:
+; CHECK-ARM: @ %bb.0:
+; CHECK-ARM-NEXT: mov r2, #0
+; CHECK-ARM-NEXT: cmp r0, r1
+; CHECK-ARM-NEXT: moveq r2, #1065353216
+; CHECK-ARM-NEXT: mov r0, r2
+; CHECK-ARM-NEXT: bx lr
+;
+; CHECK-VFP-LABEL: f10:
+; CHECK-VFP: @ %bb.0:
+; CHECK-VFP-NEXT: vmov.f32 s2, #1.000000e+00
+; CHECK-VFP-NEXT: vldr s0, .LCPI9_0
+; CHECK-VFP-NEXT: cmp r0, r1
+; CHECK-VFP-NEXT: vmoveq.f32 s0, s2
+; CHECK-VFP-NEXT: vmov r0, s0
+; CHECK-VFP-NEXT: bx lr
+; CHECK-VFP-NEXT: .p2align 2
+; CHECK-VFP-NEXT: @ %bb.1:
+; CHECK-VFP-NEXT: .LCPI9_0:
+; CHECK-VFP-NEXT: .long 0x00000000 @ float 0
+;
+; CHECK-NEON-LABEL: f10:
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: vldr s0, LCPI9_0
+; CHECK-NEON-NEXT: vmov.f32 s2, #1.000000e+00
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it eq
+; CHECK-NEON-NEXT: vmoveq.f32 s0, s2
+; CHECK-NEON-NEXT: vmov r0, s0
+; CHECK-NEON-NEXT: bx lr
+; CHECK-NEON-NEXT: .p2align 2
+; CHECK-NEON-NEXT: @ %bb.1:
+; CHECK-NEON-NEXT: .data_region
+; CHECK-NEON-NEXT: LCPI9_0:
+; CHECK-NEON-NEXT: .long 0x00000000 @ float 0
+; CHECK-NEON-NEXT: .end_data_region
%1 = icmp eq i32 %a, %b
%2 = zext i1 %1 to i32
%3 = sitofp i32 %2 to float
ret float %3
}
-; CHECK-LABEL: f11:
define float @f11(i32 %a, i32 %b) nounwind uwtable readnone ssp {
-; CHECK-NOT: floatsisf
+; CHECK-ARM-LABEL: f11:
+; CHECK-ARM: @ %bb.0:
+; CHECK-ARM-NEXT: mov r2, #0
+; CHECK-ARM-NEXT: cmp r0, r1
+; CHECK-ARM-NEXT: movweq r2, #0
+; CHECK-ARM-NEXT: movteq r2, #49024
+; CHECK-ARM-NEXT: mov r0, r2
+; CHECK-ARM-NEXT: bx lr
+;
+; CHECK-VFP-LABEL: f11:
+; CHECK-VFP: @ %bb.0:
+; CHECK-VFP-NEXT: vmov.f32 s2, #-1.000000e+00
+; CHECK-VFP-NEXT: vldr s0, .LCPI10_0
+; CHECK-VFP-NEXT: cmp r0, r1
+; CHECK-VFP-NEXT: vmoveq.f32 s0, s2
+; CHECK-VFP-NEXT: vmov r0, s0
+; CHECK-VFP-NEXT: bx lr
+; CHECK-VFP-NEXT: .p2align 2
+; CHECK-VFP-NEXT: @ %bb.1:
+; CHECK-VFP-NEXT: .LCPI10_0:
+; CHECK-VFP-NEXT: .long 0x00000000 @ float 0
+;
+; CHECK-NEON-LABEL: f11:
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: vldr s0, LCPI10_0
+; CHECK-NEON-NEXT: vmov.f32 s2, #-1.000000e+00
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it eq
+; CHECK-NEON-NEXT: vmoveq.f32 s0, s2
+; CHECK-NEON-NEXT: vmov r0, s0
+; CHECK-NEON-NEXT: bx lr
+; CHECK-NEON-NEXT: .p2align 2
+; CHECK-NEON-NEXT: @ %bb.1:
+; CHECK-NEON-NEXT: .data_region
+; CHECK-NEON-NEXT: LCPI10_0:
+; CHECK-NEON-NEXT: .long 0x00000000 @ float 0
+; CHECK-NEON-NEXT: .end_data_region
%1 = icmp eq i32 %a, %b
%2 = sitofp i1 %1 to float
ret float %2
}
-; CHECK-LABEL: f12:
define float @f12(i32 %a, i32 %b) nounwind uwtable readnone ssp {
-; CHECK-NOT: floatunsisf
+; CHECK-ARM-LABEL: f12:
+; CHECK-ARM: @ %bb.0:
+; CHECK-ARM-NEXT: mov r2, #0
+; CHECK-ARM-NEXT: cmp r0, r1
+; CHECK-ARM-NEXT: moveq r2, #1065353216
+; CHECK-ARM-NEXT: mov r0, r2
+; CHECK-ARM-NEXT: bx lr
+;
+; CHECK-VFP-LABEL: f12:
+; CHECK-VFP: @ %bb.0:
+; CHECK-VFP-NEXT: vmov.f32 s2, #1.000000e+00
+; CHECK-VFP-NEXT: vldr s0, .LCPI11_0
+; CHECK-VFP-NEXT: cmp r0, r1
+; CHECK-VFP-NEXT: vmoveq.f32 s0, s2
+; CHECK-VFP-NEXT: vmov r0, s0
+; CHECK-VFP-NEXT: bx lr
+; CHECK-VFP-NEXT: .p2align 2
+; CHECK-VFP-NEXT: @ %bb.1:
+; CHECK-VFP-NEXT: .LCPI11_0:
+; CHECK-VFP-NEXT: .long 0x00000000 @ float 0
+;
+; CHECK-NEON-LABEL: f12:
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: vldr s0, LCPI11_0
+; CHECK-NEON-NEXT: vmov.f32 s2, #1.000000e+00
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it eq
+; CHECK-NEON-NEXT: vmoveq.f32 s0, s2
+; CHECK-NEON-NEXT: vmov r0, s0
+; CHECK-NEON-NEXT: bx lr
+; CHECK-NEON-NEXT: .p2align 2
+; CHECK-NEON-NEXT: @ %bb.1:
+; CHECK-NEON-NEXT: .data_region
+; CHECK-NEON-NEXT: LCPI11_0:
+; CHECK-NEON-NEXT: .long 0x00000000 @ float 0
+; CHECK-NEON-NEXT: .end_data_region
%1 = icmp eq i32 %a, %b
%2 = uitofp i1 %1 to float
ret float %2
}
-; CHECK-LABEL: test_overflow_recombine:
define i1 @test_overflow_recombine(i32 %in1, i32 %in2) {
-; CHECK: smull [[LO:r[0-9]+]], [[HI:r[0-9]+]]
-; CHECK: subs [[ZERO:r[0-9]+]], [[HI]], [[LO]], asr #31
-; CHECK: movne [[ZERO]], #1
+; CHECK-LABEL: test_overflow_recombine:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mul r2, r0, r1
+; CHECK-NEXT: smmul r0, r0, r1
+; CHECK-NEXT: subs r0, r0, r2, asr #31
+; CHECK-NEXT: movwne r0, #1
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: test_overflow_recombine:
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: mul r2, r0, r1
+; CHECK-NEON-NEXT: smmul r0, r0, r1
+; CHECK-NEON-NEXT: subs.w r0, r0, r2, asr #31
+; CHECK-NEON-NEXT: it ne
+; CHECK-NEON-NEXT: movne r0, #1
+; CHECK-NEON-NEXT: bx lr
%prod = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %in1, i32 %in2)
%overflow = extractvalue { i32, i1 } %prod, 1
ret i1 %overflow
diff --git a/llvm/test/CodeGen/AVR/bug-81911.ll b/llvm/test/CodeGen/AVR/bug-81911.ll
new file mode 100644
index 0000000..2a22666
--- /dev/null
+++ b/llvm/test/CodeGen/AVR/bug-81911.ll
@@ -0,0 +1,163 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=avr -mcpu=atmega328 -O1 -verify-machineinstrs | FileCheck %s
+
+define internal i8 @main() {
+; CHECK-LABEL: main:
+; CHECK: ; %bb.0: ; %bb0
+; CHECK-NEXT: push r2
+; CHECK-NEXT: push r3
+; CHECK-NEXT: push r4
+; CHECK-NEXT: push r5
+; CHECK-NEXT: push r6
+; CHECK-NEXT: push r7
+; CHECK-NEXT: push r8
+; CHECK-NEXT: push r9
+; CHECK-NEXT: push r10
+; CHECK-NEXT: push r11
+; CHECK-NEXT: push r12
+; CHECK-NEXT: push r13
+; CHECK-NEXT: push r14
+; CHECK-NEXT: push r15
+; CHECK-NEXT: push r16
+; CHECK-NEXT: push r17
+; CHECK-NEXT: push r28
+; CHECK-NEXT: push r29
+; CHECK-NEXT: in r28, 61
+; CHECK-NEXT: in r29, 62
+; CHECK-NEXT: sbiw r28, 13
+; CHECK-NEXT: in r0, 63
+; CHECK-NEXT: cli
+; CHECK-NEXT: out 62, r29
+; CHECK-NEXT: out 63, r0
+; CHECK-NEXT: out 61, r28
+; CHECK-NEXT: ldi r16, 0
+; CHECK-NEXT: ldi r17, 0
+; CHECK-NEXT: ldi r18, -1
+; CHECK-NEXT: ;APP
+; CHECK-NEXT: ldi r24, 123
+; CHECK-NEXT: ;NO_APP
+; CHECK-NEXT: std Y+1, r24 ; 1-byte Folded Spill
+; CHECK-NEXT: movw r24, r28
+; CHECK-NEXT: adiw r24, 6
+; CHECK-NEXT: std Y+3, r25 ; 2-byte Folded Spill
+; CHECK-NEXT: std Y+2, r24 ; 2-byte Folded Spill
+; CHECK-NEXT: movw r8, r16
+; CHECK-NEXT: movw r6, r16
+; CHECK-NEXT: movw r4, r16
+; CHECK-NEXT: movw r2, r16
+; CHECK-NEXT: rjmp .LBB0_2
+; CHECK-NEXT: .LBB0_1: ; %bb1
+; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: andi r30, 1
+; CHECK-NEXT: ldd r31, Y+4 ; 1-byte Folded Reload
+; CHECK-NEXT: dec r31
+; CHECK-NEXT: cpi r30, 0
+; CHECK-NEXT: movw r8, r18
+; CHECK-NEXT: movw r6, r20
+; CHECK-NEXT: movw r4, r22
+; CHECK-NEXT: movw r2, r24
+; CHECK-NEXT: mov r18, r31
+; CHECK-NEXT: brne .LBB0_2
+; CHECK-NEXT: rjmp .LBB0_4
+; CHECK-NEXT: .LBB0_2: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: std Y+4, r18 ; 1-byte Folded Spill
+; CHECK-NEXT: movw r18, r8
+; CHECK-NEXT: movw r20, r6
+; CHECK-NEXT: movw r22, r4
+; CHECK-NEXT: movw r24, r2
+; CHECK-NEXT: ldi r26, 10
+; CHECK-NEXT: ldi r27, 0
+; CHECK-NEXT: movw r10, r26
+; CHECK-NEXT: movw r12, r16
+; CHECK-NEXT: movw r14, r16
+; CHECK-NEXT: call __udivdi3
+; CHECK-NEXT: std Y+13, r25
+; CHECK-NEXT: std Y+12, r24
+; CHECK-NEXT: std Y+11, r23
+; CHECK-NEXT: std Y+10, r22
+; CHECK-NEXT: std Y+9, r21
+; CHECK-NEXT: std Y+8, r20
+; CHECK-NEXT: std Y+7, r19
+; CHECK-NEXT: std Y+6, r18
+; CHECK-NEXT: ldd r30, Y+2 ; 2-byte Folded Reload
+; CHECK-NEXT: ldd r31, Y+3 ; 2-byte Folded Reload
+; CHECK-NEXT: ;APP
+; CHECK-NEXT: ;NO_APP
+; CHECK-NEXT: ldi r30, 1
+; CHECK-NEXT: cp r8, r1
+; CHECK-NEXT: cpc r9, r1
+; CHECK-NEXT: cpc r6, r16
+; CHECK-NEXT: cpc r7, r17
+; CHECK-NEXT: cpc r4, r16
+; CHECK-NEXT: cpc r5, r17
+; CHECK-NEXT: cpc r2, r16
+; CHECK-NEXT: cpc r3, r17
+; CHECK-NEXT: breq .LBB0_3
+; CHECK-NEXT: rjmp .LBB0_1
+; CHECK-NEXT: .LBB0_3: ; %bb1
+; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: mov r30, r1
+; CHECK-NEXT: rjmp .LBB0_1
+; CHECK-NEXT: .LBB0_4: ; %bb3
+; CHECK-NEXT: ldd r24, Y+1 ; 1-byte Folded Reload
+; CHECK-NEXT: std Y+5, r24
+; CHECK-NEXT: movw r24, r28
+; CHECK-NEXT: adiw r24, 5
+; CHECK-NEXT: ;APP
+; CHECK-NEXT: ;NO_APP
+; CHECK-NEXT: ldd r24, Y+5
+; CHECK-NEXT: adiw r28, 13
+; CHECK-NEXT: in r0, 63
+; CHECK-NEXT: cli
+; CHECK-NEXT: out 62, r29
+; CHECK-NEXT: out 63, r0
+; CHECK-NEXT: out 61, r28
+; CHECK-NEXT: pop r29
+; CHECK-NEXT: pop r28
+; CHECK-NEXT: pop r17
+; CHECK-NEXT: pop r16
+; CHECK-NEXT: pop r15
+; CHECK-NEXT: pop r14
+; CHECK-NEXT: pop r13
+; CHECK-NEXT: pop r12
+; CHECK-NEXT: pop r11
+; CHECK-NEXT: pop r10
+; CHECK-NEXT: pop r9
+; CHECK-NEXT: pop r8
+; CHECK-NEXT: pop r7
+; CHECK-NEXT: pop r6
+; CHECK-NEXT: pop r5
+; CHECK-NEXT: pop r4
+; CHECK-NEXT: pop r3
+; CHECK-NEXT: pop r2
+; CHECK-NEXT: ret
+bb0:
+ %0 = alloca i64
+ %1 = alloca i8
+ %2 = tail call i8 asm sideeffect "ldi ${0}, 123", "=&r,~{sreg},~{memory}"()
+
+ br label %bb1
+
+bb1:
+ %3 = phi i64 [ %5, %bb1 ], [ 0, %bb0 ]
+ %4 = phi i8 [ %6, %bb1 ], [ 0, %bb0 ]
+ %5 = udiv i64 %3, 10
+ %6 = add i8 %4, 1
+
+ store i64 %5, ptr %0
+ call void asm sideeffect "", "r,~{memory}"(ptr %0)
+
+ %7 = icmp eq i64 %3, 0
+ %8 = icmp eq i8 %6, 0
+
+ br i1 %7, label %bb3, label %bb1
+
+bb3:
+ store i8 %2, ptr %1
+ call void asm sideeffect "", "r,~{memory}"(ptr %1)
+
+ %9 = load i8, ptr %1
+
+ ret i8 %9
+}
diff --git a/llvm/test/CodeGen/BPF/addr-space-globals.ll b/llvm/test/CodeGen/BPF/addr-space-globals.ll
index 878ba0d..73e80b7 100644
--- a/llvm/test/CodeGen/BPF/addr-space-globals.ll
+++ b/llvm/test/CodeGen/BPF/addr-space-globals.ll
@@ -18,7 +18,7 @@
; Verify that a,b,c reside in the same section
-; CHECK: .section .arena.272,"aw",@progbits
+; CHECK: .section .addr_space.272,"aw",@progbits
; CHECK-NOT: .section
; CHECK: .globl a
; CHECK: .ascii "\001\002"
diff --git a/llvm/test/CodeGen/BPF/addr-space-globals2.ll b/llvm/test/CodeGen/BPF/addr-space-globals2.ll
index d1e2318..5944cb2 100644
--- a/llvm/test/CodeGen/BPF/addr-space-globals2.ll
+++ b/llvm/test/CodeGen/BPF/addr-space-globals2.ll
@@ -14,12 +14,12 @@
; Verify that a,b reside in separate sections
-; CHECK: .section .arena.1,"aw",@progbits
+; CHECK: .section .addr_space.1,"aw",@progbits
; CHECK-NOT: .section
; CHECK: .globl a
; CHECK: .ascii "\001\002"
-; CHECK: .section .arena.2,"aw",@progbits
+; CHECK: .section .addr_space.2,"aw",@progbits
; CHECK-NOT: .section
; CHECK: .globl b
; CHECK: .ascii "\003\004"
diff --git a/llvm/test/CodeGen/BPF/cttz-ctlz.ll b/llvm/test/CodeGen/BPF/cttz-ctlz.ll
new file mode 100644
index 0000000..f42b2e2
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/cttz-ctlz.ll
@@ -0,0 +1,304 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -march=bpf | FileCheck %s
+
+; test that we can expand CTTZ & CTLZ
+
+declare i32 @llvm.cttz.i32(i32, i1)
+
+define i32 @cttz_i32_zdef(i32 %a) {
+; CHECK-LABEL: cttz_i32_zdef:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r1 *= 125613361
+; CHECK-NEXT: r2 = 4160749568 ll
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r1 >>= 27
+; CHECK-NEXT: r2 = {{\.?LCPI[0-9]+_[0-9]+}} ll
+; CHECK-NEXT: r2 += r1
+; CHECK-NEXT: r0 = *(u8 *)(r2 + 0)
+; CHECK-NEXT: exit
+ %ret = call i32 @llvm.cttz.i32(i32 %a, i1 1)
+ ret i32 %ret
+}
+
+
+define i32 @cttz_i32(i32 %a) {
+; CHECK-LABEL: cttz_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r0 = 32
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 <<= 32
+; CHECK-NEXT: r2 >>= 32
+; CHECK-NEXT: if r2 == 0 goto LBB1_2
+; CHECK-NEXT: # %bb.1: # %cond.false
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r1 *= 125613361
+; CHECK-NEXT: r2 = 4160749568 ll
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r1 >>= 27
+; CHECK-NEXT: r2 = {{\.?LCPI[0-9]+_[0-9]+}} ll
+; CHECK-NEXT: r2 += r1
+; CHECK-NEXT: r0 = *(u8 *)(r2 + 0)
+; CHECK-NEXT: LBB1_2: # %cond.end
+; CHECK-NEXT: exit
+ %ret = call i32 @llvm.cttz.i32(i32 %a, i1 0)
+ ret i32 %ret
+}
+
+declare i64 @llvm.cttz.i64(i64, i1)
+
+define i64 @cttz_i64_zdef(i64 %a) {
+; CHECK-LABEL: cttz_i64_zdef:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r2 = 151050438420815295 ll
+; CHECK-NEXT: r1 *= r2
+; CHECK-NEXT: r1 >>= 58
+; CHECK-NEXT: r2 = {{\.?LCPI[0-9]+_[0-9]+}} ll
+; CHECK-NEXT: r2 += r1
+; CHECK-NEXT: r0 = *(u8 *)(r2 + 0)
+; CHECK-NEXT: exit
+ %ret = call i64 @llvm.cttz.i64(i64 %a, i1 1)
+ ret i64 %ret
+}
+
+
+define i64 @cttz_i64(i64 %a) {
+; CHECK-LABEL: cttz_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r0 = 64
+; CHECK-NEXT: if r1 == 0 goto LBB3_2
+; CHECK-NEXT: # %bb.1: # %cond.false
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r2 = 151050438420815295 ll
+; CHECK-NEXT: r1 *= r2
+; CHECK-NEXT: r1 >>= 58
+; CHECK-NEXT: r2 = {{\.?LCPI[0-9]+_[0-9]+}} ll
+; CHECK-NEXT: r2 += r1
+; CHECK-NEXT: r0 = *(u8 *)(r2 + 0)
+; CHECK-NEXT: LBB3_2: # %cond.end
+; CHECK-NEXT: exit
+ %ret = call i64 @llvm.cttz.i64(i64 %a, i1 0)
+ ret i64 %ret
+}
+
+
+declare i32 @llvm.ctlz.i32(i32, i1)
+
+define i32 @ctlz_i32_zdef(i32 %a) {
+; CHECK-LABEL: ctlz_i32_zdef:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r2 = 4294967294 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 1
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967292 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 2
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967280 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 4
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967040 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 8
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294901760 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 16
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r1 ^= -1
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 1
+; CHECK-NEXT: r2 &= 1431655765
+; CHECK-NEXT: r1 -= r2
+; CHECK-NEXT: r0 = r1
+; CHECK-NEXT: r0 &= 858993459
+; CHECK-NEXT: r1 >>= 2
+; CHECK-NEXT: r1 &= 858993459
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = r0
+; CHECK-NEXT: r1 >>= 4
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r0 &= 252645135
+; CHECK-NEXT: r0 *= 16843009
+; CHECK-NEXT: r1 = 4278190080 ll
+; CHECK-NEXT: r0 &= r1
+; CHECK-NEXT: r0 >>= 24
+; CHECK-NEXT: exit
+ %ret = call i32 @llvm.ctlz.i32(i32 %a, i1 1)
+ ret i32 %ret
+}
+
+
+define i32 @ctlz_i32(i32 %a) {
+; CHECK-LABEL: ctlz_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r0 = 32
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 <<= 32
+; CHECK-NEXT: r2 >>= 32
+; CHECK-NEXT: if r2 == 0 goto LBB5_2
+; CHECK-NEXT: # %bb.1: # %cond.false
+; CHECK-NEXT: r2 = 4294967294 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 1
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967292 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 2
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967280 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 4
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967040 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 8
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294901760 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 16
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r1 ^= -1
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 1
+; CHECK-NEXT: r2 &= 1431655765
+; CHECK-NEXT: r1 -= r2
+; CHECK-NEXT: r0 = r1
+; CHECK-NEXT: r0 &= 858993459
+; CHECK-NEXT: r1 >>= 2
+; CHECK-NEXT: r1 &= 858993459
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = r0
+; CHECK-NEXT: r1 >>= 4
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r0 &= 252645135
+; CHECK-NEXT: r0 *= 16843009
+; CHECK-NEXT: r1 = 4278190080 ll
+; CHECK-NEXT: r0 &= r1
+; CHECK-NEXT: r0 >>= 24
+; CHECK-NEXT: LBB5_2: # %cond.end
+; CHECK-NEXT: exit
+ %ret = call i32 @llvm.ctlz.i32(i32 %a, i1 0)
+ ret i32 %ret
+}
+
+declare i64 @llvm.ctlz.i64(i64, i1)
+
+define i64 @ctlz_i64_zdef(i64 %a) {
+; CHECK-LABEL: ctlz_i64_zdef:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 1
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 2
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 4
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 8
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 16
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 32
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r1 ^= -1
+; CHECK-NEXT: r2 = 6148914691236517205 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 >>= 1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r1 -= r3
+; CHECK-NEXT: r2 = 3689348814741910323 ll
+; CHECK-NEXT: r0 = r1
+; CHECK-NEXT: r0 &= r2
+; CHECK-NEXT: r1 >>= 2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = r0
+; CHECK-NEXT: r1 >>= 4
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = 1085102592571150095 ll
+; CHECK-NEXT: r0 &= r1
+; CHECK-NEXT: r1 = 72340172838076673 ll
+; CHECK-NEXT: r0 *= r1
+; CHECK-NEXT: r0 >>= 56
+; CHECK-NEXT: exit
+ %ret = call i64 @llvm.ctlz.i64(i64 %a, i1 1)
+ ret i64 %ret
+}
+
+
+define i64 @ctlz_i64(i64 %a) {
+; CHECK-LABEL: ctlz_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r0 = 64
+; CHECK-NEXT: if r1 == 0 goto LBB7_2
+; CHECK-NEXT: # %bb.1: # %cond.false
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 1
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 2
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 4
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 8
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 16
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 32
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r1 ^= -1
+; CHECK-NEXT: r2 = 6148914691236517205 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 >>= 1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r1 -= r3
+; CHECK-NEXT: r2 = 3689348814741910323 ll
+; CHECK-NEXT: r0 = r1
+; CHECK-NEXT: r0 &= r2
+; CHECK-NEXT: r1 >>= 2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = r0
+; CHECK-NEXT: r1 >>= 4
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = 1085102592571150095 ll
+; CHECK-NEXT: r0 &= r1
+; CHECK-NEXT: r1 = 72340172838076673 ll
+; CHECK-NEXT: r0 *= r1
+; CHECK-NEXT: r0 >>= 56
+; CHECK-NEXT: LBB7_2: # %cond.end
+; CHECK-NEXT: exit
+ %ret = call i64 @llvm.ctlz.i64(i64 %a, i1 0)
+ ret i64 %ret
+}
+
diff --git a/llvm/test/CodeGen/DirectX/ShaderFlags/double-extensions.ll b/llvm/test/CodeGen/DirectX/ShaderFlags/double-extensions.ll
index 865fefe..d027216 100644
--- a/llvm/test/CodeGen/DirectX/ShaderFlags/double-extensions.ll
+++ b/llvm/test/CodeGen/DirectX/ShaderFlags/double-extensions.ll
@@ -3,10 +3,11 @@
target triple = "dxil-pc-shadermodel6.7-library"
-; CHECK: ; Shader Flags Value: 0x00000021
+; CHECK: ; Shader Flags Value: 0x00000044
; CHECK: ; Note: shader requires additional functionality:
; CHECK-NEXT: ; Double-precision floating point
; CHECK-NEXT: ; Double-precision extensions for 11.1
+; CHECK-NEXT: ; Note: extra DXIL module flags:
; CHECK-NEXT: {{^;$}}
define double @div(double %a, double %b) {
%res = fdiv double %a, %b
diff --git a/llvm/test/CodeGen/DirectX/ShaderFlags/doubles.ll b/llvm/test/CodeGen/DirectX/ShaderFlags/doubles.ll
index f90db61..c1a4c21 100644
--- a/llvm/test/CodeGen/DirectX/ShaderFlags/doubles.ll
+++ b/llvm/test/CodeGen/DirectX/ShaderFlags/doubles.ll
@@ -3,10 +3,12 @@
target triple = "dxil-pc-shadermodel6.7-library"
-; CHECK: ; Shader Flags Value: 0x00000001
+; CHECK: ; Shader Flags Value: 0x00000004
; CHECK: ; Note: shader requires additional functionality:
; CHECK-NEXT: ; Double-precision floating point
+; CHECK-NEXT: ; Note: extra DXIL module flags:
; CHECK-NEXT: {{^;$}}
+
define double @add(double %a, double %b) {
%sum = fadd double %a, %b
ret double %sum
diff --git a/llvm/test/CodeGen/DirectX/abs-vec.ll b/llvm/test/CodeGen/DirectX/abs-vec.ll
new file mode 100644
index 0000000..1c40555
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/abs-vec.ll
@@ -0,0 +1,34 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s
+
+; Make sure dxil operation function calls for abs are generated for int vectors.
+
+; CHECK-LABEL: abs_i16Vec2
+define noundef <2 x i16> @abs_i16Vec2(<2 x i16> noundef %a) #0 {
+entry:
+; CHECK: sub <2 x i16> zeroinitializer, %a
+; CHECK: call <2 x i16> @llvm.smax.v2i16(<2 x i16> %a, <2 x i16> %{{.*}})
+ %elt.abs = call <2 x i16> @llvm.abs.v2i16(<2 x i16> %a, i1 false)
+ ret <2 x i16> %elt.abs
+}
+
+; CHECK-LABEL: abs_i32Vec3
+define noundef <3 x i32> @abs_i32Vec3(<3 x i32> noundef %a) #0 {
+entry:
+; CHECK: sub <3 x i32> zeroinitializer, %a
+; CHECK: call <3 x i32> @llvm.smax.v3i32(<3 x i32> %a, <3 x i32> %{{.*}})
+ %elt.abs = call <3 x i32> @llvm.abs.v3i32(<3 x i32> %a, i1 false)
+ ret <3 x i32> %elt.abs
+}
+
+; CHECK-LABEL: abs_i64Vec4
+define noundef <4 x i64> @abs_i64Vec4(<4 x i64> noundef %a) #0 {
+entry:
+; CHECK: sub <4 x i64> zeroinitializer, %a
+; CHECK: call <4 x i64> @llvm.smax.v4i64(<4 x i64> %a, <4 x i64> %{{.*}})
+ %elt.abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false)
+ ret <4 x i64> %elt.abs
+}
+
+declare <2 x i16> @llvm.abs.v2i16(<2 x i16>, i1 immarg)
+declare <3 x i32> @llvm.abs.v3i32(<3 x i32>, i1 immarg)
+declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1 immarg)
diff --git a/llvm/test/CodeGen/DirectX/abs.ll b/llvm/test/CodeGen/DirectX/abs.ll
new file mode 100644
index 0000000..822580e
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/abs.ll
@@ -0,0 +1,38 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s --check-prefixes=CHECK,EXPCHECK
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s --check-prefixes=CHECK,DOPCHECK
+
+; Make sure dxil operation function calls for abs are generated for int16_t/int/int64_t.
+
+; CHECK-LABEL: abs_i16
+define noundef i16 @abs_i16(i16 noundef %a) {
+entry:
+; CHECK: sub i16 0, %a
+; EXPCHECK: call i16 @llvm.smax.i16(i16 %a, i16 %{{.*}})
+; DOPCHECK: call i16 @dx.op.binary.i16(i32 37, i16 %a, i16 %{{.*}})
+ %elt.abs = call i16 @llvm.abs.i16(i16 %a, i1 false)
+ ret i16 %elt.abs
+}
+
+; CHECK-LABEL: abs_i32
+define noundef i32 @abs_i32(i32 noundef %a) {
+entry:
+; CHECK: sub i32 0, %a
+; EXPCHECK: call i32 @llvm.smax.i32(i32 %a, i32 %{{.*}})
+; DOPCHECK: call i32 @dx.op.binary.i32(i32 37, i32 %a, i32 %{{.*}})
+ %elt.abs = call i32 @llvm.abs.i32(i32 %a, i1 false)
+ ret i32 %elt.abs
+}
+
+; CHECK-LABEL: abs_i64
+define noundef i64 @abs_i64(i64 noundef %a) {
+entry:
+; CHECK: sub i64 0, %a
+; EXPCHECK: call i64 @llvm.smax.i64(i64 %a, i64 %{{.*}})
+; DOPCHECK: call i64 @dx.op.binary.i64(i32 37, i64 %a, i64 %{{.*}})
+ %elt.abs = call i64 @llvm.abs.i64(i64 %a, i1 false)
+ ret i64 %elt.abs
+}
+
+declare i16 @llvm.abs.i16(i16, i1 immarg)
+declare i32 @llvm.abs.i32(i32, i1 immarg)
+declare i64 @llvm.abs.i64(i64, i1 immarg)
diff --git a/llvm/test/CodeGen/DirectX/any.ll b/llvm/test/CodeGen/DirectX/any.ll
new file mode 100644
index 0000000..e8d8707
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/any.ll
@@ -0,0 +1,113 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for any are generated for float and half.
+
+; CHECK-LABEL: any_bool
+; CHECK: icmp ne i1 %{{.*}}, false
+define noundef i1 @any_bool(i1 noundef %p0) {
+entry:
+ %p0.addr = alloca i8, align 1
+ %frombool = zext i1 %p0 to i8
+ store i8 %frombool, ptr %p0.addr, align 1
+ %0 = load i8, ptr %p0.addr, align 1
+ %tobool = trunc i8 %0 to i1
+ %dx.any = call i1 @llvm.dx.any.i1(i1 %tobool)
+ ret i1 %dx.any
+}
+
+; CHECK-LABEL: any_int64_t
+; CHECK: icmp ne i64 %{{.*}}, 0
+define noundef i1 @any_int64_t(i64 noundef %p0) {
+entry:
+ %p0.addr = alloca i64, align 8
+ store i64 %p0, ptr %p0.addr, align 8
+ %0 = load i64, ptr %p0.addr, align 8
+ %dx.any = call i1 @llvm.dx.any.i64(i64 %0)
+ ret i1 %dx.any
+}
+
+; CHECK-LABEL: any_int
+; CHECK: icmp ne i32 %{{.*}}, 0
+define noundef i1 @any_int(i32 noundef %p0) {
+entry:
+ %p0.addr = alloca i32, align 4
+ store i32 %p0, ptr %p0.addr, align 4
+ %0 = load i32, ptr %p0.addr, align 4
+ %dx.any = call i1 @llvm.dx.any.i32(i32 %0)
+ ret i1 %dx.any
+}
+
+; CHECK-LABEL: any_int16_t
+; CHECK: icmp ne i16 %{{.*}}, 0
+define noundef i1 @any_int16_t(i16 noundef %p0) {
+entry:
+ %p0.addr = alloca i16, align 2
+ store i16 %p0, ptr %p0.addr, align 2
+ %0 = load i16, ptr %p0.addr, align 2
+ %dx.any = call i1 @llvm.dx.any.i16(i16 %0)
+ ret i1 %dx.any
+}
+
+; CHECK-LABEL: any_double
+; CHECK: fcmp une double %{{.*}}, 0.000000e+00
+define noundef i1 @any_double(double noundef %p0) {
+entry:
+ %p0.addr = alloca double, align 8
+ store double %p0, ptr %p0.addr, align 8
+ %0 = load double, ptr %p0.addr, align 8
+ %dx.any = call i1 @llvm.dx.any.f64(double %0)
+ ret i1 %dx.any
+}
+
+; CHECK-LABEL: any_float
+; CHECK: fcmp une float %{{.*}}, 0.000000e+00
+define noundef i1 @any_float(float noundef %p0) {
+entry:
+ %p0.addr = alloca float, align 4
+ store float %p0, ptr %p0.addr, align 4
+ %0 = load float, ptr %p0.addr, align 4
+ %dx.any = call i1 @llvm.dx.any.f32(float %0)
+ ret i1 %dx.any
+}
+
+; CHECK-LABEL: any_half
+; CHECK: fcmp une half %{{.*}}, 0xH0000
+define noundef i1 @any_half(half noundef %p0) {
+entry:
+ %p0.addr = alloca half, align 2
+ store half %p0, ptr %p0.addr, align 2
+ %0 = load half, ptr %p0.addr, align 2
+ %dx.any = call i1 @llvm.dx.any.f16(half %0)
+ ret i1 %dx.any
+}
+
+; CHECK-LABEL: any_bool4
+; CHECK: icmp ne <4 x i1> %extractvec, zeroinitializer
+; CHECK: extractelement <4 x i1> %{{.*}}, i64 0
+; CHECK: extractelement <4 x i1> %{{.*}}, i64 1
+; CHECK: or i1 %{{.*}}, %{{.*}}
+; CHECK: extractelement <4 x i1> %{{.*}}, i64 2
+; CHECK: or i1 %{{.*}}, %{{.*}}
+; CHECK: extractelement <4 x i1> %{{.*}}, i64 3
+; CHECK: or i1 %{{.*}}, %{{.*}}
+define noundef i1 @any_bool4(<4 x i1> noundef %p0) {
+entry:
+ %p0.addr = alloca i8, align 1
+ %insertvec = shufflevector <4 x i1> %p0, <4 x i1> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+ %0 = bitcast <8 x i1> %insertvec to i8
+ store i8 %0, ptr %p0.addr, align 1
+ %load_bits = load i8, ptr %p0.addr, align 1
+ %1 = bitcast i8 %load_bits to <8 x i1>
+ %extractvec = shufflevector <8 x i1> %1, <8 x i1> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %dx.any = call i1 @llvm.dx.any.v4i1(<4 x i1> %extractvec)
+ ret i1 %dx.any
+}
+
+declare i1 @llvm.dx.any.v4i1(<4 x i1>)
+declare i1 @llvm.dx.any.i1(i1)
+declare i1 @llvm.dx.any.i16(i16)
+declare i1 @llvm.dx.any.i32(i32)
+declare i1 @llvm.dx.any.i64(i64)
+declare i1 @llvm.dx.any.f16(half)
+declare i1 @llvm.dx.any.f32(float)
+declare i1 @llvm.dx.any.f64(double)
diff --git a/llvm/test/CodeGen/DirectX/ceil.ll b/llvm/test/CodeGen/DirectX/ceil.ll
new file mode 100644
index 0000000..1585471
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ceil.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for ceil are generated for float and half.
+
+define noundef float @ceil_float(float noundef %a) {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 28, float %{{.*}})
+ %elt.ceil = call float @llvm.ceil.f32(float %a)
+ ret float %elt.ceil
+}
+
+define noundef half @ceil_half(half noundef %a) {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 28, half %{{.*}})
+ %elt.ceil = call half @llvm.ceil.f16(half %a)
+ ret half %elt.ceil
+}
+
+declare half @llvm.ceil.f16(half)
+declare float @llvm.ceil.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/ceil_error.ll b/llvm/test/CodeGen/DirectX/ceil_error.ll
new file mode 100644
index 0000000..1b554d8
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ceil_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation ceil does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @ceil_double(double noundef %a) {
+entry:
+ %elt.ceil = call double @llvm.ceil.f64(double %a)
+ ret double %elt.ceil
+}
diff --git a/llvm/test/CodeGen/DirectX/clamp-vec.ll b/llvm/test/CodeGen/DirectX/clamp-vec.ll
new file mode 100644
index 0000000..d4f33a1
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/clamp-vec.ll
@@ -0,0 +1,74 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s
+
+; Make sure dxil operation function calls for clamp are generated for float/int/uint vectors.
+
+; CHECK-LABEL: clamp_half3
+define noundef <3 x half> @clamp_half3(<3 x half> noundef %a, <3 x half> noundef %b, <3 x half> noundef %c) {
+entry:
+ ; CHECK: call <3 x half> @llvm.maxnum.v3f16(<3 x half> %a, <3 x half> %b)
+ ; CHECK: call <3 x half> @llvm.minnum.v3f16(<3 x half> %{{.*}}, <3 x half> %c)
+ %dx.clamp = call <3 x half> @llvm.dx.clamp.v3f16(<3 x half> %a, <3 x half> %b, <3 x half> %c)
+ ret <3 x half> %dx.clamp
+}
+
+; CHECK-LABEL: clamp_float4
+define noundef <4 x float> @clamp_float4(<4 x float> noundef %a, <4 x float> noundef %b, <4 x float> noundef %c) {
+entry:
+ ; CHECK: call <4 x float> @llvm.maxnum.v4f32(<4 x float> %a, <4 x float> %b)
+ ; CHECK: call <4 x float> @llvm.minnum.v4f32(<4 x float> %{{.*}}, <4 x float> %c)
+ %dx.clamp = call <4 x float> @llvm.dx.clamp.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)
+ ret <4 x float> %dx.clamp
+}
+
+; CHECK-LABEL: clamp_double2
+define noundef <2 x double> @clamp_double2(<2 x double> noundef %a, <2 x double> noundef %b, <2 x double> noundef %c) {
+entry:
+ ; CHECK: call <2 x double> @llvm.maxnum.v2f64(<2 x double> %a, <2 x double> %b)
+ ; CHECK: call <2 x double> @llvm.minnum.v2f64(<2 x double> %{{.*}}, <2 x double> %c)
+ %dx.clamp = call <2 x double> @llvm.dx.clamp.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
+ ret <2 x double> %dx.clamp
+}
+
+; CHECK-LABEL: clamp_int4
+define noundef <4 x i32> @clamp_int4(<4 x i32> noundef %a, <4 x i32> noundef %b, <4 x i32> noundef %c) {
+entry:
+ ; CHECK: call <4 x i32> @llvm.smax.v4i32(<4 x i32> %a, <4 x i32> %b)
+ ; CHECK: call <4 x i32> @llvm.smin.v4i32(<4 x i32> %{{.*}}, <4 x i32> %c)
+ %dx.clamp = call <4 x i32> @llvm.dx.clamp.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c)
+ ret <4 x i32> %dx.clamp
+}
+
+; CHECK-LABEL: clamp_uint16_t3
+define noundef <3 x i16> @clamp_uint16_t3(<3 x i16> noundef %a, <3 x i16> noundef %b, <3 x i16> noundef %c) {
+entry:
+ ; CHECK: call <3 x i16> @llvm.umax.v3i16(<3 x i16> %a, <3 x i16> %b)
+ ; CHECK: call <3 x i16> @llvm.umin.v3i16(<3 x i16> %{{.*}}, <3 x i16> %c)
+ %dx.clamp = call <3 x i16> @llvm.dx.uclamp.v3i16(<3 x i16> %a, <3 x i16> %b, <3 x i16> %c)
+ ret <3 x i16> %dx.clamp
+}
+
+; CHECK-LABEL: clamp_uint4
+define noundef <4 x i32> @clamp_uint4(<4 x i32> noundef %a, <4 x i32> noundef %b, <4 x i32> noundef %c) {
+entry:
+ ; CHECK: call <4 x i32> @llvm.umax.v4i32(<4 x i32> %a, <4 x i32> %b)
+ ; CHECK: call <4 x i32> @llvm.umin.v4i32(<4 x i32> %{{.*}}, <4 x i32> %c)
+ %dx.clamp = call <4 x i32> @llvm.dx.uclamp.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c)
+ ret <4 x i32> %dx.clamp
+}
+
+; CHECK-LABEL: clamp_uint64_t4
+define noundef <2 x i64> @clamp_uint64_t4(<2 x i64> noundef %a, <2 x i64> noundef %b, <2 x i64> noundef %c) {
+entry:
+ ; CHECK: call <2 x i64> @llvm.umax.v2i64(<2 x i64> %a, <2 x i64> %b)
+ ; CHECK: call <2 x i64> @llvm.umin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %c)
+ %dx.clamp = call <2 x i64> @llvm.dx.uclamp.v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c)
+ ret <2 x i64> %dx.clamp
+}
+
+declare <3 x half> @llvm.dx.clamp.v3f16(<3 x half>, <3 x half>, <3 x half>)
+declare <4 x float> @llvm.dx.clamp.v4f32(<4 x float>, <4 x float>, <4 x float>)
+declare <2 x double> @llvm.dx.clamp.v2f64(<2 x double>, <2 x double>, <2 x double>)
+declare <4 x i32> @llvm.dx.clamp.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <3 x i16> @llvm.dx.uclamp.v3i16(<3 x i16>, <3 x i16>, <3 x i16>)
+declare <4 x i32> @llvm.dx.uclamp.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.dx.uclamp.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
diff --git a/llvm/test/CodeGen/DirectX/clamp.ll b/llvm/test/CodeGen/DirectX/clamp.ll
new file mode 100644
index 0000000..f122313
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/clamp.ll
@@ -0,0 +1,94 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for clamp/uclamp are generated for half/float/double/i16/i32/i64.
+
+; CHECK-LABEL:test_clamp_i16
+define noundef i16 @test_clamp_i16(i16 noundef %a, i16 noundef %b, i16 noundef %c) {
+entry:
+; CHECK: call i16 @dx.op.binary.i16(i32 37, i16 %{{.*}}, i16 %{{.*}})
+; CHECK: call i16 @dx.op.binary.i16(i32 38, i16 %{{.*}}, i16 %{{.*}})
+ %0 = call i16 @llvm.dx.clamp.i16(i16 %a, i16 %b, i16 %c)
+ ret i16 %0
+}
+
+; CHECK-LABEL:test_clamp_i32
+define noundef i32 @test_clamp_i32(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+entry:
+; CHECK: call i32 @dx.op.binary.i32(i32 37, i32 %{{.*}}, i32 %{{.*}})
+; CHECK: call i32 @dx.op.binary.i32(i32 38, i32 %{{.*}}, i32 %{{.*}})
+ %0 = call i32 @llvm.dx.clamp.i32(i32 %a, i32 %b, i32 %c)
+ ret i32 %0
+}
+
+; CHECK-LABEL:test_clamp_i64
+define noundef i64 @test_clamp_i64(i64 noundef %a, i64 noundef %b, i64 noundef %c) {
+entry:
+; CHECK: call i64 @dx.op.binary.i64(i32 37, i64 %a, i64 %b)
+; CHECK: call i64 @dx.op.binary.i64(i32 38, i64 %{{.*}}, i64 %c)
+ %0 = call i64 @llvm.dx.clamp.i64(i64 %a, i64 %b, i64 %c)
+ ret i64 %0
+}
+
+; CHECK-LABEL:test_clamp_half
+define noundef half @test_clamp_half(half noundef %a, half noundef %b, half noundef %c) {
+entry:
+; CHECK: call half @dx.op.binary.f16(i32 35, half %{{.*}}, half %{{.*}})
+; CHECK: call half @dx.op.binary.f16(i32 36, half %{{.*}}, half %{{.*}})
+ %0 = call half @llvm.dx.clamp.f16(half %a, half %b, half %c)
+ ret half %0
+}
+
+; CHECK-LABEL:test_clamp_float
+define noundef float @test_clamp_float(float noundef %a, float noundef %b, float noundef %c) {
+entry:
+; CHECK: call float @dx.op.binary.f32(i32 35, float %{{.*}}, float %{{.*}})
+; CHECK: call float @dx.op.binary.f32(i32 36, float %{{.*}}, float %{{.*}})
+ %0 = call float @llvm.dx.clamp.f32(float %a, float %b, float %c)
+ ret float %0
+}
+
+; CHECK-LABEL:test_clamp_double
+define noundef double @test_clamp_double(double noundef %a, double noundef %b, double noundef %c) {
+entry:
+; CHECK: call double @dx.op.binary.f64(i32 35, double %{{.*}}, double %{{.*}})
+; CHECK: call double @dx.op.binary.f64(i32 36, double %{{.*}}, double %{{.*}})
+ %0 = call double @llvm.dx.clamp.f64(double %a, double %b, double %c)
+ ret double %0
+}
+
+; CHECK-LABEL:test_uclamp_i16
+define noundef i16 @test_uclamp_i16(i16 noundef %a, i16 noundef %b, i16 noundef %c) {
+entry:
+; CHECK: call i16 @dx.op.binary.i16(i32 39, i16 %{{.*}}, i16 %{{.*}})
+; CHECK: call i16 @dx.op.binary.i16(i32 40, i16 %{{.*}}, i16 %{{.*}})
+ %0 = call i16 @llvm.dx.uclamp.i16(i16 %a, i16 %b, i16 %c)
+ ret i16 %0
+}
+
+; CHECK-LABEL:test_uclamp_i32
+define noundef i32 @test_uclamp_i32(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+entry:
+; CHECK: call i32 @dx.op.binary.i32(i32 39, i32 %{{.*}}, i32 %{{.*}})
+; CHECK: call i32 @dx.op.binary.i32(i32 40, i32 %{{.*}}, i32 %{{.*}})
+ %0 = call i32 @llvm.dx.uclamp.i32(i32 %a, i32 %b, i32 %c)
+ ret i32 %0
+}
+
+; CHECK-LABEL:test_uclamp_i64
+define noundef i64 @test_uclamp_i64(i64 noundef %a, i64 noundef %b, i64 noundef %c) {
+entry:
+; CHECK: call i64 @dx.op.binary.i64(i32 39, i64 %a, i64 %b)
+; CHECK: call i64 @dx.op.binary.i64(i32 40, i64 %{{.*}}, i64 %c)
+ %0 = call i64 @llvm.dx.uclamp.i64(i64 %a, i64 %b, i64 %c)
+ ret i64 %0
+}
+
+declare half @llvm.dx.clamp.f16(half, half, half)
+declare float @llvm.dx.clamp.f32(float, float, float)
+declare double @llvm.dx.clamp.f64(double, double, double)
+declare i16 @llvm.dx.clamp.i16(i16, i16, i16)
+declare i32 @llvm.dx.clamp.i32(i32, i32, i32)
+declare i64 @llvm.dx.clamp.i64(i64, i64, i64)
+declare i16 @llvm.dx.uclamp.i16(i16, i16, i16)
+declare i32 @llvm.dx.uclamp.i32(i32, i32, i32)
+declare i64 @llvm.dx.uclamp.i64(i64, i64, i64)
diff --git a/llvm/test/CodeGen/DirectX/cos.ll b/llvm/test/CodeGen/DirectX/cos.ll
new file mode 100644
index 0000000..00f2e2c
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/cos.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for cos are generated for float and half.
+
+define noundef float @cos_float(float noundef %a) #0 {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 12, float %{{.*}})
+ %elt.cos = call float @llvm.cos.f32(float %a)
+ ret float %elt.cos
+}
+
+define noundef half @cos_half(half noundef %a) #0 {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 12, half %{{.*}})
+ %elt.cos = call half @llvm.cos.f16(half %a)
+ ret half %elt.cos
+}
+
+declare half @llvm.cos.f16(half)
+declare float @llvm.cos.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/cos_error.ll b/llvm/test/CodeGen/DirectX/cos_error.ll
new file mode 100644
index 0000000..a074f5b
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/cos_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation cos does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @cos_double(double noundef %a) {
+entry:
+ %elt.cos = call double @llvm.cos.f64(double %a)
+ ret double %elt.cos
+}
diff --git a/llvm/test/CodeGen/DirectX/dot2_error.ll b/llvm/test/CodeGen/DirectX/dot2_error.ll
new file mode 100644
index 0000000..a27bfae
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/dot2_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation dot2 does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload
+
+define noundef double @dot_double2(<2 x double> noundef %a, <2 x double> noundef %b) {
+entry:
+ %dx.dot = call double @llvm.dx.dot2.v2f64(<2 x double> %a, <2 x double> %b)
+ ret double %dx.dot
+}
diff --git a/llvm/test/CodeGen/DirectX/dot3_error.ll b/llvm/test/CodeGen/DirectX/dot3_error.ll
new file mode 100644
index 0000000..eb69fb1
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/dot3_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation dot3 does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload
+
+define noundef double @dot_double3(<3 x double> noundef %a, <3 x double> noundef %b) {
+entry:
+ %dx.dot = call double @llvm.dx.dot3.v3f64(<3 x double> %a, <3 x double> %b)
+ ret double %dx.dot
+}
diff --git a/llvm/test/CodeGen/DirectX/dot4_error.ll b/llvm/test/CodeGen/DirectX/dot4_error.ll
new file mode 100644
index 0000000..5cd6326
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/dot4_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation dot4 does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload
+
+define noundef double @dot_double4(<4 x double> noundef %a, <4 x double> noundef %b) {
+entry:
+ %dx.dot = call double @llvm.dx.dot4.v4f64(<4 x double> %a, <4 x double> %b)
+ ret double %dx.dot
+}
diff --git a/llvm/test/CodeGen/DirectX/exp-vec.ll b/llvm/test/CodeGen/DirectX/exp-vec.ll
new file mode 100644
index 0000000..c937155
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/exp-vec.ll
@@ -0,0 +1,17 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s
+
+; Make sure dxil operation function calls for exp are generated for float and half.
+
+; CHECK-LABEL: exp_float4
+; CHECK: fmul <4 x float> <float 0x3FF7154760000000, float 0x3FF7154760000000, float 0x3FF7154760000000, float 0x3FF7154760000000>, %{{.*}}
+; CHECK: call <4 x float> @llvm.exp2.v4f32(<4 x float> %{{.*}})
+define noundef <4 x float> @exp_float4(<4 x float> noundef %p0) {
+entry:
+ %p0.addr = alloca <4 x float>, align 16
+ store <4 x float> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x float>, ptr %p0.addr, align 16
+ %elt.exp = call <4 x float> @llvm.exp.v4f32(<4 x float> %0)
+ ret <4 x float> %elt.exp
+}
+
+declare <4 x float> @llvm.exp.v4f32(<4 x float>)
diff --git a/llvm/test/CodeGen/DirectX/exp.ll b/llvm/test/CodeGen/DirectX/exp.ll
new file mode 100644
index 0000000..fdafc14
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/exp.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for exp are generated for float and half.
+
+; CHECK-LABEL: exp_float
+; CHECK: fmul float 0x3FF7154760000000, %{{.*}}
+; CHECK: call float @dx.op.unary.f32(i32 21, float %{{.*}})
+define noundef float @exp_float(float noundef %a) {
+entry:
+ %a.addr = alloca float, align 4
+ store float %a, ptr %a.addr, align 4
+ %0 = load float, ptr %a.addr, align 4
+ %elt.exp = call float @llvm.exp.f32(float %0)
+ ret float %elt.exp
+}
+
+; CHECK-LABEL: exp_half
+; CHECK: fmul half 0xH3DC5, %{{.*}}
+; CHECK: call half @dx.op.unary.f16(i32 21, half %{{.*}})
+; Function Attrs: noinline nounwind optnone
+define noundef half @exp_half(half noundef %a) {
+entry:
+ %a.addr = alloca half, align 2
+ store half %a, ptr %a.addr, align 2
+ %0 = load half, ptr %a.addr, align 2
+ %elt.exp = call half @llvm.exp.f16(half %0)
+ ret half %elt.exp
+}
+
+declare half @llvm.exp.f16(half)
+declare float @llvm.exp.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/fabs.ll b/llvm/test/CodeGen/DirectX/fabs.ll
new file mode 100644
index 0000000..3b3f8aa
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/fabs.ll
@@ -0,0 +1,32 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for abs are generated for float, half, and double.
+
+
+; CHECK-LABEL: fabs_half
+define noundef half @fabs_half(half noundef %a) {
+entry:
+ ; CHECK: call half @dx.op.unary.f16(i32 6, half %{{.*}})
+ %elt.abs = call half @llvm.fabs.f16(half %a)
+ ret half %elt.abs
+}
+
+; CHECK-LABEL: fabs_float
+define noundef float @fabs_float(float noundef %a) {
+entry:
+; CHECK: call float @dx.op.unary.f32(i32 6, float %{{.*}})
+ %elt.abs = call float @llvm.fabs.f32(float %a)
+ ret float %elt.abs
+}
+
+; CHECK-LABEL: fabs_double
+define noundef double @fabs_double(double noundef %a) {
+entry:
+; CHECK: call double @dx.op.unary.f64(i32 6, double %{{.*}})
+ %elt.abs = call double @llvm.fabs.f64(double %a)
+ ret double %elt.abs
+}
+
+declare half @llvm.fabs.f16(half)
+declare float @llvm.fabs.f32(float)
+declare double @llvm.fabs.f64(double)
diff --git a/llvm/test/CodeGen/DirectX/fdot.ll b/llvm/test/CodeGen/DirectX/fdot.ll
new file mode 100644
index 0000000..3e13b2a
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/fdot.ll
@@ -0,0 +1,94 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for dot are generated for half/float vectors.
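+; Floating-point dot products are lowered by extracting each lane of both operands and
+; passing them to dx.op.dot2/dot3/dot4 (opcodes 54, 55 and 56).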
+
+; CHECK-LABEL: dot_half2
+define noundef half @dot_half2(<2 x half> noundef %a, <2 x half> noundef %b) {
+entry:
+; CHECK: extractelement <2 x half> %a, i32 0
+; CHECK: extractelement <2 x half> %a, i32 1
+; CHECK: extractelement <2 x half> %b, i32 0
+; CHECK: extractelement <2 x half> %b, i32 1
+; CHECK: call half @dx.op.dot2.f16(i32 54, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}})
+ %dx.dot = call half @llvm.dx.dot2.v2f16(<2 x half> %a, <2 x half> %b)
+ ret half %dx.dot
+}
+
+; CHECK-LABEL: dot_half3
+define noundef half @dot_half3(<3 x half> noundef %a, <3 x half> noundef %b) {
+entry:
+; CHECK: extractelement <3 x half> %a, i32 0
+; CHECK: extractelement <3 x half> %a, i32 1
+; CHECK: extractelement <3 x half> %a, i32 2
+; CHECK: extractelement <3 x half> %b, i32 0
+; CHECK: extractelement <3 x half> %b, i32 1
+; CHECK: extractelement <3 x half> %b, i32 2
+; CHECK: call half @dx.op.dot3.f16(i32 55, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}})
+ %dx.dot = call half @llvm.dx.dot3.v3f16(<3 x half> %a, <3 x half> %b)
+ ret half %dx.dot
+}
+
+; CHECK-LABEL: dot_half4
+define noundef half @dot_half4(<4 x half> noundef %a, <4 x half> noundef %b) {
+entry:
+; CHECK: extractelement <4 x half> %a, i32 0
+; CHECK: extractelement <4 x half> %a, i32 1
+; CHECK: extractelement <4 x half> %a, i32 2
+; CHECK: extractelement <4 x half> %a, i32 3
+; CHECK: extractelement <4 x half> %b, i32 0
+; CHECK: extractelement <4 x half> %b, i32 1
+; CHECK: extractelement <4 x half> %b, i32 2
+; CHECK: extractelement <4 x half> %b, i32 3
+; CHECK: call half @dx.op.dot4.f16(i32 56, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}})
+ %dx.dot = call half @llvm.dx.dot4.v4f16(<4 x half> %a, <4 x half> %b)
+ ret half %dx.dot
+}
+
+; CHECK-LABEL: dot_float2
+define noundef float @dot_float2(<2 x float> noundef %a, <2 x float> noundef %b) {
+entry:
+; CHECK: extractelement <2 x float> %a, i32 0
+; CHECK: extractelement <2 x float> %a, i32 1
+; CHECK: extractelement <2 x float> %b, i32 0
+; CHECK: extractelement <2 x float> %b, i32 1
+; CHECK: call float @dx.op.dot2.f32(i32 54, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}})
+ %dx.dot = call float @llvm.dx.dot2.v2f32(<2 x float> %a, <2 x float> %b)
+ ret float %dx.dot
+}
+
+; CHECK-LABEL: dot_float3
+define noundef float @dot_float3(<3 x float> noundef %a, <3 x float> noundef %b) {
+entry:
+; CHECK: extractelement <3 x float> %a, i32 0
+; CHECK: extractelement <3 x float> %a, i32 1
+; CHECK: extractelement <3 x float> %a, i32 2
+; CHECK: extractelement <3 x float> %b, i32 0
+; CHECK: extractelement <3 x float> %b, i32 1
+; CHECK: extractelement <3 x float> %b, i32 2
+; CHECK: call float @dx.op.dot3.f32(i32 55, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}})
+ %dx.dot = call float @llvm.dx.dot3.v3f32(<3 x float> %a, <3 x float> %b)
+ ret float %dx.dot
+}
+
+; CHECK-LABEL: dot_float4
+define noundef float @dot_float4(<4 x float> noundef %a, <4 x float> noundef %b) {
+entry:
+; CHECK: extractelement <4 x float> %a, i32 0
+; CHECK: extractelement <4 x float> %a, i32 1
+; CHECK: extractelement <4 x float> %a, i32 2
+; CHECK: extractelement <4 x float> %a, i32 3
+; CHECK: extractelement <4 x float> %b, i32 0
+; CHECK: extractelement <4 x float> %b, i32 1
+; CHECK: extractelement <4 x float> %b, i32 2
+; CHECK: extractelement <4 x float> %b, i32 3
+; CHECK: call float @dx.op.dot4.f32(i32 56, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}})
+ %dx.dot = call float @llvm.dx.dot4.v4f32(<4 x float> %a, <4 x float> %b)
+ ret float %dx.dot
+}
+
+declare half @llvm.dx.dot2.v2f16(<2 x half>, <2 x half>)
+declare half @llvm.dx.dot3.v3f16(<3 x half>, <3 x half>)
+declare half @llvm.dx.dot4.v4f16(<4 x half>, <4 x half>)
+declare float @llvm.dx.dot2.v2f32(<2 x float>, <2 x float>)
+declare float @llvm.dx.dot3.v3f32(<3 x float>, <3 x float>)
+declare float @llvm.dx.dot4.v4f32(<4 x float>, <4 x float>)
diff --git a/llvm/test/CodeGen/DirectX/floor.ll b/llvm/test/CodeGen/DirectX/floor.ll
new file mode 100644
index 0000000..b033e2e
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/floor.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for floor are generated for float and half.
+
+define noundef float @floor_float(float noundef %a) #0 {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 27, float %{{.*}})
+ %elt.floor = call float @llvm.floor.f32(float %a)
+ ret float %elt.floor
+}
+
+define noundef half @floor_half(half noundef %a) #0 {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 27, half %{{.*}})
+ %elt.floor = call half @llvm.floor.f16(half %a)
+ ret half %elt.floor
+}
+
+declare half @llvm.floor.f16(half)
+declare float @llvm.floor.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/floor_error.ll b/llvm/test/CodeGen/DirectX/floor_error.ll
new file mode 100644
index 0000000..3b51a4b
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/floor_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation floor does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @floor_double(double noundef %a) {
+entry:
+ %elt.floor = call double @llvm.floor.f64(double %a)
+ ret double %elt.floor
+}
diff --git a/llvm/test/CodeGen/DirectX/fmax.ll b/llvm/test/CodeGen/DirectX/fmax.ll
new file mode 100644
index 0000000..aff722c
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/fmax.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for fmax are generated for half/float/double.
+
+; CHECK-LABEL:test_fmax_half
+define noundef half @test_fmax_half(half noundef %a, half noundef %b) {
+entry:
+; CHECK: call half @dx.op.binary.f16(i32 35, half %{{.*}}, half %{{.*}})
+ %0 = call half @llvm.maxnum.f16(half %a, half %b)
+ ret half %0
+}
+
+; CHECK-LABEL:test_fmax_float
+define noundef float @test_fmax_float(float noundef %a, float noundef %b) {
+entry:
+; CHECK: call float @dx.op.binary.f32(i32 35, float %{{.*}}, float %{{.*}})
+ %0 = call float @llvm.maxnum.f32(float %a, float %b)
+ ret float %0
+}
+
+; CHECK-LABEL:test_fmax_double
+define noundef double @test_fmax_double(double noundef %a, double noundef %b) {
+entry:
+; CHECK: call double @dx.op.binary.f64(i32 35, double %{{.*}}, double %{{.*}})
+ %0 = call double @llvm.maxnum.f64(double %a, double %b)
+ ret double %0
+}
+
+declare half @llvm.maxnum.f16(half, half)
+declare float @llvm.maxnum.f32(float, float)
+declare double @llvm.maxnum.f64(double, double)
diff --git a/llvm/test/CodeGen/DirectX/fmin.ll b/llvm/test/CodeGen/DirectX/fmin.ll
new file mode 100644
index 0000000..2f7c209
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/fmin.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for fmin are generated for half/float/double.
+
+; CHECK-LABEL:test_fmin_half
+define noundef half @test_fmin_half(half noundef %a, half noundef %b) {
+entry:
+; CHECK: call half @dx.op.binary.f16(i32 36, half %{{.*}}, half %{{.*}})
+ %0 = call half @llvm.minnum.f16(half %a, half %b)
+ ret half %0
+}
+
+; CHECK-LABEL:test_fmin_float
+define noundef float @test_fmin_float(float noundef %a, float noundef %b) {
+entry:
+; CHECK: call float @dx.op.binary.f32(i32 36, float %{{.*}}, float %{{.*}})
+ %0 = call float @llvm.minnum.f32(float %a, float %b)
+ ret float %0
+}
+
+; CHECK-LABEL:test_fmin_double
+define noundef double @test_fmin_double(double noundef %a, double noundef %b) {
+entry:
+; CHECK: call double @dx.op.binary.f64(i32 36, double %{{.*}}, double %{{.*}})
+ %0 = call double @llvm.minnum.f64(double %a, double %b)
+ ret double %0
+}
+
+declare half @llvm.minnum.f16(half, half)
+declare float @llvm.minnum.f32(float, float)
+declare double @llvm.minnum.f64(double, double)
diff --git a/llvm/test/CodeGen/DirectX/idot.ll b/llvm/test/CodeGen/DirectX/idot.ll
new file mode 100644
index 0000000..9f89a8d
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/idot.ll
@@ -0,0 +1,100 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s --check-prefixes=CHECK,EXPCHECK
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s --check-prefixes=CHECK,DOPCHECK
+
+; Make sure dxil operation function calls for dot are generated for int/uint vectors.
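+; Integer dot products are expanded into a multiply of the first lanes followed by a chain of
+; mad ops: llvm.dx.imad/umad after expansion, dx.op.tertiary (opcode 48 signed, 49 unsigned) after op lowering.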
+
+; CHECK-LABEL: dot_int16_t2
+define noundef i16 @dot_int16_t2(<2 x i16> noundef %a, <2 x i16> noundef %b) {
+entry:
+; CHECK: extractelement <2 x i16> %a, i64 0
+; CHECK: extractelement <2 x i16> %b, i64 0
+; CHECK: mul i16 %{{.*}}, %{{.*}}
+; CHECK: extractelement <2 x i16> %a, i64 1
+; CHECK: extractelement <2 x i16> %b, i64 1
+; EXPCHECK: call i16 @llvm.dx.imad.i16(i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}})
+; DOPCHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}})
+ %dx.dot = call i16 @llvm.dx.sdot.v2i16(<2 x i16> %a, <2 x i16> %b)
+ ret i16 %dx.dot
+}
+
+; CHECK-LABEL: sdot_int4
+define noundef i32 @sdot_int4(<4 x i32> noundef %a, <4 x i32> noundef %b) {
+entry:
+; CHECK: extractelement <4 x i32> %a, i64 0
+; CHECK: extractelement <4 x i32> %b, i64 0
+; CHECK: mul i32 %{{.*}}, %{{.*}}
+; CHECK: extractelement <4 x i32> %a, i64 1
+; CHECK: extractelement <4 x i32> %b, i64 1
+; EXPCHECK: call i32 @llvm.dx.imad.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; DOPCHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; CHECK: extractelement <4 x i32> %a, i64 2
+; CHECK: extractelement <4 x i32> %b, i64 2
+; EXPCHECK: call i32 @llvm.dx.imad.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; DOPCHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; CHECK: extractelement <4 x i32> %a, i64 3
+; CHECK: extractelement <4 x i32> %b, i64 3
+; EXPCHECK: call i32 @llvm.dx.imad.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; DOPCHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+ %dx.dot = call i32 @llvm.dx.sdot.v4i32(<4 x i32> %a, <4 x i32> %b)
+ ret i32 %dx.dot
+}
+
+; CHECK-LABEL: dot_uint16_t3
+define noundef i16 @dot_uint16_t3(<3 x i16> noundef %a, <3 x i16> noundef %b) {
+entry:
+; CHECK: extractelement <3 x i16> %a, i64 0
+; CHECK: extractelement <3 x i16> %b, i64 0
+; CHECK: mul i16 %{{.*}}, %{{.*}}
+; CHECK: extractelement <3 x i16> %a, i64 1
+; CHECK: extractelement <3 x i16> %b, i64 1
+; EXPCHECK: call i16 @llvm.dx.umad.i16(i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}})
+; DOPCHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}})
+; CHECK: extractelement <3 x i16> %a, i64 2
+; CHECK: extractelement <3 x i16> %b, i64 2
+; EXPCHECK: call i16 @llvm.dx.umad.i16(i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}})
+; DOPCHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}})
+ %dx.dot = call i16 @llvm.dx.udot.v3i16(<3 x i16> %a, <3 x i16> %b)
+ ret i16 %dx.dot
+}
+
+; CHECK-LABEL: dot_uint4
+define noundef i32 @dot_uint4(<4 x i32> noundef %a, <4 x i32> noundef %b) {
+entry:
+; CHECK: extractelement <4 x i32> %a, i64 0
+; CHECK: extractelement <4 x i32> %b, i64 0
+; CHECK: mul i32 %{{.*}}, %{{.*}}
+; CHECK: extractelement <4 x i32> %a, i64 1
+; CHECK: extractelement <4 x i32> %b, i64 1
+; EXPCHECK: call i32 @llvm.dx.umad.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; DOPCHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; CHECK: extractelement <4 x i32> %a, i64 2
+; CHECK: extractelement <4 x i32> %b, i64 2
+; EXPCHECK: call i32 @llvm.dx.umad.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; DOPCHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; CHECK: extractelement <4 x i32> %a, i64 3
+; CHECK: extractelement <4 x i32> %b, i64 3
+; EXPCHECK: call i32 @llvm.dx.umad.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+; DOPCHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+ %dx.dot = call i32 @llvm.dx.udot.v4i32(<4 x i32> %a, <4 x i32> %b)
+ ret i32 %dx.dot
+}
+
+; CHECK-LABEL: dot_uint64_t4
+define noundef i64 @dot_uint64_t4(<2 x i64> noundef %a, <2 x i64> noundef %b) {
+entry:
+; CHECK: extractelement <2 x i64> %a, i64 0
+; CHECK: extractelement <2 x i64> %b, i64 0
+; CHECK: mul i64 %{{.*}}, %{{.*}}
+; CHECK: extractelement <2 x i64> %a, i64 1
+; CHECK: extractelement <2 x i64> %b, i64 1
+; EXPCHECK: call i64 @llvm.dx.umad.i64(i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}})
+; DOPCHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}})
+ %dx.dot = call i64 @llvm.dx.udot.v2i64(<2 x i64> %a, <2 x i64> %b)
+ ret i64 %dx.dot
+}
+
+declare i16 @llvm.dx.sdot.v2i16(<2 x i16>, <2 x i16>)
+declare i32 @llvm.dx.sdot.v4i32(<4 x i32>, <4 x i32>)
+declare i16 @llvm.dx.udot.v3i16(<3 x i16>, <3 x i16>)
+declare i32 @llvm.dx.udot.v4i32(<4 x i32>, <4 x i32>)
+declare i64 @llvm.dx.udot.v2i64(<2 x i64>, <2 x i64>)
diff --git a/llvm/test/CodeGen/DirectX/isinf.ll b/llvm/test/CodeGen/DirectX/isinf.ll
new file mode 100644
index 0000000..e2975da
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/isinf.ll
@@ -0,0 +1,25 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for isinf are generated for float and half.
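+; isinf is lowered to dx.op.isSpecialFloat, with opcode 9 selecting the IsInf test.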
+; CHECK: call i1 @dx.op.isSpecialFloat.f32(i32 9, float %{{.*}})
+; CHECK: call i1 @dx.op.isSpecialFloat.f16(i32 9, half %{{.*}})
+
+; Function Attrs: noinline nounwind optnone
+define noundef i1 @isinf_float(float noundef %a) #0 {
+entry:
+ %a.addr = alloca float, align 4
+ store float %a, ptr %a.addr, align 4
+ %0 = load float, ptr %a.addr, align 4
+ %dx.isinf = call i1 @llvm.dx.isinf.f32(float %0)
+ ret i1 %dx.isinf
+}
+
+; Function Attrs: noinline nounwind optnone
+define noundef i1 @isinf_half(half noundef %p0) #0 {
+entry:
+ %p0.addr = alloca half, align 2
+ store half %p0, ptr %p0.addr, align 2
+ %0 = load half, ptr %p0.addr, align 2
+ %dx.isinf = call i1 @llvm.dx.isinf.f16(half %0)
+ ret i1 %dx.isinf
+}
diff --git a/llvm/test/CodeGen/DirectX/isinf_error.ll b/llvm/test/CodeGen/DirectX/isinf_error.ll
new file mode 100644
index 0000000..95b2d0c
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/isinf_error.ll
@@ -0,0 +1,13 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation isinf does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef i1 @isinf_double(double noundef %a) #0 {
+entry:
+ %a.addr = alloca double, align 8
+ store double %a, ptr %a.addr, align 8
+ %0 = load double, ptr %a.addr, align 8
+ %dx.isinf = call i1 @llvm.dx.isinf.f64(double %0)
+ ret i1 %dx.isinf
+}
diff --git a/llvm/test/CodeGen/DirectX/lerp.ll b/llvm/test/CodeGen/DirectX/lerp.ll
new file mode 100644
index 0000000..ebd7e13
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/lerp.ll
@@ -0,0 +1,56 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for lerp are generated for float and half.
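+; lerp(a, b, t) is expanded to a + (b - a) * t, matching the fsub/fmul/fadd sequence checked below.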
+
+; CHECK-LABEL: lerp_half
+; CHECK: fsub half %{{.*}}, %{{.*}}
+; CHECK: fmul half %{{.*}}, %{{.*}}
+; CHECK: fadd half %{{.*}}, %{{.*}}
+define noundef half @lerp_half(half noundef %p0) {
+entry:
+ %p0.addr = alloca half, align 2
+ store half %p0, ptr %p0.addr, align 2
+ %0 = load half, ptr %p0.addr, align 2
+ %1 = load half, ptr %p0.addr, align 2
+ %2 = load half, ptr %p0.addr, align 2
+ %dx.lerp = call half @llvm.dx.lerp.f16(half %0, half %1, half %2)
+ ret half %dx.lerp
+}
+
+; CHECK-LABEL: lerp_float
+; CHECK: fsub float %{{.*}}, %{{.*}}
+; CHECK: fmul float %{{.*}}, %{{.*}}
+; CHECK: fadd float %{{.*}}, %{{.*}}
+define noundef float @lerp_float(float noundef %p0, float noundef %p1) {
+entry:
+ %p1.addr = alloca float, align 4
+ %p0.addr = alloca float, align 4
+ store float %p1, ptr %p1.addr, align 4
+ store float %p0, ptr %p0.addr, align 4
+ %0 = load float, ptr %p0.addr, align 4
+ %1 = load float, ptr %p0.addr, align 4
+ %2 = load float, ptr %p0.addr, align 4
+ %dx.lerp = call float @llvm.dx.lerp.f32(float %0, float %1, float %2)
+ ret float %dx.lerp
+}
+
+; CHECK-LABEL: lerp_float4
+; CHECK: fsub <4 x float> %{{.*}}, %{{.*}}
+; CHECK: fmul <4 x float> %{{.*}}, %{{.*}}
+; CHECK: fadd <4 x float> %{{.*}}, %{{.*}}
+define noundef <4 x float> @lerp_float4(<4 x float> noundef %p0, <4 x float> noundef %p1) {
+entry:
+ %p1.addr = alloca <4 x float>, align 16
+ %p0.addr = alloca <4 x float>, align 16
+ store <4 x float> %p1, ptr %p1.addr, align 16
+ store <4 x float> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x float>, ptr %p0.addr, align 16
+ %1 = load <4 x float>, ptr %p0.addr, align 16
+ %2 = load <4 x float>, ptr %p0.addr, align 16
+ %dx.lerp = call <4 x float> @llvm.dx.lerp.v4f32(<4 x float> %0, <4 x float> %1, <4 x float> %2)
+ ret <4 x float> %dx.lerp
+}
+
+declare half @llvm.dx.lerp.f16(half, half, half)
+declare float @llvm.dx.lerp.f32(float, float, float)
+declare <4 x float> @llvm.dx.lerp.v4f32(<4 x float>, <4 x float>, <4 x float>)
diff --git a/llvm/test/CodeGen/DirectX/lib_entry.ll b/llvm/test/CodeGen/DirectX/lib_entry.ll
index 9208d6d..5254a08 100644
--- a/llvm/test/CodeGen/DirectX/lib_entry.ll
+++ b/llvm/test/CodeGen/DirectX/lib_entry.ll
@@ -7,7 +7,7 @@ target triple = "dxil-unknown-shadermodel6.7-library"
; Make sure generate empty entry for lib profile.
;CHECK:![[empty_entry]] = !{null, !"", null, null, ![[shader_flags:[0-9]+]]}
; Make sure double is marked for shader flags.
-;CHECK:![[shader_flags]] = !{i32 0, i64 1}
+;CHECK:![[shader_flags]] = !{i32 0, i64 4}
;CHECK:![[entry]] = !{ptr @entry, !"entry", null, null, ![[extra:[0-9]+]]}
;CHECK:![[extra]] = !{i32 8, i32 5, i32 4, ![[numthreads:[0-9]+]]}
;CHECK:![[numthreads]] = !{i32 1, i32 2, i32 1}
diff --git a/llvm/test/CodeGen/DirectX/log-vec.ll b/llvm/test/CodeGen/DirectX/log-vec.ll
new file mode 100644
index 0000000..4768fdd
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/log-vec.ll
@@ -0,0 +1,30 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s
+
+; Make sure the log and log10 intrinsics are expanded for vector types.
+
+; CHECK-LABEL: log_float4
+; CHECK: call <4 x float> @llvm.log2.v4f32(<4 x float> %{{.*}})
+; CHECK: fmul <4 x float> <float 0x3FE62E4300000000, float 0x3FE62E4300000000, float 0x3FE62E4300000000, float 0x3FE62E4300000000>, %{{.*}}
+define noundef <4 x float> @log_float4(<4 x float> noundef %p0) {
+entry:
+ %p0.addr = alloca <4 x float>, align 16
+ store <4 x float> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x float>, ptr %p0.addr, align 16
+ %elt.log = call <4 x float> @llvm.log.v4f32(<4 x float> %0)
+ ret <4 x float> %elt.log
+}
+
+; CHECK-LABEL: log10_float4
+; CHECK: call <4 x float> @llvm.log2.v4f32(<4 x float> %{{.*}})
+; CHECK: fmul <4 x float> <float 0x3FD3441340000000, float 0x3FD3441340000000, float 0x3FD3441340000000, float 0x3FD3441340000000>, %{{.*}}
+define noundef <4 x float> @log10_float4(<4 x float> noundef %p0) {
+entry:
+ %p0.addr = alloca <4 x float>, align 16
+ store <4 x float> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x float>, ptr %p0.addr, align 16
+ %elt.log10 = call <4 x float> @llvm.log10.v4f32(<4 x float> %0)
+ ret <4 x float> %elt.log10
+}
+
+declare <4 x float> @llvm.log.v4f32(<4 x float>)
+declare <4 x float> @llvm.log10.v4f32(<4 x float>)
diff --git a/llvm/test/CodeGen/DirectX/log.ll b/llvm/test/CodeGen/DirectX/log.ll
new file mode 100644
index 0000000..172c3bf
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/log.ll
@@ -0,0 +1,25 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s --check-prefixes=CHECK,EXPCHECK
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s --check-prefixes=CHECK,DOPCHECK
+
+; Make sure dxil operation function calls for log are generated.
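+; log(x) is expanded to log2(x) * ln(2); the fmul constant below is ln(2) ~= 0.693147.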
+
+define noundef float @log_float(float noundef %a) #0 {
+entry:
+; DOPCHECK: call float @dx.op.unary.f32(i32 23, float %{{.*}})
+; EXPCHECK: call float @llvm.log2.f32(float %a)
+; CHECK: fmul float 0x3FE62E4300000000, %{{.*}}
+ %elt.log = call float @llvm.log.f32(float %a)
+ ret float %elt.log
+}
+
+define noundef half @log_half(half noundef %a) #0 {
+entry:
+; DOPCHECK: call half @dx.op.unary.f16(i32 23, half %{{.*}})
+; EXPCHECK: call half @llvm.log2.f16(half %a)
+; CHECK: fmul half 0xH398C, %{{.*}}
+ %elt.log = call half @llvm.log.f16(half %a)
+ ret half %elt.log
+}
+
+declare half @llvm.log.f16(half)
+declare float @llvm.log.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/log10.ll b/llvm/test/CodeGen/DirectX/log10.ll
new file mode 100644
index 0000000..d4f827a
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/log10.ll
@@ -0,0 +1,25 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s --check-prefixes=CHECK,EXPCHECK
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s --check-prefixes=CHECK,DOPCHECK
+
+; Make sure dxil operation function calls for log10 are generated.
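+; log10(x) is expanded to log2(x) * log10(2); the fmul constant below is log10(2) ~= 0.301030.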
+
+define noundef float @log10_float(float noundef %a) #0 {
+entry:
+; DOPCHECK: call float @dx.op.unary.f32(i32 23, float %{{.*}})
+; EXPCHECK: call float @llvm.log2.f32(float %a)
+; CHECK: fmul float 0x3FD3441340000000, %{{.*}}
+ %elt.log10 = call float @llvm.log10.f32(float %a)
+ ret float %elt.log10
+}
+
+define noundef half @log10_half(half noundef %a) #0 {
+entry:
+; DOPCHECK: call half @dx.op.unary.f16(i32 23, half %{{.*}})
+; EXPCHECK: call half @llvm.log2.f16(half %a)
+; CHECK: fmul half 0xH34D1, %{{.*}}
+ %elt.log10 = call half @llvm.log10.f16(half %a)
+ ret half %elt.log10
+}
+
+declare half @llvm.log10.f16(half)
+declare float @llvm.log10.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/log2.ll b/llvm/test/CodeGen/DirectX/log2.ll
new file mode 100644
index 0000000..2164d4d
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/log2.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for log2 are generated for float and half.
+
+define noundef float @log2_float(float noundef %a) #0 {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 23, float %{{.*}})
+ %elt.log2 = call float @llvm.log2.f32(float %a)
+ ret float %elt.log2
+}
+
+define noundef half @log2_half(half noundef %a) #0 {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 23, half %{{.*}})
+ %elt.log2 = call half @llvm.log2.f16(half %a)
+ ret half %elt.log2
+}
+
+declare half @llvm.log2.f16(half)
+declare float @llvm.log2.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/log2_error.ll b/llvm/test/CodeGen/DirectX/log2_error.ll
new file mode 100644
index 0000000..a26f6e8
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/log2_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation log2 does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @log2_double(double noundef %a) {
+entry:
+ %elt.log2 = call double @llvm.log2.f64(double %a)
+ ret double %elt.log2
+}
diff --git a/llvm/test/CodeGen/DirectX/pow-vec.ll b/llvm/test/CodeGen/DirectX/pow-vec.ll
new file mode 100644
index 0000000..781fa5b
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/pow-vec.ll
@@ -0,0 +1,15 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s
+
+; Make sure the pow intrinsic is expanded for vector types.
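+; pow(a, b) is expanded elementwise to exp2(b * log2(a)).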
+
+; CHECK-LABEL: pow_float4
+; CHECK: call <4 x float> @llvm.log2.v4f32(<4 x float> %a)
+; CHECK: fmul <4 x float> %{{.*}}, %b
+; CHECK: call <4 x float> @llvm.exp2.v4f32(<4 x float> %{{.*}})
+define noundef <4 x float> @pow_float4(<4 x float> noundef %a, <4 x float> noundef %b) {
+entry:
+ %elt.pow = call <4 x float> @llvm.pow.v4f32(<4 x float> %a, <4 x float> %b)
+ ret <4 x float> %elt.pow
+}
+
+declare <4 x float> @llvm.pow.v4f32(<4 x float>,<4 x float>)
diff --git a/llvm/test/CodeGen/DirectX/pow.ll b/llvm/test/CodeGen/DirectX/pow.ll
new file mode 100644
index 0000000..25ce0fe
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/pow.ll
@@ -0,0 +1,29 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s --check-prefixes=CHECK,EXPCHECK
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s --check-prefixes=CHECK,DOPCHECK
+
+; Make sure dxil operation function calls for pow are generated.
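+; pow(a, b) is expanded to exp2(b * log2(a)): log2 of the base, a multiply by the exponent, then exp2.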
+
+define noundef float @pow_float(float noundef %a, float noundef %b) {
+entry:
+; DOPCHECK: call float @dx.op.unary.f32(i32 23, float %a)
+; EXPCHECK: call float @llvm.log2.f32(float %a)
+; CHECK: fmul float %{{.*}}, %b
+; DOPCHECK: call float @dx.op.unary.f32(i32 21, float %{{.*}})
+; EXPCHECK: call float @llvm.exp2.f32(float %{{.*}})
+ %elt.pow = call float @llvm.pow.f32(float %a, float %b)
+ ret float %elt.pow
+}
+
+define noundef half @pow_half(half noundef %a, half noundef %b) {
+entry:
+; DOPCHECK: call half @dx.op.unary.f16(i32 23, half %a)
+; EXPCHECK: call half @llvm.log2.f16(half %a)
+; CHECK: fmul half %{{.*}}, %b
+; DOPCHECK: call half @dx.op.unary.f16(i32 21, half %{{.*}})
+; EXPCHECK: call half @llvm.exp2.f16(half %{{.*}})
+ %elt.pow = call half @llvm.pow.f16(half %a, half %b)
+ ret half %elt.pow
+}
+
+declare half @llvm.pow.f16(half,half)
+declare float @llvm.pow.f32(float,float)
diff --git a/llvm/test/CodeGen/DirectX/rcp.ll b/llvm/test/CodeGen/DirectX/rcp.ll
new file mode 100644
index 0000000..65abe83
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/rcp.ll
@@ -0,0 +1,52 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure the dx.rcp intrinsic is expanded into a reciprocal fdiv for float, double, and half.
+
+; CHECK-LABEL: rcp_float4
+; CHECK: fdiv <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, %{{.*}}
+define noundef <4 x float> @rcp_float4(<4 x float> noundef %p0) {
+entry:
+ %p0.addr = alloca <4 x float>, align 16
+ store <4 x float> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x float>, ptr %p0.addr, align 16
+ %dx.rcp = call <4 x float> @llvm.dx.rcp.v4f32(<4 x float> %0)
+ ret <4 x float> %dx.rcp
+}
+
+; CHECK-LABEL: rcp_double4
+; CHECK: fdiv <4 x double> <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>, %{{.*}}
+define noundef <4 x double> @rcp_double4(<4 x double> noundef %p0) {
+entry:
+ %p0.addr = alloca <4 x double>, align 16
+ store <4 x double> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x double>, ptr %p0.addr, align 16
+ %dx.rcp = call <4 x double> @llvm.dx.rcp.v4f64(<4 x double> %0)
+ ret <4 x double> %dx.rcp
+}
+
+; CHECK-LABEL: rcp_half4
+; CHECK: fdiv <4 x half> <half 0xH3C00, half 0xH3C00, half 0xH3C00, half 0xH3C00>, %{{.*}}
+define noundef <4 x half> @rcp_half4(<4 x half> noundef %p0) {
+entry:
+ %p0.addr = alloca <4 x half>, align 16
+ store <4 x half> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x half>, ptr %p0.addr, align 16
+ %dx.rcp = call <4 x half> @llvm.dx.rcp.v4f16(<4 x half> %0)
+ ret <4 x half> %dx.rcp
+}
+
+; CHECK-LABEL: rcp_half
+; CHECK: fdiv half 0xH3C00, %{{.*}}
+define noundef half @rcp_half(half noundef %p0) {
+entry:
+ %p0.addr = alloca half, align 2
+ store half %p0, ptr %p0.addr, align 2
+ %0 = load half, ptr %p0.addr, align 2
+ %dx.rcp = call half @llvm.dx.rcp.f16(half %0)
+ ret half %dx.rcp
+}
+
+declare half @llvm.dx.rcp.f16(half)
+declare <4 x half> @llvm.dx.rcp.v4f16(<4 x half>)
+declare <4 x float> @llvm.dx.rcp.v4f32(<4 x float>)
+declare <4 x double> @llvm.dx.rcp.v4f64(<4 x double>)
diff --git a/llvm/test/CodeGen/DirectX/reversebits.ll b/llvm/test/CodeGen/DirectX/reversebits.ll
new file mode 100644
index 0000000..b6a7a1b
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/reversebits.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for reversebits are generated for all integer types.
+
+; Function Attrs: nounwind
+define noundef i16 @test_bitreverse_short(i16 noundef %a) {
+entry:
+; CHECK:call i16 @dx.op.unary.i16(i32 30, i16 %{{.*}})
+ %elt.bitreverse = call i16 @llvm.bitreverse.i16(i16 %a)
+ ret i16 %elt.bitreverse
+}
+
+; Function Attrs: nounwind
+define noundef i32 @test_bitreverse_int(i32 noundef %a) {
+entry:
+; CHECK:call i32 @dx.op.unary.i32(i32 30, i32 %{{.*}})
+ %elt.bitreverse = call i32 @llvm.bitreverse.i32(i32 %a)
+ ret i32 %elt.bitreverse
+}
+
+; Function Attrs: nounwind
+define noundef i64 @test_bitreverse_long(i64 noundef %a) {
+entry:
+; CHECK:call i64 @dx.op.unary.i64(i32 30, i64 %{{.*}})
+ %elt.bitreverse = call i64 @llvm.bitreverse.i64(i64 %a)
+ ret i64 %elt.bitreverse
+}
+
+declare i16 @llvm.bitreverse.i16(i16)
+declare i32 @llvm.bitreverse.i32(i32)
+declare i64 @llvm.bitreverse.i64(i64)
diff --git a/llvm/test/CodeGen/DirectX/round.ll b/llvm/test/CodeGen/DirectX/round.ll
index 5d53a79..e0a3772 100644
--- a/llvm/test/CodeGen/DirectX/round.ll
+++ b/llvm/test/CodeGen/DirectX/round.ll
@@ -1,31 +1,22 @@
; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
; Make sure dxil operation function calls for round are generated for float and half.
-; CHECK:call float @dx.op.unary.f32(i32 26, float %{{.*}})
-; CHECK:call half @dx.op.unary.f16(i32 26, half %{{.*}})
-target datalayout = "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:32-f64:64-n8:16:32:64"
-target triple = "dxil-pc-shadermodel6.7-library"
-
-; Function Attrs: noinline nounwind optnone
-define noundef float @round_float(float noundef %a) #0 {
+; CHECK-LABEL: round_half
+define noundef half @round_half(half noundef %a) {
entry:
- %a.addr = alloca float, align 4
- store float %a, ptr %a.addr, align 4
- %0 = load float, ptr %a.addr, align 4
- %elt.round = call float @llvm.round.f32(float %0)
- ret float %elt.round
+; CHECK: call half @dx.op.unary.f16(i32 26, half %{{.*}})
+ %elt.roundeven = call half @llvm.roundeven.f16(half %a)
+ ret half %elt.roundeven
}
-; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn
-declare float @llvm.round.f32(float) #1
-
-; Function Attrs: noinline nounwind optnone
-define noundef half @round_half(half noundef %a) #0 {
+; CHECK-LABEL: round_float
+define noundef float @round_float(float noundef %a) {
entry:
- %a.addr = alloca half, align 2
- store half %a, ptr %a.addr, align 2
- %0 = load half, ptr %a.addr, align 2
- %elt.round = call half @llvm.round.f16(half %0)
- ret half %elt.round
+; CHECK: call float @dx.op.unary.f32(i32 26, float %{{.*}})
+ %elt.roundeven = call float @llvm.roundeven.f32(float %a)
+ ret float %elt.roundeven
}
+
+declare half @llvm.roundeven.f16(half)
+declare float @llvm.roundeven.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/round_error.ll b/llvm/test/CodeGen/DirectX/round_error.ll
index 3bd87b2..2d27fbb 100644
--- a/llvm/test/CodeGen/DirectX/round_error.ll
+++ b/llvm/test/CodeGen/DirectX/round_error.ll
@@ -8,6 +8,6 @@ entry:
%a.addr = alloca double, align 8
store double %a, ptr %a.addr, align 8
%0 = load double, ptr %a.addr, align 8
- %elt.round = call double @llvm.round.f64(double %0)
- ret double %elt.round
+ %elt.roundeven = call double @llvm.roundeven.f64(double %0)
+ ret double %elt.roundeven
}
diff --git a/llvm/test/CodeGen/DirectX/rsqrt.ll b/llvm/test/CodeGen/DirectX/rsqrt.ll
new file mode 100644
index 0000000..52af0e6
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/rsqrt.ll
@@ -0,0 +1,28 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for rsqrt are generated for float and half.
+
+; CHECK-LABEL: rsqrt_float
+; CHECK: call float @dx.op.unary.f32(i32 25, float %{{.*}})
+define noundef float @rsqrt_float(float noundef %a) {
+entry:
+ %a.addr = alloca float, align 4
+ store float %a, ptr %a.addr, align 4
+ %0 = load float, ptr %a.addr, align 4
+ %dx.rsqrt = call float @llvm.dx.rsqrt.f32(float %0)
+ ret float %dx.rsqrt
+}
+
+; CHECK-LABEL: rsqrt_half
+; CHECK: call half @dx.op.unary.f16(i32 25, half %{{.*}})
+define noundef half @rsqrt_half(half noundef %a) {
+entry:
+ %a.addr = alloca half, align 2
+ store half %a, ptr %a.addr, align 2
+ %0 = load half, ptr %a.addr, align 2
+ %dx.rsqrt = call half @llvm.dx.rsqrt.f16(half %0)
+ ret half %dx.rsqrt
+}
+
+declare half @llvm.dx.rsqrt.f16(half)
+declare float @llvm.dx.rsqrt.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/rsqrt_error.ll b/llvm/test/CodeGen/DirectX/rsqrt_error.ll
new file mode 100644
index 0000000..9cd5002
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/rsqrt_error.ll
@@ -0,0 +1,14 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation rsqrt does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+; Function Attrs: noinline nounwind optnone
+define noundef double @rsqrt_double(double noundef %a) #0 {
+entry:
+ %a.addr = alloca double, align 8
+ store double %a, ptr %a.addr, align 8
+ %0 = load double, ptr %a.addr, align 8
+ %dx.rsqrt = call double @llvm.dx.rsqrt.f64(double %0)
+ ret double %dx.rsqrt
+}
diff --git a/llvm/test/CodeGen/DirectX/smax.ll b/llvm/test/CodeGen/DirectX/smax.ll
new file mode 100644
index 0000000..8b24067
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/smax.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for smax are generated for i16/i32/i64.
+
+; CHECK-LABEL:test_smax_i16
+define noundef i16 @test_smax_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: call i16 @dx.op.binary.i16(i32 37, i16 %{{.*}}, i16 %{{.*}})
+ %0 = call i16 @llvm.smax.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
+
+; CHECK-LABEL:test_smax_i32
+define noundef i32 @test_smax_i32(i32 noundef %a, i32 noundef %b) {
+entry:
+; CHECK: call i32 @dx.op.binary.i32(i32 37, i32 %{{.*}}, i32 %{{.*}})
+ %0 = call i32 @llvm.smax.i32(i32 %a, i32 %b)
+ ret i32 %0
+}
+
+; CHECK-LABEL:test_smax_i64
+define noundef i64 @test_smax_i64(i64 noundef %a, i64 noundef %b) {
+entry:
+; CHECK: call i64 @dx.op.binary.i64(i32 37, i64 %{{.*}}, i64 %{{.*}})
+ %0 = call i64 @llvm.smax.i64(i64 %a, i64 %b)
+ ret i64 %0
+}
+
+declare i16 @llvm.smax.i16(i16, i16)
+declare i32 @llvm.smax.i32(i32, i32)
+declare i64 @llvm.smax.i64(i64, i64)
diff --git a/llvm/test/CodeGen/DirectX/smin.ll b/llvm/test/CodeGen/DirectX/smin.ll
new file mode 100644
index 0000000..b2b40a1
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/smin.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for smin are generated for i16/i32/i64.
+
+; CHECK-LABEL:test_smin_i16
+define noundef i16 @test_smin_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: call i16 @dx.op.binary.i16(i32 38, i16 %{{.*}}, i16 %{{.*}})
+ %0 = call i16 @llvm.smin.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
+
+; CHECK-LABEL:test_smin_i32
+define noundef i32 @test_smin_i32(i32 noundef %a, i32 noundef %b) {
+entry:
+; CHECK: call i32 @dx.op.binary.i32(i32 38, i32 %{{.*}}, i32 %{{.*}})
+ %0 = call i32 @llvm.smin.i32(i32 %a, i32 %b)
+ ret i32 %0
+}
+
+; CHECK-LABEL:test_smin_i64
+define noundef i64 @test_smin_i64(i64 noundef %a, i64 noundef %b) {
+entry:
+; CHECK: call i64 @dx.op.binary.i64(i32 38, i64 %{{.*}}, i64 %{{.*}})
+ %0 = call i64 @llvm.smin.i64(i64 %a, i64 %b)
+ ret i64 %0
+}
+
+declare i16 @llvm.smin.i16(i16, i16)
+declare i32 @llvm.smin.i32(i32, i32)
+declare i64 @llvm.smin.i64(i64, i64)
diff --git a/llvm/test/CodeGen/DirectX/sqrt.ll b/llvm/test/CodeGen/DirectX/sqrt.ll
new file mode 100644
index 0000000..76a572e
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/sqrt.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for sqrt are generated for float and half.
+
+define noundef float @sqrt_float(float noundef %a) #0 {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 24, float %{{.*}})
+ %elt.sqrt = call float @llvm.sqrt.f32(float %a)
+ ret float %elt.sqrt
+}
+
+define noundef half @sqrt_half(half noundef %a) #0 {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 24, half %{{.*}})
+ %elt.sqrt = call half @llvm.sqrt.f16(half %a)
+ ret half %elt.sqrt
+}
+
+declare half @llvm.sqrt.f16(half)
+declare float @llvm.sqrt.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/sqrt_error.ll b/llvm/test/CodeGen/DirectX/sqrt_error.ll
new file mode 100644
index 0000000..fffa2e1
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/sqrt_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation sqrt does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @sqrt_double(double noundef %a) {
+entry:
+ %elt.sqrt = call double @llvm.sqrt.f64(double %a)
+ ret double %elt.sqrt
+}
diff --git a/llvm/test/CodeGen/DirectX/trunc.ll b/llvm/test/CodeGen/DirectX/trunc.ll
new file mode 100644
index 0000000..2072f28
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/trunc.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for trunc are generated for float and half.
+
+define noundef float @trunc_float(float noundef %a) {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 29, float %{{.*}})
+ %elt.trunc = call float @llvm.trunc.f32(float %a)
+ ret float %elt.trunc
+}
+
+define noundef half @trunc_half(half noundef %a) {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 29, half %{{.*}})
+ %elt.trunc = call half @llvm.trunc.f16(half %a)
+ ret half %elt.trunc
+}
+
+declare half @llvm.trunc.f16(half)
+declare float @llvm.trunc.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/trunc_error.ll b/llvm/test/CodeGen/DirectX/trunc_error.ll
new file mode 100644
index 0000000..751b0b9
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/trunc_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation trunc does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @trunc_double(double noundef %a) {
+entry:
+ %elt.trunc = call double @llvm.trunc.f64(double %a)
+ ret double %elt.trunc
+}
diff --git a/llvm/test/CodeGen/DirectX/umax.ll b/llvm/test/CodeGen/DirectX/umax.ll
index c7b6a87..be0f557 100644
--- a/llvm/test/CodeGen/DirectX/umax.ll
+++ b/llvm/test/CodeGen/DirectX/umax.ll
@@ -1,30 +1,31 @@
; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
-; Make sure dxil operation function calls for umax are generated for i32/i64.
+; Make sure dxil operation function calls for umax are generated for i16/i32/i64.
-target datalayout = "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:32-f64:64-n8:16:32:64"
-target triple = "dxil-pc-shadermodel6.7-library"
+; CHECK-LABEL:test_umax_i16
+define noundef i16 @test_umax_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: call i16 @dx.op.binary.i16(i32 39, i16 %{{.*}}, i16 %{{.*}})
+ %0 = call i16 @llvm.umax.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
; CHECK-LABEL:test_umax_i32
-; Function Attrs: noinline nounwind optnone
-define noundef i32 @test_umax_i32(i32 noundef %a, i32 noundef %b) #0 {
+define noundef i32 @test_umax_i32(i32 noundef %a, i32 noundef %b) {
entry:
-; CHECK:call i32 @dx.op.binary.i32(i32 39, i32 %{{.*}}, i32 %{{.*}})
+; CHECK: call i32 @dx.op.binary.i32(i32 39, i32 %{{.*}}, i32 %{{.*}})
%0 = call i32 @llvm.umax.i32(i32 %a, i32 %b)
ret i32 %0
}
; CHECK-LABEL:test_umax_i64
-define noundef i64 @test_umax_i64(i64 noundef %a, i64 noundef %b) #0 {
+define noundef i64 @test_umax_i64(i64 noundef %a, i64 noundef %b) {
entry:
-; CHECK:call i64 @dx.op.binary.i64(i32 39, i64 %{{.*}}, i64 %{{.*}})
+; CHECK: call i64 @dx.op.binary.i64(i32 39, i64 %{{.*}}, i64 %{{.*}})
%0 = call i64 @llvm.umax.i64(i64 %a, i64 %b)
ret i64 %0
}
-; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn
-declare i32 @llvm.umax.i32(i32, i32) #1
-declare i64 @llvm.umax.i64(i64, i64) #1
-
-attributes #0 = { noinline nounwind }
-attributes #1 = { nocallback nofree nosync nounwind readnone speculatable willreturn }
+declare i16 @llvm.umax.i16(i16, i16)
+declare i32 @llvm.umax.i32(i32, i32)
+declare i64 @llvm.umax.i64(i64, i64)
diff --git a/llvm/test/CodeGen/DirectX/umin.ll b/llvm/test/CodeGen/DirectX/umin.ll
new file mode 100644
index 0000000..5051c71
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/umin.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for umin are generated for i16/i32/i64.
+
+; CHECK-LABEL:test_umin_i16
+define noundef i16 @test_umin_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: call i16 @dx.op.binary.i16(i32 40, i16 %{{.*}}, i16 %{{.*}})
+ %0 = call i16 @llvm.umin.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
+
+; CHECK-LABEL:test_umin_i32
+define noundef i32 @test_umin_i32(i32 noundef %a, i32 noundef %b) {
+entry:
+; CHECK: call i32 @dx.op.binary.i32(i32 40, i32 %{{.*}}, i32 %{{.*}})
+ %0 = call i32 @llvm.umin.i32(i32 %a, i32 %b)
+ ret i32 %0
+}
+
+; CHECK-LABEL:test_umin_i64
+define noundef i64 @test_umin_i64(i64 noundef %a, i64 noundef %b) {
+entry:
+; CHECK: call i64 @dx.op.binary.i64(i32 40, i64 %{{.*}}, i64 %{{.*}})
+ %0 = call i64 @llvm.umin.i64(i64 %a, i64 %b)
+ ret i64 %0
+}
+
+declare i16 @llvm.umin.i16(i16, i16)
+declare i32 @llvm.umin.i32(i32, i32)
+declare i64 @llvm.umin.i64(i64, i64)
diff --git a/llvm/test/CodeGen/Generic/ForceStackAlign.ll b/llvm/test/CodeGen/Generic/ForceStackAlign.ll
index 2c35ad3..7993b3e 100644
--- a/llvm/test/CodeGen/Generic/ForceStackAlign.ll
+++ b/llvm/test/CodeGen/Generic/ForceStackAlign.ll
@@ -8,7 +8,7 @@
; Stack realignment not supported.
; XFAIL: target=sparc{{.*}}
-; NVPTX cannot select dynamic_stackalloc
+; NVPTX can only select dynamic_stackalloc on sm_52+ and with ptx73+
; XFAIL: target=nvptx{{.*}}
define i32 @f(ptr %p) nounwind {
diff --git a/llvm/test/CodeGen/Generic/allow-check.ll b/llvm/test/CodeGen/Generic/allow-check.ll
new file mode 100644
index 0000000..43dab68
--- /dev/null
+++ b/llvm/test/CodeGen/Generic/allow-check.ll
@@ -0,0 +1,31 @@
+; Avoid `!DL->isLittleEndian() && !CLI->enableBigEndian()` mismatch on PPC64BE.
+; REQUIRES: host-byteorder-little-endian
+
+; -global-isel=1 is unsupported.
+; XFAIL: target=nvptx{{.*}}
+; XFAIL: target=sparc{{.*}}
+; XFAIL: target=hexagon-{{.*}}
+
+; RUN: llc < %s -O3 -global-isel=0 -fast-isel=0
+; RUN: llc < %s -O3 -global-isel=1 -fast-isel=0
+; RUN: llc < %s -O3 -global-isel=0 -fast-isel=1
+
+; RUN: llc < %s -O0 -global-isel=0 -fast-isel=0
+; RUN: llc < %s -O0 -global-isel=1 -fast-isel=0
+; RUN: llc < %s -O0 -global-isel=0 -fast-isel=1
+
+define i1 @test_runtime() local_unnamed_addr {
+entry:
+ %allow = call i1 @llvm.allow.runtime.check(metadata !"test_check")
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.runtime.check(metadata) nounwind
+
+define i1 @test_ubsan() local_unnamed_addr {
+entry:
+ %allow = call i1 @llvm.allow.ubsan.check(i8 7)
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.ubsan.check(i8) nounwind
diff --git a/llvm/test/CodeGen/Generic/builtin-hot.ll b/llvm/test/CodeGen/Generic/builtin-hot.ll
deleted file mode 100644
index 449f58d..0000000
--- a/llvm/test/CodeGen/Generic/builtin-hot.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -o - %s | FileCheck %s
-
-; REQUIRES: aarch64-registered-target
-
-target triple = "aarch64-linux"
-
-define i1 @test() {
-; CHECK-LABEL: test:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w0, wzr
-; CHECK-NEXT: ret
-entry:
- %hot = call i1 @llvm.experimental.hot()
- ret i1 %hot
-}
-
-declare i1 @llvm.expect.hot() nounwind
-
diff --git a/llvm/test/CodeGen/Generic/gc-lowering.ll b/llvm/test/CodeGen/Generic/gc-lowering.ll
new file mode 100644
index 0000000..fa2e92a
--- /dev/null
+++ b/llvm/test/CodeGen/Generic/gc-lowering.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes='require<collector-metadata>,function(gc-lowering)' < %s | FileCheck %s
+
+declare ptr @llvm_gc_allocate(i32)
+declare void @llvm_gc_initialize(i32)
+
+declare void @llvm.gcroot(ptr, ptr)
+declare void @llvm.gcwrite(ptr, ptr, ptr)
+
+define i32 @main() gc "shadow-stack" {
+; CHECK-LABEL: define i32 @main() gc "shadow-stack" {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: store ptr null, ptr [[A]], align 8
+; CHECK-NEXT: [[B:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: store ptr null, ptr [[B]], align 8
+; CHECK-NEXT: call void @llvm_gc_initialize(i32 1048576)
+; CHECK-NEXT: call void @llvm.gcroot(ptr [[A]], ptr null)
+; CHECK-NEXT: [[APTR:%.*]] = call ptr @llvm_gc_allocate(i32 10)
+; CHECK-NEXT: store ptr [[APTR]], ptr [[A]], align 8
+; CHECK-NEXT: call void @llvm.gcroot(ptr [[B]], ptr null)
+; CHECK-NEXT: [[B_UPGRD_1:%.*]] = call ptr @llvm_gc_allocate(i32 8)
+; CHECK-NEXT: store ptr [[B_UPGRD_1]], ptr [[B]], align 8
+; CHECK-NEXT: [[B_1:%.*]] = load ptr, ptr [[B]], align 8
+; CHECK-NEXT: [[A_1:%.*]] = load ptr, ptr [[A]], align 8
+; CHECK-NEXT: store ptr [[A_1]], ptr [[B_1]], align 8
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %A = alloca ptr
+ %B = alloca ptr
+
+ call void @llvm_gc_initialize(i32 1048576) ; Start with 1MB heap
+
+ ;; ptr A;
+ call void @llvm.gcroot(ptr %A, ptr null)
+
+ ;; A = gcalloc(10);
+ %Aptr = call ptr @llvm_gc_allocate(i32 10)
+ store ptr %Aptr, ptr %A
+
+ ;; ptr B;
+ call void @llvm.gcroot(ptr %B, ptr null)
+
+ ;; B = gcalloc(8);
+ %B.upgrd.1 = call ptr @llvm_gc_allocate(i32 8)
+ store ptr %B.upgrd.1, ptr %B
+
+ ;; *B = A;
+ %B.1 = load ptr, ptr %B
+ %A.1 = load ptr, ptr %A
+ call void @llvm.gcwrite(ptr %A.1, ptr %B.upgrd.1, ptr %B.1)
+
+ ret i32 0
+}
+
+define void @no_gc() {
+; CHECK-LABEL: define void @no_gc() {
+; CHECK-NEXT: ret void
+;
+ ret void
+}
diff --git a/llvm/test/CodeGen/Hexagon/addrmode-immop.mir b/llvm/test/CodeGen/Hexagon/addrmode-immop.mir
index 3069cbe..1412d31 100644
--- a/llvm/test/CodeGen/Hexagon/addrmode-immop.mir
+++ b/llvm/test/CodeGen/Hexagon/addrmode-immop.mir
@@ -15,7 +15,7 @@
; Function Attrs: norecurse
define void @f0() #0 {
b0:
- %v0 = load ptr, ptr getelementptr (i8, ptr getelementptr inbounds ({ [3 x ptr], [3 x ptr] }, ptr @g0, i32 0, inrange i32 0, i32 3), i32 sub (i32 ptrtoint (ptr @f1 to i32), i32 1)), align 4
+ %v0 = load ptr, ptr getelementptr (i8, ptr getelementptr inbounds ({ [3 x ptr], [3 x ptr] }, ptr @g0, i32 0, i32 0, i32 3), i32 sub (i32 ptrtoint (ptr @f1 to i32), i32 1)), align 4
%v1 = call i32 %v0(ptr nonnull undef)
unreachable
}
@@ -33,7 +33,7 @@ tracksRegLiveness: true
body: |
bb.0.b0:
$r2 = A2_tfrsi @g0 + 12
- $r2 = L2_loadri_io killed $r2, @f1 - 1 :: (load (s32) from `ptr getelementptr (i8, ptr getelementptr inbounds ({ [3 x ptr], [3 x ptr] }, ptr @g0, i32 0, inrange i32 0, i32 3), i32 sub (i32 ptrtoint (ptr @f1 to i32), i32 1))`)
+ $r2 = L2_loadri_io killed $r2, @f1 - 1 :: (load (s32) from `ptr getelementptr (i8, ptr getelementptr inbounds ({ [3 x ptr], [3 x ptr] }, ptr @g0, i32 0, i32 0, i32 3), i32 sub (i32 ptrtoint (ptr @f1 to i32), i32 1))`)
ADJCALLSTACKDOWN 0, 0, implicit-def $r29, implicit-def dead $r30, implicit $r31, implicit $r30, implicit $r29
PS_callr_nr killed $r2, hexagoncsr, implicit undef $r0, implicit-def $r29, implicit-def dead $r0
ADJCALLSTACKUP 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit-def dead $r31, implicit $r29
diff --git a/llvm/test/CodeGen/Hexagon/build-attributes.ll b/llvm/test/CodeGen/Hexagon/build-attributes.ll
new file mode 100644
index 0000000..48ee31a
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/build-attributes.ll
@@ -0,0 +1,16 @@
+;; Generate build attributes from llc.
+
+; RUN: llc -mtriple=hexagon-unknown-elf \
+; RUN: -mattr=+hvxv73,+cabac,+v71,+hvx-ieee-fp,+hvx-length128b %s -o - | FileCheck %s
+
+; CHECK: .attribute 4, 71 // Tag_arch
+; CHECK-NEXT: .attribute 5, 73 // Tag_hvx_arch
+; CHECK-NEXT: .attribute 6, 1 // Tag_hvx_ieeefp
+; CHECK-NEXT: .attribute 7, 1 // Tag_hvx_qfloat
+; CHECK-NEXT: .attribute 8, 1 // Tag_zreg
+; CHECK-NEXT: .attribute 10, 1 // Tag_cabac
+
+define i32 @addi(i32 %a) {
+ %1 = add i32 %a, 1
+ ret i32 %1
+}
\ No newline at end of file
diff --git a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll
index 0771fda..7ccee16 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -hexagon-vlcr | opt -passes=adce -S | FileCheck %s
+; RUN: opt -mtriple=hexagon-- -passes='loop(hexagon-vlcr),adce' -S %s | FileCheck %s
; CHECK: %.hexagon.vlcr = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B
; ModuleID = 'hexagon_vector_loop_carried_reuse.c'
diff --git a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll
index 25afb9f..532f7fd 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -march=hexagon -hexagon-vlcr | opt -passes=adce -S | FileCheck %s
+; RUN: opt -mtriple hexagon-- -passes='loop(hexagon-vlcr),adce' -S %s | FileCheck %s
; CHECK: %v32.hexagon.vlcr = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B
diff --git a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll
index 5397342..ecfcf53 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -hexagon-vlcr | opt -passes=adce -S | FileCheck %s
+; RUN: opt -mtriple=hexagon-- -passes='loop(hexagon-vlcr),adce' -S %s | FileCheck %s
; CHECK-NOT: %.hexagon.vlcr
; ModuleID = 'hexagon_vector_loop_carried_reuse.c'
diff --git a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_invalid.ll b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_invalid.ll
index b440dba..9872fae 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_invalid.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_invalid.ll
@@ -1,4 +1,4 @@
-; RUN: opt -hexagon-vlcr < %s -S | FileCheck %s
+; RUN: opt -mtriple=hexagon-- -passes=hexagon-vlcr -S %s | FileCheck %s
; Test that reuse doesn't occur due to two shufflevectors with different masks.
diff --git a/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll b/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll
index ab7bf1b..c53e578 100644
--- a/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll
+++ b/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll
@@ -1,4 +1,5 @@
; RUN: opt -march=hexagon -hexagon-loop-idiom -S < %s | FileCheck %s
+; RUN: opt -mtriple=hexagon-- -p hexagon-loop-idiom -disable-memcpy-idiom -S < %s | FileCheck %s
; Make sure we don't convert load/store loops into memcpy if the access type
; is a vector. Using vector instructions is generally better in such cases.
diff --git a/llvm/test/CodeGen/Hexagon/livephysregs-regmask-clobber.mir b/llvm/test/CodeGen/Hexagon/livephysregs-regmask-clobber.mir
index 8f1cb42..5221307 100644
--- a/llvm/test/CodeGen/Hexagon/livephysregs-regmask-clobber.mir
+++ b/llvm/test/CodeGen/Hexagon/livephysregs-regmask-clobber.mir
@@ -17,6 +17,8 @@
name: f0
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, offset: 0, size: 128, alignment: 128 }
- { id: 1, offset: 128, size: 128, alignment: 128 }
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll
index c711026..5ace9e6 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll
@@ -1,6 +1,8 @@
; Check for recognizing the "memmove" idiom.
; RUN: opt -hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \
; RUN: | FileCheck %s
+; RUN: opt -p hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \
+; RUN: | FileCheck %s
; CHECK: call void @llvm.memmove
; Function Attrs: norecurse nounwind
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll
index 234e4f5..ed56a33 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll
@@ -1,5 +1,7 @@
; RUN: opt -hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \
; RUN: | FileCheck %s
+; RUN: opt -p hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \
+; RUN: | FileCheck %s
define void @PR14241(ptr %s, i64 %size) #0 {
; Ensure that we don't form a memcpy for strided loops. Briefly, when we taught
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll
index 140c676..e5bcc2b 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -hexagon-loop-idiom < %s | opt -S -passes='loop(loop-deletion),gvn'
+; RUN: opt -mtriple hexagon-- -S -passes='loop(hexagon-loop-idiom,loop-deletion),gvn'
; REQUIRES: asserts
; This tests that the HexagonLoopIdiom pass does not mark LCSSA information
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll
index 7a7d1d9..78f0c9e 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll
@@ -1,4 +1,5 @@
; RUN: opt -hexagon-loop-idiom -S < %s | FileCheck %s
+; RUN: opt -p hexagon-loop-idiom -S < %s | FileCheck %s
; Make sure that we generate correct runtime checks.
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll
index 37e1bb6..ce02b62 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll
@@ -1,4 +1,5 @@
; RUN: opt -hexagon-loop-idiom -mtriple hexagon-unknown-elf < %s
+; RUN: opt -p hexagon-loop-idiom -mtriple hexagon-unknown-elf < %s
; REQUIRES: asserts
target triple = "hexagon"
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll
index 1934ced..74c02d6 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll
@@ -1,4 +1,5 @@
; RUN: opt -march=hexagon -hexagon-loop-idiom -S < %s | FileCheck %s
+; RUN: opt -march=hexagon -p hexagon-loop-idiom -S < %s | FileCheck %s
; CHECK-LABEL: define void @fred
; Check that this test does not crash.
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-long-loop.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-long-loop.ll
index b25010f..94b0c96 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-long-loop.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-long-loop.ll
@@ -1,4 +1,5 @@
; RUN: opt -march=hexagon -hexagon-loop-idiom -S < %s | FileCheck %s
+; RUN: opt -march=hexagon -p hexagon-loop-idiom -S < %s | FileCheck %s
;
; The number of nested selects caused the simplification loop to take
; more than the maximum number of iterations. This caused the compiler
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll
index e4b2b5a..a00b1d5 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll
@@ -1,4 +1,5 @@
; RUN: opt -march=hexagon -hexagon-loop-idiom -S < %s | FileCheck %s
+; RUN: opt -march=hexagon -p hexagon-loop-idiom -S < %s | FileCheck %s
; REQUIRES: asserts
;
; Check for sane output, this used to crash.
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy.ll
index 781618e..2461e1c 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy.ll
@@ -1,5 +1,7 @@
; RUN: opt -hexagon-loop-idiom < %s -mtriple=hexagon-unknown-unknown -S \
; RUN: | FileCheck %s
+; RUN: opt -p hexagon-loop-idiom < %s -mtriple=hexagon-unknown-unknown -S \
+; RUN: | FileCheck %s
target triple = "hexagon"
diff --git a/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir b/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
index 67f4dd7..9468b18 100644
--- a/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
+++ b/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
@@ -135,7 +135,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 0
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
maxCallFrameSize: 0
hasOpaqueSPAdjustment: false
diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-permi.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-permi.ll
index 92669d2..0d9f9da 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-permi.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-permi.ll
@@ -36,33 +36,3 @@ entry:
%res = call <32 x i8> @llvm.loongarch.lasx.xvpermi.q(<32 x i8> %va, <32 x i8> %vb, i32 1)
ret <32 x i8> %res
}
-
-define <32 x i8> @lasx_xvpermi_q_204(<32 x i8> %va, <32 x i8> %vb) nounwind {
-; CHECK-LABEL: lasx_xvpermi_q_204:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvpermi.q $xr0, $xr1, 0
-; CHECK-NEXT: ret
-entry:
- %res = call <32 x i8> @llvm.loongarch.lasx.xvpermi.q(<32 x i8> %va, <32 x i8> %vb, i32 204)
- ret <32 x i8> %res
-}
-
-define <32 x i8> @lasx_xvpermi_q_221(<32 x i8> %va, <32 x i8> %vb) nounwind {
-; CHECK-LABEL: lasx_xvpermi_q_221:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvpermi.q $xr0, $xr1, 17
-; CHECK-NEXT: ret
-entry:
- %res = call <32 x i8> @llvm.loongarch.lasx.xvpermi.q(<32 x i8> %va, <32 x i8> %vb, i32 221)
- ret <32 x i8> %res
-}
-
-define <32 x i8> @lasx_xvpermi_q_255(<32 x i8> %va, <32 x i8> %vb) nounwind {
-; CHECK-LABEL: lasx_xvpermi_q_255:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvpermi.q $xr0, $xr1, 51
-; CHECK-NEXT: ret
-entry:
- %res = call <32 x i8> @llvm.loongarch.lasx.xvpermi.q(<32 x i8> %va, <32 x i8> %vb, i32 255)
- ret <32 x i8> %res
-}
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
index 25106b4..6629d34 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
@@ -123,9 +123,10 @@ define void @insert_32xi8_idx(ptr %src, ptr %dst, i8 %in, i32 %idx) nounwind {
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 0
-; CHECK-NEXT: st.b $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 4, 0
+; CHECK-NEXT: st.b $a2, $a3, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
@@ -149,9 +150,10 @@ define void @insert_16xi16_idx(ptr %src, ptr %dst, i16 %in, i32 %idx) nounwind {
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 1
-; CHECK-NEXT: st.h $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 4, 1
+; CHECK-NEXT: st.h $a2, $a3, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
@@ -175,9 +177,10 @@ define void @insert_8xi32_idx(ptr %src, ptr %dst, i32 %in, i32 %idx) nounwind {
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 2
-; CHECK-NEXT: st.w $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 4, 2
+; CHECK-NEXT: st.w $a2, $a3, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
@@ -201,9 +204,10 @@ define void @insert_4xi64_idx(ptr %src, ptr %dst, i64 %in, i32 %idx) nounwind {
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 3
-; CHECK-NEXT: st.d $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 4, 3
+; CHECK-NEXT: st.d $a2, $a3, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
@@ -227,9 +231,10 @@ define void @insert_8xfloat_idx(ptr %src, ptr %dst, float %in, i32 %idx) nounwin
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr1, $a0, 0
; CHECK-NEXT: xvst $xr1, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a2, 4, 2
-; CHECK-NEXT: fst.s $fa0, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: addi.d $a2, $sp, 0
+; CHECK-NEXT: bstrins.d $a2, $a0, 4, 2
+; CHECK-NEXT: fst.s $fa0, $a2, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
@@ -253,9 +258,10 @@ define void @insert_4xdouble_idx(ptr %src, ptr %dst, double %in, i32 %idx) nounw
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr1, $a0, 0
; CHECK-NEXT: xvst $xr1, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a2, 4, 3
-; CHECK-NEXT: fst.d $fa0, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: addi.d $a2, $sp, 0
+; CHECK-NEXT: bstrins.d $a2, $a0, 4, 3
+; CHECK-NEXT: fst.d $fa0, $a2, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll
index 7f23207..19171b7 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll
@@ -87,9 +87,10 @@ define void @insert_16xi8_idx(ptr %src, ptr %dst, i8 %ins, i32 %idx) nounwind {
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 0
-; CHECK-NEXT: st.b $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 3, 0
+; CHECK-NEXT: st.b $a2, $a3, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
@@ -106,9 +107,10 @@ define void @insert_8xi16_idx(ptr %src, ptr %dst, i16 %ins, i32 %idx) nounwind {
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 1
-; CHECK-NEXT: st.h $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 3, 1
+; CHECK-NEXT: st.h $a2, $a3, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
@@ -125,9 +127,10 @@ define void @insert_4xi32_idx(ptr %src, ptr %dst, i32 %ins, i32 %idx) nounwind {
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 2
-; CHECK-NEXT: st.w $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 3, 2
+; CHECK-NEXT: st.w $a2, $a3, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
@@ -144,9 +147,10 @@ define void @insert_2xi64_idx(ptr %src, ptr %dst, i64 %ins, i32 %idx) nounwind {
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 3
-; CHECK-NEXT: st.d $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 3, 3
+; CHECK-NEXT: st.d $a2, $a3, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
@@ -163,9 +167,10 @@ define void @insert_4xfloat_idx(ptr %src, ptr %dst, float %ins, i32 %idx) nounwi
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr1, $a0, 0
; CHECK-NEXT: vst $vr1, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a2, 3, 2
-; CHECK-NEXT: fst.s $fa0, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: addi.d $a2, $sp, 0
+; CHECK-NEXT: bstrins.d $a2, $a0, 3, 2
+; CHECK-NEXT: fst.s $fa0, $a2, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
@@ -182,9 +187,10 @@ define void @insert_2xdouble_idx(ptr %src, ptr %dst, double %ins, i32 %idx) noun
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr1, $a0, 0
; CHECK-NEXT: vst $vr1, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a2, 3, 3
-; CHECK-NEXT: fst.d $fa0, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: addi.d $a2, $sp, 0
+; CHECK-NEXT: bstrins.d $a2, $a0, 3, 3
+; CHECK-NEXT: fst.d $fa0, $a2, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
diff --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir
deleted file mode 100644
index d66dd10..0000000
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir
+++ /dev/null
@@ -1,10 +0,0 @@
-# RUN: not llc -mtriple=aarch64-- -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
-# When a low-level type is 0 bits
----
-name: test_scalar_size_0
-body: |
- bb.0:
- liveins: $x0
- ; CHECK: [[@LINE+1]]:10: invalid size for scalar type
- %0:_(s0) = G_IMPLICIT_DEF
-...
diff --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir
index 6985687..632e5fa 100644
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir
@@ -5,6 +5,6 @@ name: test_vector_element_size_0
body: |
bb.0:
liveins: $x0
- ; CHECK: [[@LINE+1]]:15: invalid size for scalar type
+ ; CHECK: [[@LINE+1]]:15: invalid size for scalar element in vector
%0:_(<2 x s0>) = G_IMPLICIT_DEF
...
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/stack-id-assert.mir b/llvm/test/CodeGen/MIR/AMDGPU/stack-id-assert.mir
index e40d187..9831f78 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/stack-id-assert.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/stack-id-assert.mir
@@ -29,6 +29,8 @@ liveins:
- { reg: '$vgpr0', virtual-reg: '' }
- { reg: '$vgpr1', virtual-reg: '' }
- { reg: '$vgpr2', virtual-reg: '' }
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, name: '', type: spill-slot, offset: 0, size: 8, alignment: 4,
stack-id: sgpr-spill, callee-saved-register: '', callee-saved-restored: true,
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/trap.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/trap.mir
index 6438893..dc99ce8 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/trap.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/trap.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
--- |
declare void @llvm.trap()
@@ -9,12 +9,15 @@
---
name: f
alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
; MIPS32-LABEL: name: f
; MIPS32: TRAP
- ; MIPS32: RetRA
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; MIPS32-NEXT: RetRA
+ G_TRAP
RetRA
...
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
index 52352ed..e471e10 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
@@ -220,10 +220,12 @@ body: |
; MIPS32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY3]], [[COPY1]]
; MIPS32-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY1]]
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY]]
; MIPS32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
- ; MIPS32-NEXT: $v0 = COPY [[ADD2]](s32)
- ; MIPS32-NEXT: $v1 = COPY [[ADD]](s32)
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+ ; MIPS32-NEXT: $v0 = COPY [[COPY5]](s32)
+ ; MIPS32-NEXT: $v1 = COPY [[COPY4]](s32)
; MIPS32-NEXT: RetRA implicit $v0, implicit $v1
%2:_(s32) = COPY $a0
%3:_(s32) = COPY $a1
@@ -268,6 +270,7 @@ body: |
; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (load (s32) from %fixed-stack.3)
; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[COPY]]
; MIPS32-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY]]
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[LOAD1]], [[COPY1]]
; MIPS32-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[LOAD1]]
; MIPS32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
@@ -275,6 +278,7 @@ body: |
; MIPS32-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
; MIPS32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
; MIPS32-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
; MIPS32-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[LOAD2]], [[COPY2]]
; MIPS32-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD3]](s32), [[LOAD2]]
; MIPS32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -283,13 +287,15 @@ body: |
; MIPS32-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD4]](s32), [[C]]
; MIPS32-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ICMP4]], [[OR]]
; MIPS32-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ICMP3]], [[AND2]]
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ADD4]](s32)
; MIPS32-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[LOAD3]], [[COPY3]]
; MIPS32-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[OR1]], [[C1]]
; MIPS32-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD5]], [[AND3]]
- ; MIPS32-NEXT: $v0 = COPY [[ADD]](s32)
- ; MIPS32-NEXT: $v1 = COPY [[ADD2]](s32)
- ; MIPS32-NEXT: $a0 = COPY [[ADD4]](s32)
- ; MIPS32-NEXT: $a1 = COPY [[ADD6]](s32)
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ADD6]](s32)
+ ; MIPS32-NEXT: $v0 = COPY [[COPY4]](s32)
+ ; MIPS32-NEXT: $v1 = COPY [[COPY5]](s32)
+ ; MIPS32-NEXT: $a0 = COPY [[COPY6]](s32)
+ ; MIPS32-NEXT: $a1 = COPY [[COPY7]](s32)
; MIPS32-NEXT: RetRA implicit $v0, implicit $v1, implicit $a0, implicit $a1
%2:_(s32) = COPY $a0
%3:_(s32) = COPY $a1
@@ -331,10 +337,11 @@ body: |
; MIPS32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $a3
; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
; MIPS32-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY1]]
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; MIPS32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]]
; MIPS32-NEXT: G_STORE [[AND]](s32), [[COPY3]](p0) :: (store (s8) into %ir.pcarry_flag)
- ; MIPS32-NEXT: G_STORE [[ADD]](s32), [[COPY2]](p0) :: (store (s32) into %ir.padd)
+ ; MIPS32-NEXT: G_STORE [[COPY4]](s32), [[COPY2]](p0) :: (store (s32) into %ir.padd)
; MIPS32-NEXT: RetRA
%0:_(s32) = COPY $a0
%1:_(s32) = COPY $a1
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/ctpop.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/ctpop.mir
index 136c039..f518e9e 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/ctpop.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/ctpop.mir
@@ -10,29 +10,30 @@ body: |
; MIPS32-LABEL: name: ctpop_i32
; MIPS32: liveins: $a0
- ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
- ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
- ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
- ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C1]]
- ; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[AND]]
- ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; MIPS32: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C2]](s32)
- ; MIPS32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
- ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C3]]
- ; MIPS32: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C3]]
- ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AND1]], [[AND2]]
- ; MIPS32: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; MIPS32: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C4]](s32)
- ; MIPS32: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[LSHR2]], [[ADD]]
- ; MIPS32: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
- ; MIPS32: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
- ; MIPS32: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; MIPS32: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
- ; MIPS32: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; MIPS32: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
- ; MIPS32: $v0 = COPY [[LSHR3]](s32)
- ; MIPS32: RetRA implicit $v0
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C1]]
+ ; MIPS32-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[AND]]
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C2]](s32)
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C3]]
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C3]]
+ ; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AND1]], [[AND2]]
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C4]](s32)
+ ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[LSHR2]], [[ADD]]
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
+ ; MIPS32-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; MIPS32-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
+ ; MIPS32-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
+ ; MIPS32-NEXT: $v0 = COPY [[LSHR3]](s32)
+ ; MIPS32-NEXT: RetRA implicit $v0
%0:_(s32) = COPY $a0
%1:_(s32) = G_CTPOP %0(s32)
$v0 = COPY %1(s32)
@@ -49,45 +50,46 @@ body: |
; MIPS32-LABEL: name: ctpop_i64
; MIPS32: liveins: $a0, $a1
- ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
- ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
- ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
- ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C1]]
- ; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[AND]]
- ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; MIPS32: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C2]](s32)
- ; MIPS32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
- ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C3]]
- ; MIPS32: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C3]]
- ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AND1]], [[AND2]]
- ; MIPS32: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; MIPS32: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C4]](s32)
- ; MIPS32: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[LSHR2]], [[ADD]]
- ; MIPS32: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
- ; MIPS32: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
- ; MIPS32: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; MIPS32: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
- ; MIPS32: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; MIPS32: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
- ; MIPS32: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:_(s32) = G_AND [[LSHR4]], [[C1]]
- ; MIPS32: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[AND4]]
- ; MIPS32: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[SUB1]], [[C2]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:_(s32) = G_AND [[LSHR5]], [[C3]]
- ; MIPS32: [[AND6:%[0-9]+]]:_(s32) = G_AND [[SUB1]], [[C3]]
- ; MIPS32: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[AND5]], [[AND6]]
- ; MIPS32: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[ADD2]], [[C4]](s32)
- ; MIPS32: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[LSHR6]], [[ADD2]]
- ; MIPS32: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ADD3]], [[C5]]
- ; MIPS32: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C6]]
- ; MIPS32: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C7]](s32)
- ; MIPS32: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[LSHR7]], [[LSHR3]]
- ; MIPS32: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; MIPS32: $v0 = COPY [[ADD4]](s32)
- ; MIPS32: $v1 = COPY [[C8]](s32)
- ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C1]]
+ ; MIPS32-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[AND]]
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C2]](s32)
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C3]]
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C3]]
+ ; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AND1]], [[AND2]]
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C4]](s32)
+ ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[LSHR2]], [[ADD]]
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
+ ; MIPS32-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; MIPS32-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
+ ; MIPS32-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
+ ; MIPS32-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[LSHR4]], [[C1]]
+ ; MIPS32-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[AND4]]
+ ; MIPS32-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[SUB1]], [[C2]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[LSHR5]], [[C3]]
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[SUB1]], [[C3]]
+ ; MIPS32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[AND5]], [[AND6]]
+ ; MIPS32-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[ADD2]], [[C4]](s32)
+ ; MIPS32-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[LSHR6]], [[ADD2]]
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ADD3]], [[C5]]
+ ; MIPS32-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C6]]
+ ; MIPS32-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C7]](s32)
+ ; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[LSHR7]], [[LSHR3]]
+ ; MIPS32-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; MIPS32-NEXT: $v0 = COPY [[ADD4]](s32)
+ ; MIPS32-NEXT: $v1 = COPY [[C8]](s32)
+ ; MIPS32-NEXT: RetRA implicit $v0, implicit $v1
%1:_(s32) = COPY $a0
%2:_(s32) = COPY $a1
%0:_(s64) = G_MERGE_VALUES %1(s32), %2(s32)
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/cttz.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/cttz.mir
index 3e7bcdc..a06bb6d 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/cttz.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/cttz.mir
@@ -139,9 +139,11 @@ body: |
; MIPS32-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[ADD1]], [[SUB1]]
; MIPS32-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[SELECT]], [[C]]
; MIPS32-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD3]](s32), [[C]]
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD3]](s32)
; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[C1]], [[C1]]
; MIPS32-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[ICMP1]]
- ; MIPS32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ADD3]](s32), [[ADD5]](s32)
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ADD5]](s32)
+ ; MIPS32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
; MIPS32-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[C1]]
; MIPS32-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[COPY1]], [[C1]]
; MIPS32-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[XOR2]], [[XOR3]]
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir
index 7ad286b..674d7b6 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir
@@ -275,8 +275,10 @@ body: |
; MIPS32-NEXT: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[LOAD]], [[COPY]]
; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL1]], [[MUL2]]
; MIPS32-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[MUL2]]
- ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[UMULH]]
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY4]], [[UMULH]]
; MIPS32-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[UMULH]]
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD1]](s32)
; MIPS32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ICMP]], [[ICMP1]]
; MIPS32-NEXT: [[MUL3:%[0-9]+]]:_(s32) = G_MUL [[LOAD2]], [[COPY]]
; MIPS32-NEXT: [[MUL4:%[0-9]+]]:_(s32) = G_MUL [[LOAD1]], [[COPY1]]
@@ -285,17 +287,22 @@ body: |
; MIPS32-NEXT: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH [[LOAD]], [[COPY1]]
; MIPS32-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[MUL3]], [[MUL4]]
; MIPS32-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD3]](s32), [[MUL4]]
- ; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[MUL5]]
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ADD3]](s32)
+ ; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[COPY6]], [[MUL5]]
; MIPS32-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD4]](s32), [[MUL5]]
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ADD4]](s32)
; MIPS32-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ICMP2]], [[ICMP3]]
- ; MIPS32-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[UMULH1]]
+ ; MIPS32-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[COPY7]], [[UMULH1]]
; MIPS32-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD6]](s32), [[UMULH1]]
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[ADD6]](s32)
; MIPS32-NEXT: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[ADD5]], [[ICMP4]]
- ; MIPS32-NEXT: [[ADD8:%[0-9]+]]:_(s32) = G_ADD [[ADD6]], [[UMULH2]]
+ ; MIPS32-NEXT: [[ADD8:%[0-9]+]]:_(s32) = G_ADD [[COPY8]], [[UMULH2]]
; MIPS32-NEXT: [[ICMP5:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD8]](s32), [[UMULH2]]
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[ADD8]](s32)
; MIPS32-NEXT: [[ADD9:%[0-9]+]]:_(s32) = G_ADD [[ADD7]], [[ICMP5]]
- ; MIPS32-NEXT: [[ADD10:%[0-9]+]]:_(s32) = G_ADD [[ADD8]], [[ADD2]]
+ ; MIPS32-NEXT: [[ADD10:%[0-9]+]]:_(s32) = G_ADD [[COPY9]], [[ADD2]]
; MIPS32-NEXT: [[ICMP6:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD10]](s32), [[ADD2]]
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[ADD10]](s32)
; MIPS32-NEXT: [[ADD11:%[0-9]+]]:_(s32) = G_ADD [[ADD9]], [[ICMP6]]
; MIPS32-NEXT: [[MUL6:%[0-9]+]]:_(s32) = G_MUL [[LOAD3]], [[COPY]]
; MIPS32-NEXT: [[MUL7:%[0-9]+]]:_(s32) = G_MUL [[LOAD2]], [[COPY1]]
@@ -312,8 +319,8 @@ body: |
; MIPS32-NEXT: [[ADD17:%[0-9]+]]:_(s32) = G_ADD [[ADD16]], [[UMULH5]]
; MIPS32-NEXT: [[ADD18:%[0-9]+]]:_(s32) = G_ADD [[ADD17]], [[ADD11]]
; MIPS32-NEXT: $v0 = COPY [[MUL]](s32)
- ; MIPS32-NEXT: $v1 = COPY [[ADD1]](s32)
- ; MIPS32-NEXT: $a0 = COPY [[ADD10]](s32)
+ ; MIPS32-NEXT: $v1 = COPY [[COPY5]](s32)
+ ; MIPS32-NEXT: $a0 = COPY [[COPY10]](s32)
; MIPS32-NEXT: $a1 = COPY [[ADD18]](s32)
; MIPS32-NEXT: RetRA implicit $v0, implicit $v1, implicit $a0, implicit $a1
%2:_(s32) = COPY $a0
@@ -359,23 +366,28 @@ body: |
; MIPS32-NEXT: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[COPY2]], [[COPY]]
; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL]], [[MUL1]]
; MIPS32-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[MUL1]]
- ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[UMULH]]
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY4]], [[UMULH]]
; MIPS32-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[UMULH]]
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD1]](s32)
; MIPS32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ICMP]], [[ICMP1]]
; MIPS32-NEXT: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[COPY3]], [[COPY1]]
; MIPS32-NEXT: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[COPY3]], [[COPY]]
; MIPS32-NEXT: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH [[COPY2]], [[COPY1]]
; MIPS32-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[MUL2]], [[UMULH1]]
; MIPS32-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD3]](s32), [[UMULH1]]
- ; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[UMULH2]]
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ADD3]](s32)
+ ; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[COPY6]], [[UMULH2]]
; MIPS32-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD4]](s32), [[UMULH2]]
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ADD4]](s32)
; MIPS32-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ICMP2]], [[ICMP3]]
- ; MIPS32-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[ADD2]]
+ ; MIPS32-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[COPY7]], [[ADD2]]
; MIPS32-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD6]](s32), [[ADD2]]
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[ADD6]](s32)
; MIPS32-NEXT: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[ADD5]], [[ICMP4]]
; MIPS32-NEXT: [[UMULH3:%[0-9]+]]:_(s32) = G_UMULH [[COPY3]], [[COPY1]]
; MIPS32-NEXT: [[ADD8:%[0-9]+]]:_(s32) = G_ADD [[UMULH3]], [[ADD7]]
- ; MIPS32-NEXT: $v0 = COPY [[ADD6]](s32)
+ ; MIPS32-NEXT: $v0 = COPY [[COPY8]](s32)
; MIPS32-NEXT: $v1 = COPY [[ADD8]](s32)
; MIPS32-NEXT: RetRA implicit $v0, implicit $v1
%2:_(s32) = COPY $a0
diff --git a/llvm/test/CodeGen/Mips/atomic-min-max.ll b/llvm/test/CodeGen/Mips/atomic-min-max.ll
index bc3643f..a96581bd 100644
--- a/llvm/test/CodeGen/Mips/atomic-min-max.ll
+++ b/llvm/test/CodeGen/Mips/atomic-min-max.ll
@@ -2146,6 +2146,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS32-NEXT: $BB6_1: # %entry
; MIPS32-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS32-NEXT: ll $2, 0($6)
+; MIPS32-NEXT: and $2, $2, $8
+; MIPS32-NEXT: and $7, $7, $8
; MIPS32-NEXT: sltu $5, $2, $7
; MIPS32-NEXT: move $3, $2
; MIPS32-NEXT: movn $3, $7, $5
@@ -2186,6 +2188,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPSEL-NEXT: $BB6_1: # %entry
; MIPSEL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSEL-NEXT: ll $2, 0($6)
+; MIPSEL-NEXT: and $2, $2, $8
+; MIPSEL-NEXT: and $7, $7, $8
; MIPSEL-NEXT: sltu $5, $2, $7
; MIPSEL-NEXT: move $3, $2
; MIPSEL-NEXT: movn $3, $7, $5
@@ -2225,6 +2229,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPSELR6-NEXT: $BB6_1: # %entry
; MIPSELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSELR6-NEXT: ll $2, 0($6)
+; MIPSELR6-NEXT: and $2, $2, $8
+; MIPSELR6-NEXT: and $7, $7, $8
; MIPSELR6-NEXT: sltu $5, $2, $7
; MIPSELR6-NEXT: seleqz $3, $2, $5
; MIPSELR6-NEXT: selnez $5, $7, $5
@@ -2263,6 +2269,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MMEL-NEXT: $BB6_1: # %entry
; MMEL-NEXT: # =>This Inner Loop Header: Depth=1
; MMEL-NEXT: ll $2, 0($6)
+; MMEL-NEXT: and $2, $2, $8
+; MMEL-NEXT: and $7, $7, $8
; MMEL-NEXT: sltu $5, $2, $7
; MMEL-NEXT: or $3, $2, $zero
; MMEL-NEXT: movn $3, $7, $5
@@ -2300,6 +2308,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MMELR6-NEXT: $BB6_1: # %entry
; MMELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MMELR6-NEXT: ll $2, 0($6)
+; MMELR6-NEXT: and $2, $2, $8
+; MMELR6-NEXT: and $7, $7, $8
; MMELR6-NEXT: sltu $5, $2, $7
; MMELR6-NEXT: seleqz $3, $2, $5
; MMELR6-NEXT: selnez $5, $7, $5
@@ -2417,6 +2427,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS64EL-NEXT: .LBB6_1: # %entry
; MIPS64EL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EL-NEXT: ll $2, 0($6)
+; MIPS64EL-NEXT: and $2, $2, $8
+; MIPS64EL-NEXT: and $7, $7, $8
; MIPS64EL-NEXT: sltu $5, $2, $7
; MIPS64EL-NEXT: move $3, $2
; MIPS64EL-NEXT: movn $3, $7, $5
@@ -2456,6 +2468,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS64ELR6-NEXT: .LBB6_1: # %entry
; MIPS64ELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64ELR6-NEXT: ll $2, 0($6)
+; MIPS64ELR6-NEXT: and $2, $2, $8
+; MIPS64ELR6-NEXT: and $7, $7, $8
; MIPS64ELR6-NEXT: sltu $5, $2, $7
; MIPS64ELR6-NEXT: seleqz $3, $2, $5
; MIPS64ELR6-NEXT: selnez $5, $7, $5
@@ -2655,6 +2669,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS32-NEXT: $BB7_1: # %entry
; MIPS32-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS32-NEXT: ll $2, 0($6)
+; MIPS32-NEXT: and $2, $2, $8
+; MIPS32-NEXT: and $7, $7, $8
; MIPS32-NEXT: sltu $5, $2, $7
; MIPS32-NEXT: move $3, $2
; MIPS32-NEXT: movz $3, $7, $5
@@ -2696,6 +2712,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPSEL-NEXT: $BB7_1: # %entry
; MIPSEL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSEL-NEXT: ll $2, 0($6)
+; MIPSEL-NEXT: and $2, $2, $8
+; MIPSEL-NEXT: and $7, $7, $8
; MIPSEL-NEXT: sltu $5, $2, $7
; MIPSEL-NEXT: move $3, $2
; MIPSEL-NEXT: movz $3, $7, $5
@@ -2735,6 +2753,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPSELR6-NEXT: $BB7_1: # %entry
; MIPSELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSELR6-NEXT: ll $2, 0($6)
+; MIPSELR6-NEXT: and $2, $2, $8
+; MIPSELR6-NEXT: and $7, $7, $8
; MIPSELR6-NEXT: sltu $5, $2, $7
; MIPSELR6-NEXT: selnez $3, $2, $5
; MIPSELR6-NEXT: seleqz $5, $7, $5
@@ -2773,6 +2793,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MMEL-NEXT: $BB7_1: # %entry
; MMEL-NEXT: # =>This Inner Loop Header: Depth=1
; MMEL-NEXT: ll $2, 0($6)
+; MMEL-NEXT: and $2, $2, $8
+; MMEL-NEXT: and $7, $7, $8
; MMEL-NEXT: sltu $5, $2, $7
; MMEL-NEXT: or $3, $2, $zero
; MMEL-NEXT: movz $3, $7, $5
@@ -2810,6 +2832,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MMELR6-NEXT: $BB7_1: # %entry
; MMELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MMELR6-NEXT: ll $2, 0($6)
+; MMELR6-NEXT: and $2, $2, $8
+; MMELR6-NEXT: and $7, $7, $8
; MMELR6-NEXT: sltu $5, $2, $7
; MMELR6-NEXT: selnez $3, $2, $5
; MMELR6-NEXT: seleqz $5, $7, $5
@@ -2927,6 +2951,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS64EL-NEXT: .LBB7_1: # %entry
; MIPS64EL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EL-NEXT: ll $2, 0($6)
+; MIPS64EL-NEXT: and $2, $2, $8
+; MIPS64EL-NEXT: and $7, $7, $8
; MIPS64EL-NEXT: sltu $5, $2, $7
; MIPS64EL-NEXT: move $3, $2
; MIPS64EL-NEXT: movz $3, $7, $5
@@ -2966,6 +2992,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS64ELR6-NEXT: .LBB7_1: # %entry
; MIPS64ELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64ELR6-NEXT: ll $2, 0($6)
+; MIPS64ELR6-NEXT: and $2, $2, $8
+; MIPS64ELR6-NEXT: and $7, $7, $8
; MIPS64ELR6-NEXT: sltu $5, $2, $7
; MIPS64ELR6-NEXT: selnez $3, $2, $5
; MIPS64ELR6-NEXT: seleqz $5, $7, $5
@@ -4244,6 +4272,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS32-NEXT: $BB10_1: # %entry
; MIPS32-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS32-NEXT: ll $2, 0($6)
+; MIPS32-NEXT: and $2, $2, $8
+; MIPS32-NEXT: and $7, $7, $8
; MIPS32-NEXT: sltu $5, $2, $7
; MIPS32-NEXT: move $3, $2
; MIPS32-NEXT: movn $3, $7, $5
@@ -4284,6 +4314,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPSEL-NEXT: $BB10_1: # %entry
; MIPSEL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSEL-NEXT: ll $2, 0($6)
+; MIPSEL-NEXT: and $2, $2, $8
+; MIPSEL-NEXT: and $7, $7, $8
; MIPSEL-NEXT: sltu $5, $2, $7
; MIPSEL-NEXT: move $3, $2
; MIPSEL-NEXT: movn $3, $7, $5
@@ -4323,6 +4355,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPSELR6-NEXT: $BB10_1: # %entry
; MIPSELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSELR6-NEXT: ll $2, 0($6)
+; MIPSELR6-NEXT: and $2, $2, $8
+; MIPSELR6-NEXT: and $7, $7, $8
; MIPSELR6-NEXT: sltu $5, $2, $7
; MIPSELR6-NEXT: seleqz $3, $2, $5
; MIPSELR6-NEXT: selnez $5, $7, $5
@@ -4361,6 +4395,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MMEL-NEXT: $BB10_1: # %entry
; MMEL-NEXT: # =>This Inner Loop Header: Depth=1
; MMEL-NEXT: ll $2, 0($6)
+; MMEL-NEXT: and $2, $2, $8
+; MMEL-NEXT: and $7, $7, $8
; MMEL-NEXT: sltu $5, $2, $7
; MMEL-NEXT: or $3, $2, $zero
; MMEL-NEXT: movn $3, $7, $5
@@ -4398,6 +4434,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MMELR6-NEXT: $BB10_1: # %entry
; MMELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MMELR6-NEXT: ll $2, 0($6)
+; MMELR6-NEXT: and $2, $2, $8
+; MMELR6-NEXT: and $7, $7, $8
; MMELR6-NEXT: sltu $5, $2, $7
; MMELR6-NEXT: seleqz $3, $2, $5
; MMELR6-NEXT: selnez $5, $7, $5
@@ -4515,6 +4553,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS64EL-NEXT: .LBB10_1: # %entry
; MIPS64EL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EL-NEXT: ll $2, 0($6)
+; MIPS64EL-NEXT: and $2, $2, $8
+; MIPS64EL-NEXT: and $7, $7, $8
; MIPS64EL-NEXT: sltu $5, $2, $7
; MIPS64EL-NEXT: move $3, $2
; MIPS64EL-NEXT: movn $3, $7, $5
@@ -4554,6 +4594,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS64ELR6-NEXT: .LBB10_1: # %entry
; MIPS64ELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64ELR6-NEXT: ll $2, 0($6)
+; MIPS64ELR6-NEXT: and $2, $2, $8
+; MIPS64ELR6-NEXT: and $7, $7, $8
; MIPS64ELR6-NEXT: sltu $5, $2, $7
; MIPS64ELR6-NEXT: seleqz $3, $2, $5
; MIPS64ELR6-NEXT: selnez $5, $7, $5
@@ -4753,6 +4795,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS32-NEXT: $BB11_1: # %entry
; MIPS32-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS32-NEXT: ll $2, 0($6)
+; MIPS32-NEXT: and $2, $2, $8
+; MIPS32-NEXT: and $7, $7, $8
; MIPS32-NEXT: sltu $5, $2, $7
; MIPS32-NEXT: move $3, $2
; MIPS32-NEXT: movz $3, $7, $5
@@ -4793,6 +4837,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPSEL-NEXT: $BB11_1: # %entry
; MIPSEL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSEL-NEXT: ll $2, 0($6)
+; MIPSEL-NEXT: and $2, $2, $8
+; MIPSEL-NEXT: and $7, $7, $8
; MIPSEL-NEXT: sltu $5, $2, $7
; MIPSEL-NEXT: move $3, $2
; MIPSEL-NEXT: movz $3, $7, $5
@@ -4832,6 +4878,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPSELR6-NEXT: $BB11_1: # %entry
; MIPSELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSELR6-NEXT: ll $2, 0($6)
+; MIPSELR6-NEXT: and $2, $2, $8
+; MIPSELR6-NEXT: and $7, $7, $8
; MIPSELR6-NEXT: sltu $5, $2, $7
; MIPSELR6-NEXT: selnez $3, $2, $5
; MIPSELR6-NEXT: seleqz $5, $7, $5
@@ -4870,6 +4918,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MMEL-NEXT: $BB11_1: # %entry
; MMEL-NEXT: # =>This Inner Loop Header: Depth=1
; MMEL-NEXT: ll $2, 0($6)
+; MMEL-NEXT: and $2, $2, $8
+; MMEL-NEXT: and $7, $7, $8
; MMEL-NEXT: sltu $5, $2, $7
; MMEL-NEXT: or $3, $2, $zero
; MMEL-NEXT: movz $3, $7, $5
@@ -4907,6 +4957,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MMELR6-NEXT: $BB11_1: # %entry
; MMELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MMELR6-NEXT: ll $2, 0($6)
+; MMELR6-NEXT: and $2, $2, $8
+; MMELR6-NEXT: and $7, $7, $8
; MMELR6-NEXT: sltu $5, $2, $7
; MMELR6-NEXT: selnez $3, $2, $5
; MMELR6-NEXT: seleqz $5, $7, $5
@@ -5024,6 +5076,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS64EL-NEXT: .LBB11_1: # %entry
; MIPS64EL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EL-NEXT: ll $2, 0($6)
+; MIPS64EL-NEXT: and $2, $2, $8
+; MIPS64EL-NEXT: and $7, $7, $8
; MIPS64EL-NEXT: sltu $5, $2, $7
; MIPS64EL-NEXT: move $3, $2
; MIPS64EL-NEXT: movz $3, $7, $5
@@ -5063,6 +5117,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS64ELR6-NEXT: .LBB11_1: # %entry
; MIPS64ELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64ELR6-NEXT: ll $2, 0($6)
+; MIPS64ELR6-NEXT: and $2, $2, $8
+; MIPS64ELR6-NEXT: and $7, $7, $8
; MIPS64ELR6-NEXT: sltu $5, $2, $7
; MIPS64ELR6-NEXT: selnez $3, $2, $5
; MIPS64ELR6-NEXT: seleqz $5, $7, $5
diff --git a/llvm/test/CodeGen/Mips/avoid-zero-copy.mir b/llvm/test/CodeGen/Mips/avoid-zero-copy.mir
index 5c7cffd..e3990bd 100644
--- a/llvm/test/CodeGen/Mips/avoid-zero-copy.mir
+++ b/llvm/test/CodeGen/Mips/avoid-zero-copy.mir
@@ -19,6 +19,8 @@
...
---
name: a
+frameInfo:
+ adjustsStack: true
body: |
bb.0 (%ir-block.0):
liveins: $a0_64, $t9_64, $ra_64, $fp_64, $gp_64
diff --git a/llvm/test/CodeGen/Mips/msa/emergency-spill.mir b/llvm/test/CodeGen/Mips/msa/emergency-spill.mir
index e1c7b21..2089464 100644
--- a/llvm/test/CodeGen/Mips/msa/emergency-spill.mir
+++ b/llvm/test/CodeGen/Mips/msa/emergency-spill.mir
@@ -90,7 +90,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 16
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll b/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll
index 6a27c9f..45c7ab9 100644
--- a/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll
+++ b/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll
@@ -405,8 +405,9 @@ define void @uitofp(i32 %a) {
; MIPS64-N32-NEXT: addiu $1, $1, %lo(%neg(%gp_rel(uitofp)))
; MIPS64-N32-NEXT: lui $2, 17200
; MIPS64-N32-NEXT: sw $2, 12($sp)
-; MIPS64-N32-NEXT: sll $2, $4, 0
-; MIPS64-N32-NEXT: sw $2, 8($sp)
+; MIPS64R5-N32-NEXT: sll $2, $4, 0
+; MIPS64R5-N32-NEXT: sw $2, 8($sp)
+; MIPSR6-N32-NEXT: sw $4, 8($sp)
; MIPS64-N32-NEXT: lw $2, %got_page(.LCPI5_0)($1)
; MIPS64-N32-NEXT: ldc1 $f0, %got_ofst(.LCPI5_0)($2)
; MIPS64-N32-NEXT: ldc1 $f1, 8($sp)
@@ -430,8 +431,9 @@ define void @uitofp(i32 %a) {
; MIPS64-N64-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(uitofp)))
; MIPS64-N64-NEXT: lui $2, 17200
; MIPS64-N64-NEXT: sw $2, 12($sp)
-; MIPS64-N64-NEXT: sll $2, $4, 0
-; MIPS64-N64-NEXT: sw $2, 8($sp)
+; MIPS64R5-N64-NEXT: sll $2, $4, 0
+; MIPS64R5-N64-NEXT: sw $2, 8($sp)
+; MIPSR6-N64-NEXT: sw $4, 8($sp)
; MIPS64-N64-NEXT: ld $2, %got_page(.LCPI5_0)($1)
; MIPS64-N64-NEXT: ldc1 $f0, %got_ofst(.LCPI5_0)($2)
; MIPS64-N64-NEXT: ldc1 $f1, 8($sp)
diff --git a/llvm/test/CodeGen/Mips/no-unaligned-access-r6.ll b/llvm/test/CodeGen/Mips/no-unaligned-access-r6.ll
new file mode 100644
index 0000000..0695868
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/no-unaligned-access-r6.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+;; Test the strict-align feature which is similar to AArch64/arm64-strict-align.ll.
+
+; RUN: llc --mtriple=mipsisa32r6 < %s | FileCheck %s --check-prefix=MIPS32R6-UNALIGNED
+; RUN: llc --mtriple=mipsisa32r6 --mattr=-strict-align < %s | FileCheck %s --check-prefix=MIPS32R6-UNALIGNED
+; RUN: llc --mtriple=mipsisa32r6 --mattr=+strict-align < %s | FileCheck %s --check-prefix=MIPS32R6-ALIGNED
+
+; RUN: llc --mtriple=mipsisa64r6 < %s | FileCheck %s --check-prefix=MIPS64R6-UNALIGNED
+; RUN: llc --mtriple=mipsisa64r6 --mattr=-strict-align < %s | FileCheck %s --check-prefix=MIPS64R6-UNALIGNED
+; RUN: llc --mtriple=mipsisa64r6 --mattr=+strict-align < %s | FileCheck %s --check-prefix=MIPS64R6-ALIGNED
+
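+; f0: an align-2 i32 load is expected to stay a single lw when unaligned access
+; is allowed, and to be split into two lhu plus shift/or under +strict-align.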
+define i32 @f0(ptr %p) nounwind {
+; MIPS32R6-UNALIGNED-LABEL: f0:
+; MIPS32R6-UNALIGNED: # %bb.0:
+; MIPS32R6-UNALIGNED-NEXT: lw $2, 0($4)
+; MIPS32R6-UNALIGNED-NEXT: jrc $ra
+;
+; MIPS32R6-ALIGNED-LABEL: f0:
+; MIPS32R6-ALIGNED: # %bb.0:
+; MIPS32R6-ALIGNED-NEXT: lhu $1, 2($4)
+; MIPS32R6-ALIGNED-NEXT: lhu $2, 0($4)
+; MIPS32R6-ALIGNED-NEXT: sll $2, $2, 16
+; MIPS32R6-ALIGNED-NEXT: jr $ra
+; MIPS32R6-ALIGNED-NEXT: or $2, $2, $1
+;
+; MIPS64R6-UNALIGNED-LABEL: f0:
+; MIPS64R6-UNALIGNED: # %bb.0:
+; MIPS64R6-UNALIGNED-NEXT: lw $2, 0($4)
+; MIPS64R6-UNALIGNED-NEXT: jrc $ra
+;
+; MIPS64R6-ALIGNED-LABEL: f0:
+; MIPS64R6-ALIGNED: # %bb.0:
+; MIPS64R6-ALIGNED-NEXT: lhu $1, 2($4)
+; MIPS64R6-ALIGNED-NEXT: lhu $2, 0($4)
+; MIPS64R6-ALIGNED-NEXT: sll $2, $2, 16
+; MIPS64R6-ALIGNED-NEXT: jr $ra
+; MIPS64R6-ALIGNED-NEXT: or $2, $2, $1
+ %tmp = load i32, ptr %p, align 2
+ ret i32 %tmp
+}
+
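+; f1: an align-4 i64 load uses two lw on MIPS32 either way; MIPS64 keeps a
+; single ld unless +strict-align forces two lwu plus dsll/or.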
+define i64 @f1(ptr %p) nounwind {
+; MIPS32R6-UNALIGNED-LABEL: f1:
+; MIPS32R6-UNALIGNED: # %bb.0:
+; MIPS32R6-UNALIGNED-NEXT: lw $2, 0($4)
+; MIPS32R6-UNALIGNED-NEXT: lw $3, 4($4)
+; MIPS32R6-UNALIGNED-NEXT: jrc $ra
+;
+; MIPS32R6-ALIGNED-LABEL: f1:
+; MIPS32R6-ALIGNED: # %bb.0:
+; MIPS32R6-ALIGNED-NEXT: lw $2, 0($4)
+; MIPS32R6-ALIGNED-NEXT: lw $3, 4($4)
+; MIPS32R6-ALIGNED-NEXT: jrc $ra
+;
+; MIPS64R6-UNALIGNED-LABEL: f1:
+; MIPS64R6-UNALIGNED: # %bb.0:
+; MIPS64R6-UNALIGNED-NEXT: ld $2, 0($4)
+; MIPS64R6-UNALIGNED-NEXT: jrc $ra
+;
+; MIPS64R6-ALIGNED-LABEL: f1:
+; MIPS64R6-ALIGNED: # %bb.0:
+; MIPS64R6-ALIGNED-NEXT: lwu $1, 4($4)
+; MIPS64R6-ALIGNED-NEXT: lwu $2, 0($4)
+; MIPS64R6-ALIGNED-NEXT: dsll $2, $2, 32
+; MIPS64R6-ALIGNED-NEXT: jr $ra
+; MIPS64R6-ALIGNED-NEXT: or $2, $2, $1
+ %tmp = load i64, ptr %p, align 4
+ ret i64 %tmp
+}
diff --git a/llvm/test/CodeGen/NVPTX/atomics-sm70.ll b/llvm/test/CodeGen/NVPTX/atomics-sm70.ll
new file mode 100644
index 0000000..9cc45fb
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/atomics-sm70.ll
@@ -0,0 +1,142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx63 | FileCheck %s --check-prefixes=CHECK
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx63 | FileCheck %s --check-prefixes=CHECK64
+; RUN: llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx62 | FileCheck %s --check-prefixes=CHECKPTX62
+; RUN: %if ptxas && !ptxas-12.0 %{ llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx63 | %ptxas-verify -arch=sm_70 %}
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx63 | %ptxas-verify -arch=sm_70 %}
+; RUN: %if ptxas && !ptxas-12.0 %{ llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx62 | %ptxas-verify -arch=sm_70 %}
+
+target triple = "nvptx64-nvidia-cuda"
+
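+; With ptx63 the f16 atomicrmw fadd is expected to lower directly to
+; atom.add.noftz.f16 (generic, global and shared forms); with only ptx62 it
+; falls back to an atom.cas.b32 compare-and-swap loop on the containing word.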
+define void @test(ptr %dp0, ptr addrspace(1) %dp1, ptr addrspace(3) %dp3, half %val) {
+; CHECK-LABEL: test(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<7>;
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [test_param_0];
+; CHECK-NEXT: ld.param.b16 %rs1, [test_param_3];
+; CHECK-NEXT: atom.add.noftz.f16 %rs2, [%r1], %rs1;
+; CHECK-NEXT: ld.param.u32 %r2, [test_param_1];
+; CHECK-NEXT: mov.b16 %rs3, 0x3C00;
+; CHECK-NEXT: atom.add.noftz.f16 %rs4, [%r1], %rs3;
+; CHECK-NEXT: ld.param.u32 %r3, [test_param_2];
+; CHECK-NEXT: atom.global.add.noftz.f16 %rs5, [%r2], %rs1;
+; CHECK-NEXT: atom.shared.add.noftz.f16 %rs6, [%r3], %rs1;
+; CHECK-NEXT: ret;
+;
+; CHECK64-LABEL: test(
+; CHECK64: {
+; CHECK64-NEXT: .reg .b16 %rs<7>;
+; CHECK64-NEXT: .reg .b64 %rd<4>;
+; CHECK64-EMPTY:
+; CHECK64-NEXT: // %bb.0:
+; CHECK64-NEXT: ld.param.u64 %rd1, [test_param_0];
+; CHECK64-NEXT: ld.param.b16 %rs1, [test_param_3];
+; CHECK64-NEXT: atom.add.noftz.f16 %rs2, [%rd1], %rs1;
+; CHECK64-NEXT: ld.param.u64 %rd2, [test_param_1];
+; CHECK64-NEXT: mov.b16 %rs3, 0x3C00;
+; CHECK64-NEXT: atom.add.noftz.f16 %rs4, [%rd1], %rs3;
+; CHECK64-NEXT: ld.param.u64 %rd3, [test_param_2];
+; CHECK64-NEXT: atom.global.add.noftz.f16 %rs5, [%rd2], %rs1;
+; CHECK64-NEXT: atom.shared.add.noftz.f16 %rs6, [%rd3], %rs1;
+; CHECK64-NEXT: ret;
+;
+; CHECKPTX62-LABEL: test(
+; CHECKPTX62: {
+; CHECKPTX62-NEXT: .reg .pred %p<5>;
+; CHECKPTX62-NEXT: .reg .b16 %rs<19>;
+; CHECKPTX62-NEXT: .reg .b32 %r<58>;
+; CHECKPTX62-EMPTY:
+; CHECKPTX62-NEXT: // %bb.0:
+; CHECKPTX62-NEXT: ld.param.b16 %rs1, [test_param_3];
+; CHECKPTX62-NEXT: ld.param.u32 %r23, [test_param_2];
+; CHECKPTX62-NEXT: ld.param.u32 %r22, [test_param_1];
+; CHECKPTX62-NEXT: ld.param.u32 %r24, [test_param_0];
+; CHECKPTX62-NEXT: and.b32 %r1, %r24, -4;
+; CHECKPTX62-NEXT: and.b32 %r25, %r24, 3;
+; CHECKPTX62-NEXT: shl.b32 %r2, %r25, 3;
+; CHECKPTX62-NEXT: mov.b32 %r26, 65535;
+; CHECKPTX62-NEXT: shl.b32 %r27, %r26, %r2;
+; CHECKPTX62-NEXT: not.b32 %r3, %r27;
+; CHECKPTX62-NEXT: ld.u32 %r54, [%r1];
+; CHECKPTX62-NEXT: $L__BB0_1: // %atomicrmw.start
+; CHECKPTX62-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECKPTX62-NEXT: shr.u32 %r28, %r54, %r2;
+; CHECKPTX62-NEXT: cvt.u16.u32 %rs2, %r28;
+; CHECKPTX62-NEXT: add.rn.f16 %rs4, %rs2, %rs1;
+; CHECKPTX62-NEXT: cvt.u32.u16 %r29, %rs4;
+; CHECKPTX62-NEXT: shl.b32 %r30, %r29, %r2;
+; CHECKPTX62-NEXT: and.b32 %r31, %r54, %r3;
+; CHECKPTX62-NEXT: or.b32 %r32, %r31, %r30;
+; CHECKPTX62-NEXT: atom.cas.b32 %r6, [%r1], %r54, %r32;
+; CHECKPTX62-NEXT: setp.ne.s32 %p1, %r6, %r54;
+; CHECKPTX62-NEXT: mov.u32 %r54, %r6;
+; CHECKPTX62-NEXT: @%p1 bra $L__BB0_1;
+; CHECKPTX62-NEXT: // %bb.2: // %atomicrmw.end
+; CHECKPTX62-NEXT: ld.u32 %r55, [%r1];
+; CHECKPTX62-NEXT: $L__BB0_3: // %atomicrmw.start9
+; CHECKPTX62-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECKPTX62-NEXT: shr.u32 %r33, %r55, %r2;
+; CHECKPTX62-NEXT: cvt.u16.u32 %rs6, %r33;
+; CHECKPTX62-NEXT: mov.b16 %rs8, 0x3C00;
+; CHECKPTX62-NEXT: add.rn.f16 %rs9, %rs6, %rs8;
+; CHECKPTX62-NEXT: cvt.u32.u16 %r34, %rs9;
+; CHECKPTX62-NEXT: shl.b32 %r35, %r34, %r2;
+; CHECKPTX62-NEXT: and.b32 %r36, %r55, %r3;
+; CHECKPTX62-NEXT: or.b32 %r37, %r36, %r35;
+; CHECKPTX62-NEXT: atom.cas.b32 %r9, [%r1], %r55, %r37;
+; CHECKPTX62-NEXT: setp.ne.s32 %p2, %r9, %r55;
+; CHECKPTX62-NEXT: mov.u32 %r55, %r9;
+; CHECKPTX62-NEXT: @%p2 bra $L__BB0_3;
+; CHECKPTX62-NEXT: // %bb.4: // %atomicrmw.end8
+; CHECKPTX62-NEXT: and.b32 %r10, %r22, -4;
+; CHECKPTX62-NEXT: shl.b32 %r38, %r22, 3;
+; CHECKPTX62-NEXT: and.b32 %r11, %r38, 24;
+; CHECKPTX62-NEXT: shl.b32 %r40, %r26, %r11;
+; CHECKPTX62-NEXT: not.b32 %r12, %r40;
+; CHECKPTX62-NEXT: ld.global.u32 %r56, [%r10];
+; CHECKPTX62-NEXT: $L__BB0_5: // %atomicrmw.start27
+; CHECKPTX62-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECKPTX62-NEXT: shr.u32 %r41, %r56, %r11;
+; CHECKPTX62-NEXT: cvt.u16.u32 %rs11, %r41;
+; CHECKPTX62-NEXT: add.rn.f16 %rs13, %rs11, %rs1;
+; CHECKPTX62-NEXT: cvt.u32.u16 %r42, %rs13;
+; CHECKPTX62-NEXT: shl.b32 %r43, %r42, %r11;
+; CHECKPTX62-NEXT: and.b32 %r44, %r56, %r12;
+; CHECKPTX62-NEXT: or.b32 %r45, %r44, %r43;
+; CHECKPTX62-NEXT: atom.global.cas.b32 %r15, [%r10], %r56, %r45;
+; CHECKPTX62-NEXT: setp.ne.s32 %p3, %r15, %r56;
+; CHECKPTX62-NEXT: mov.u32 %r56, %r15;
+; CHECKPTX62-NEXT: @%p3 bra $L__BB0_5;
+; CHECKPTX62-NEXT: // %bb.6: // %atomicrmw.end26
+; CHECKPTX62-NEXT: and.b32 %r16, %r23, -4;
+; CHECKPTX62-NEXT: shl.b32 %r46, %r23, 3;
+; CHECKPTX62-NEXT: and.b32 %r17, %r46, 24;
+; CHECKPTX62-NEXT: shl.b32 %r48, %r26, %r17;
+; CHECKPTX62-NEXT: not.b32 %r18, %r48;
+; CHECKPTX62-NEXT: ld.shared.u32 %r57, [%r16];
+; CHECKPTX62-NEXT: $L__BB0_7: // %atomicrmw.start45
+; CHECKPTX62-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECKPTX62-NEXT: shr.u32 %r49, %r57, %r17;
+; CHECKPTX62-NEXT: cvt.u16.u32 %rs15, %r49;
+; CHECKPTX62-NEXT: add.rn.f16 %rs17, %rs15, %rs1;
+; CHECKPTX62-NEXT: cvt.u32.u16 %r50, %rs17;
+; CHECKPTX62-NEXT: shl.b32 %r51, %r50, %r17;
+; CHECKPTX62-NEXT: and.b32 %r52, %r57, %r18;
+; CHECKPTX62-NEXT: or.b32 %r53, %r52, %r51;
+; CHECKPTX62-NEXT: atom.shared.cas.b32 %r21, [%r16], %r57, %r53;
+; CHECKPTX62-NEXT: setp.ne.s32 %p4, %r21, %r57;
+; CHECKPTX62-NEXT: mov.u32 %r57, %r21;
+; CHECKPTX62-NEXT: @%p4 bra $L__BB0_7;
+; CHECKPTX62-NEXT: // %bb.8: // %atomicrmw.end44
+; CHECKPTX62-NEXT: ret;
+ %r1 = atomicrmw fadd ptr %dp0, half %val seq_cst
+ %r2 = atomicrmw fadd ptr %dp0, half 1.0 seq_cst
+ %r3 = atomicrmw fadd ptr addrspace(1) %dp1, half %val seq_cst
+ %r4 = atomicrmw fadd ptr addrspace(3) %dp3, half %val seq_cst
+ ret void
+}
+
+attributes #1 = { argmemonly nounwind }
diff --git a/llvm/test/CodeGen/NVPTX/atomics.ll b/llvm/test/CodeGen/NVPTX/atomics.ll
index e99d0fd..6f2b5dc 100644
--- a/llvm/test/CodeGen/NVPTX/atomics.ll
+++ b/llvm/test/CodeGen/NVPTX/atomics.ll
@@ -175,6 +175,13 @@ define float @atomicrmw_add_f32_generic(ptr %addr, float %val) {
ret float %ret
}
+; CHECK-LABEL: atomicrmw_add_f16_generic
+define half @atomicrmw_add_f16_generic(ptr %addr, half %val) {
+; CHECK: atom.cas
+ %ret = atomicrmw fadd ptr %addr, half %val seq_cst
+ ret half %ret
+}
+
; CHECK-LABEL: atomicrmw_add_f32_addrspace1
define float @atomicrmw_add_f32_addrspace1(ptr addrspace(1) %addr, float %val) {
; CHECK: atom.global.add.f32
diff --git a/llvm/test/CodeGen/NVPTX/b52037.ll b/llvm/test/CodeGen/NVPTX/b52037.ll
index d9322da..5d1c390 100644
--- a/llvm/test/CodeGen/NVPTX/b52037.ll
+++ b/llvm/test/CodeGen/NVPTX/b52037.ll
@@ -47,7 +47,7 @@ bb:
%tmp5 = load ptr, ptr %tmp4, align 8
%tmp9 = getelementptr inbounds %struct.zot, ptr %tmp, i64 0, i32 2, i32 1
store ptr %tmp5, ptr %tmp9, align 8
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @global_1, i64 0, inrange i32 0, i64 3), ptr %tmp, align 16
+ store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @global_1, i64 0, i32 0, i64 3), ptr %tmp, align 16
%tmp.i1 = tail call i64 @foo()
%tmp44.i16 = getelementptr inbounds i16, ptr %tmp5, i64 undef
%tmp45.i17 = load i16, ptr %tmp44.i16, align 2
diff --git a/llvm/test/CodeGen/NVPTX/bswap.ll b/llvm/test/CodeGen/NVPTX/bswap.ll
new file mode 100644
index 0000000..3f929ec
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/bswap.ll
@@ -0,0 +1,77 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 | %ptxas-verify %}
+
+target triple = "nvptx64-nvidia-cuda"
+
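+; llvm.bswap.* is expected to lower to shr/shl/or for i16 and to prmt.b32 byte
+; permutes for i32, <2 x i16> and each 32-bit half of i64.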
+define i16 @bswap16(i16 %a) {
+; CHECK-LABEL: bswap16(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<5>;
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [bswap16_param_0];
+; CHECK-NEXT: shr.u16 %rs2, %rs1, 8;
+; CHECK-NEXT: shl.b16 %rs3, %rs1, 8;
+; CHECK-NEXT: or.b16 %rs4, %rs3, %rs2;
+; CHECK-NEXT: cvt.u32.u16 %r1, %rs4;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1;
+; CHECK-NEXT: ret;
+ %b = tail call i16 @llvm.bswap.i16(i16 %a)
+ ret i16 %b
+}
+
+
+define i32 @bswap32(i32 %a) {
+; CHECK-LABEL: bswap32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [bswap32_param_0];
+; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 291;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2;
+; CHECK-NEXT: ret;
+ %b = tail call i32 @llvm.bswap.i32(i32 %a)
+ ret i32 %b
+}
+
+
+define <2 x i16> @bswapv2i16(<2 x i16> %a) #0 {
+; CHECK-LABEL: bswapv2i16(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [bswapv2i16_param_0];
+; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 8961;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2;
+; CHECK-NEXT: ret;
+ %b = tail call <2 x i16> @llvm.bswap.v2i16(<2 x i16> %a)
+ ret <2 x i16> %b
+}
+
+define i64 @bswap64(i64 %a) {
+; CHECK-LABEL: bswap64(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [bswap64_param_0];
+; CHECK-NEXT: { .reg .b32 tmp; mov.b64 {%r1, tmp}, %rd1; }
+; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 291;
+; CHECK-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %r3}, %rd1; }
+; CHECK-NEXT: prmt.b32 %r4, %r3, 0, 291;
+; CHECK-NEXT: mov.b64 %rd2, {%r4, %r2};
+; CHECK-NEXT: st.param.b64 [func_retval0+0], %rd2;
+; CHECK-NEXT: ret;
+ %b = tail call i64 @llvm.bswap.i64(i64 %a)
+ ret i64 %b
+}
+
+declare i16 @llvm.bswap.i16(i16)
+declare i32 @llvm.bswap.i32(i32)
+declare <2 x i16> @llvm.bswap.v2i16(<2 x i16>)
+declare i64 @llvm.bswap.i64(i64)
diff --git a/llvm/test/CodeGen/NVPTX/common-linkage.ll b/llvm/test/CodeGen/NVPTX/common-linkage.ll
new file mode 100644
index 0000000..976074e
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/common-linkage.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -mattr=+ptx43 | FileCheck %s --check-prefixes CHECK,PTX43
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -mattr=+ptx50 | FileCheck %s --check-prefixes CHECK,PTX50
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx43 | %ptxas-verify %}
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx50 | %ptxas-verify %}
+
+; PTX43: .weak .global .align 4 .u32 g
+; PTX50: .common .global .align 4 .u32 g
+@g = common addrspace(1) global i32 0, align 4
+
+; CHECK: .weak .const .align 4 .u32 c
+@c = common addrspace(4) global i32 0, align 4
+
+; CHECK: .weak .shared .align 4 .u32 s
+@s = common addrspace(3) global i32 0, align 4
+
+define i32 @f1() {
+ %1 = load i32, ptr addrspace(1) @g
+ ret i32 %1
+}
+
+define i32 @f4() {
+ %1 = load i32, ptr addrspace(4) @c
+ ret i32 %1
+}
+
+define i32 @f3() {
+ %1 = load i32, ptr addrspace(3) @s
+ ret i32 %1
+}
diff --git a/llvm/test/CodeGen/NVPTX/dynamic_stackalloc.ll b/llvm/test/CodeGen/NVPTX/dynamic_stackalloc.ll
index 3ef55ca..09297fb 100644
--- a/llvm/test/CodeGen/NVPTX/dynamic_stackalloc.ll
+++ b/llvm/test/CodeGen/NVPTX/dynamic_stackalloc.ll
@@ -1,10 +1,44 @@
-; RUN: not llc -march=nvptx < %s 2>&1 | FileCheck %s
-; RUN: not llc -march=nvptx64 < %s 2>&1 | FileCheck %s
+; RUN: not llc < %s -march=nvptx -mattr=+ptx72 -mcpu=sm_52 2>&1 | FileCheck %s --check-prefixes=CHECK-FAILS
+; RUN: not llc < %s -march=nvptx -mattr=+ptx73 -mcpu=sm_50 2>&1 | FileCheck %s --check-prefixes=CHECK-FAILS
-; CHECK: in function test_dynamic_stackalloc{{.*}}: dynamic alloca unsupported by NVPTX backend
+; RUN: llc < %s -march=nvptx -mattr=+ptx73 -mcpu=sm_52 | FileCheck %s --check-prefixes=CHECK,CHECK-32
+; RUN: llc < %s -march=nvptx64 -mattr=+ptx73 -mcpu=sm_52 | FileCheck %s --check-prefixes=CHECK,CHECK-64
+; RUN: %if ptxas && !ptxas-12.0 %{ llc < %s -march=nvptx -mattr=+ptx73 -mcpu=sm_52 | %ptxas-verify %}
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mattr=+ptx73 -mcpu=sm_52 | %ptxas-verify %}
-define void @test_dynamic_stackalloc(i64 %n) {
- %alloca = alloca i32, i64 %n
- store volatile i32 0, ptr %alloca
- ret void
+; CHECK-FAILS: in function test_dynamic_stackalloc{{.*}}: Support for dynamic alloca introduced in PTX ISA version 7.3 and requires target sm_52.
+
+; CHECK-LABEL: .visible .func (.param .b32 func_retval0) test_dynamic_stackalloc(
+; CHECK-NOT: __local_depot
+
+; CHECK-32: ld.param.u32 %r[[SIZE:[0-9]]], [test_dynamic_stackalloc_param_0];
+; CHECK-32-NEXT: mad.lo.s32 %r[[SIZE2:[0-9]]], %r[[SIZE]], 1, 7;
+; CHECK-32-NEXT: and.b32 %r[[SIZE3:[0-9]]], %r[[SIZE2]], -8;
+; CHECK-32-NEXT: alloca.u32 %r[[ALLOCA:[0-9]]], %r[[SIZE3]], 16;
+; CHECK-32-NEXT: cvta.local.u32 %r[[ALLOCA]], %r[[ALLOCA]];
+; CHECK-32-NEXT: { // callseq 0, 0
+; CHECK-32-NEXT: .reg .b32 temp_param_reg;
+; CHECK-32-NEXT: .param .b32 param0;
+; CHECK-32-NEXT: st.param.b32 [param0+0], %r[[ALLOCA]];
+
+; CHECK-64: ld.param.u64 %rd[[SIZE:[0-9]]], [test_dynamic_stackalloc_param_0];
+; CHECK-64-NEXT: add.s64 %rd[[SIZE2:[0-9]]], %rd[[SIZE]], 7;
+; CHECK-64-NEXT: and.b64 %rd[[SIZE3:[0-9]]], %rd[[SIZE2]], -8;
+; CHECK-64-NEXT: alloca.u64 %rd[[ALLOCA:[0-9]]], %rd[[SIZE3]], 16;
+; CHECK-64-NEXT: cvta.local.u64 %rd[[ALLOCA]], %rd[[ALLOCA]];
+; CHECK-64-NEXT: { // callseq 0, 0
+; CHECK-64-NEXT: .reg .b32 temp_param_reg;
+; CHECK-64-NEXT: .param .b64 param0;
+; CHECK-64-NEXT: st.param.b64 [param0+0], %rd[[ALLOCA]];
+
+; CHECK-NEXT: .param .b32 retval0;
+; CHECK-NEXT: call.uni (retval0),
+; CHECK-NEXT: bar,
+
+define i32 @test_dynamic_stackalloc(i64 %n) {
+ %alloca = alloca i8, i64 %n, align 16
+ %call = call i32 @bar(ptr %alloca)
+ ret i32 %call
}
+
+declare i32 @bar(ptr)
diff --git a/llvm/test/CodeGen/NVPTX/weak-global.ll b/llvm/test/CodeGen/NVPTX/weak-global.ll
index dd0160d..c5467aa 100644
--- a/llvm/test/CodeGen/NVPTX/weak-global.ll
+++ b/llvm/test/CodeGen/NVPTX/weak-global.ll
@@ -1,7 +1,10 @@
-; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
-; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 | %ptxas-verify %}
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx43 | FileCheck %s --check-prefix PTX43
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx50 | FileCheck %s --check-prefix PTX50
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx43 | %ptxas-verify %}
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx50 | %ptxas-verify %}
-; CHECK: .weak .global .align 4 .u32 g
+; PTX43: .weak .global .align 4 .u32 g
+; PTX50: .common .global .align 4 .u32 g
@g = common addrspace(1) global i32 zeroinitializer
define i32 @func0() {
diff --git a/llvm/test/CodeGen/PowerPC/aix-codemodel-attr.ll b/llvm/test/CodeGen/PowerPC/aix-codemodel-attr.ll
new file mode 100644
index 0000000..ef1156e
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-codemodel-attr.ll
@@ -0,0 +1,166 @@
+; RUN: llc --verify-machineinstrs -mtriple powerpc-ibm-aix --code-model=small < \
+; RUN: %s | FileCheck --check-prefixes=CHECK,CHECK32,CHECK-SMALL,CHECK-SMALL32 %s
+
+; RUN: llc --verify-machineinstrs -mtriple powerpc-ibm-aix --code-model=large < \
+; RUN: %s | FileCheck --check-prefixes=CHECK,CHECK32,CHECK-LARGE,CHECK-LARGE32 %s
+
+; RUN: llc --verify-machineinstrs -mtriple powerpc64-ibm-aix --code-model=small < \
+; RUN: %s | FileCheck --check-prefixes=CHECK,CHECK64,CHECK-SMALL,CHECK-SMALL64 %s
+
+; RUN: llc --verify-machineinstrs -mtriple powerpc64-ibm-aix --code-model=large < \
+; RUN: %s | FileCheck --check-prefixes=CHECK,CHECK64,CHECK-LARGE,CHECK-LARGE64 %s
+
+@a = external dso_local global i32, code_model "small", align 4
+@b = external dso_local global i32, code_model "large", align 4
+@c = dso_local global i32 55, code_model "small", align 4
+@d = dso_local global i32 41, code_model "large", align 4
+@e = external dso_local global i32, align 4
+@f = dso_local global i32 2748, align 4
+
+@large_aliasee = global i32 10, code_model "large", align 4
+@small_aliasee = global i32 171, code_model "small", align 4
+@normal_aliasee = global i32 2748, align 4
+
+@al = alias i32, ptr @large_aliasee
+@as = alias i32, ptr @small_aliasee
+@an = alias i32, ptr @normal_aliasee
+
+define i32 @A() local_unnamed_addr {
+entry:
+ %0 = load i32, ptr @a, align 4
+ ret i32 %0
+}
+; CHECK32: lwz [[SCRATCH:[0-9]+]], L..C[[TL_A:[0-9]+]](2) # @a
+; CHECK64: ld [[SCRATCH:[0-9]+]], L..C[[TL_A:[0-9]+]](2) # @a
+; CHECK: lwz 3, 0([[SCRATCH]])
+; CHECK: blr
+
+define i32 @B() local_unnamed_addr {
+entry:
+ %0 = load i32, ptr @b, align 4
+ ret i32 %0
+}
+; CHECK: addis [[HI:[0-9]+]], L..C[[TL_B:[0-9]+]]@u(2)
+; CHECK32: lwz [[ADDR:[0-9]+]], L..C[[TL_B]]@l([[HI]])
+; CHECK64: ld [[ADDR:[0-9]+]], L..C[[TL_B]]@l([[HI]])
+; CHECK: lwz 3, 0([[ADDR]])
+; CHECK: blr
+
+define i32 @C() local_unnamed_addr {
+entry:
+ %0 = load i32, ptr @c, align 4
+ ret i32 %0
+}
+; CHECK32: lwz [[SCRATCH:[0-9]+]], L..C[[TL_C:[0-9]+]](2) # @c
+; CHECK64: ld [[SCRATCH:[0-9]+]], L..C[[TL_C:[0-9]+]](2) # @c
+; CHECK: lwz 3, 0([[SCRATCH]])
+; CHECK: blr
+
+define i32 @D() local_unnamed_addr {
+entry:
+ %0 = load i32, ptr @d, align 4
+ ret i32 %0
+}
+; CHECK: addis [[HI:[0-9]+]], L..C[[TL_D:[0-9]+]]@u(2)
+; CHECK32: lwz [[ADDR:[0-9]+]], L..C[[TL_D]]@l([[HI]])
+; CHECK64: ld [[ADDR:[0-9]+]], L..C[[TL_D]]@l([[HI]])
+; CHECK: lwz 3, 0([[ADDR]])
+; CHECK: blr
+
+define i32 @E() {
+entry:
+ %0 = load i32, ptr @e, align 4
+ ret i32 %0
+}
+; CHECK-LARGE: addis [[HI:[0-9]+]], L..C[[TL_E:[0-9]+]]@u(2)
+; CHECK-LARGE32: lwz [[SCRATCH:[0-9]+]], L..C[[TL_E]]@l([[HI]])
+; CHECK-SMALL32: lwz [[SCRATCH:[0-9]+]], L..C[[TL_E:[0-9]+]](2)
+; CHECK-LARGE64: ld [[SCRATCH:[0-9]+]], L..C[[TL_E]]@l([[HI]])
+; CHECK-SMALL64: ld [[SCRATCH:[0-9]+]], L..C[[TL_E:[0-9]+]](2)
+; CHECK: lwz 3, 0([[SCRATCH]])
+; CHECK: blr
+
+define i32 @F() {
+entry:
+ %0 = load i32, ptr @f, align 4
+ ret i32 %0
+}
+; CHECK-LARGE: addis [[HI:[0-9]+]], L..C[[TL_F:[0-9]+]]@u(2)
+; CHECK-LARGE32: lwz [[SCRATCH:[0-9]+]], L..C[[TL_F]]@l([[HI]])
+; CHECK-SMALL32: lwz [[SCRATCH:[0-9]+]], L..C[[TL_F:[0-9]+]](2)
+; CHECK-LARGE64: ld [[SCRATCH:[0-9]+]], L..C[[TL_F]]@l([[HI]])
+; CHECK-SMALL64: ld [[SCRATCH:[0-9]+]], L..C[[TL_F:[0-9]+]](2)
+; CHECK: lwz 3, 0([[SCRATCH]])
+; CHECK: blr
+
+define noundef nonnull ptr @addr_a() local_unnamed_addr {
+entry:
+ ret ptr @a
+}
+; CHECK32: lwz 3, L..C[[TL_A]](2) # @a
+; CHECK64: ld 3, L..C[[TL_A]](2) # @a
+; CHECK: blr
+
+define noundef nonnull ptr @addr_b() local_unnamed_addr {
+entry:
+ ret ptr @b
+}
+; CHECK: addis [[HI:[0-9]+]], L..C[[TL_B]]@u(2)
+; CHECK32: lwz 3, L..C[[TL_B]]@l([[HI]])
+; CHECK64: ld 3, L..C[[TL_B]]@l([[HI]])
+; CHECK: blr
+
+
+define noundef nonnull ptr @addr_c() local_unnamed_addr {
+entry:
+ ret ptr @c
+}
+; CHECK32: lwz 3, L..C[[TL_C]](2) # @c
+; CHECK64: ld 3, L..C[[TL_C]](2) # @c
+; CHECK: blr
+
+define noundef nonnull ptr @addr_d() local_unnamed_addr {
+entry:
+ ret ptr @d
+}
+; CHECK: addis [[HI:[0-9]+]], L..C[[TL_D]]@u(2)
+; CHECK32: lwz 3, L..C[[TL_D]]@l([[HI]])
+; CHECK64: ld 3, L..C[[TL_D]]@l([[HI]])
+; CHECK: blr
+
+define i32 @G() {
+ %tmp = load i32, ptr @al
+ ret i32 %tmp
+}
+; CHECK: addis [[HI:[0-9]+]], L..C[[TL_AL:[0-9]+]]@u(2)
+; CHECK32: lwz [[ADDR:[0-9]+]], L..C[[TL_AL]]@l([[HI]])
+; CHECK64: ld [[ADDR:[0-9]+]], L..C[[TL_AL]]@l([[HI]])
+; CHECK: lwz 3, 0([[ADDR]])
+
+define i32 @H() {
+ %tmp = load i32, ptr @as
+ ret i32 %tmp
+}
+; CHECK32: lwz [[ADDR:[0-9]+]], L..C[[TL_AS:[0-9]+]](2)
+; CHECK64: ld [[ADDR:[0-9]+]], L..C[[TL_AS:[0-9]+]](2)
+; CHECK: lwz 3, 0([[ADDR]])
+
+;; Check TOC entries have correct storage mapping class
+; CHECK: L..C[[TL_A]]:
+; CHECK: .tc a[TC],a[UA]
+; CHECK: L..C[[TL_B]]:
+; CHECK: .tc b[TE],b[UA]
+; CHECK: L..C[[TL_C]]:
+; CHECK: .tc c[TC],c[RW]
+; CHECK: L..C[[TL_D]]:
+; CHECK: .tc d[TE],d[RW]
+; CHECK: L..C[[TL_E]]:
+; CHECK-SMALL: .tc e[TC],e[UA]
+; CHECK-LARGE: .tc e[TE],e[UA]
+; CHECK: L..C[[TL_F]]:
+; CHECK-SMALL: .tc f[TC],f[RW]
+; CHECK-LARGE: .tc f[TE],f[RW]
+; CHECK: L..C[[TL_AL]]:
+; CHECK: .tc al[TE],al
+; CHECK: L..C[[TL_AS]]:
+; CHECK: .tc as[TC],as
diff --git a/llvm/test/CodeGen/PowerPC/aix-overflow-toc-data.py b/llvm/test/CodeGen/PowerPC/aix-overflow-toc-data.py
new file mode 100644
index 0000000..276c6da
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-overflow-toc-data.py
@@ -0,0 +1,59 @@
+# UNSUPPORTED: expensive_checks, debug
+
+# RUN: %python %s > %t.ll
+# RUN: llc -mtriple powerpc-ibm-aix-xcoff -code-model=small -mcpu=pwr7 -mattr=-altivec -O0 < %t.ll | \
+# RUN: FileCheck --check-prefix=ASM32 %s
+
+# RUN: llc -mtriple powerpc64-ibm-aix-xcoff -code-model=small -mcpu=pwr7 -mattr=-altivec -O0 < %t.ll | \
+# RUN: FileCheck --check-prefix=ASM64 %s
+
+# RUN: llc -mtriple powerpc-ibm-aix-xcoff -code-model=small -mcpu=pwr7 -mattr=-altivec -O0 \
+# RUN: -filetype=obj -o %t.o < %t.ll
+# RUN: llvm-objdump --no-print-imm-hex -D -r --symbol-description %t.o | FileCheck -D#NFA=2 --check-prefix=DIS32 %s
+
+# RUN: llc -mtriple powerpc64-ibm-aix-xcoff -code-model=small -mcpu=pwr7 -mattr=-altivec -O0 \
+# RUN: -filetype=obj -o %t.o < %t.ll
+# RUN: llvm-objdump --no-print-imm-hex -D -r --symbol-description %t.o | FileCheck -D#NFA=2 --check-prefix=DIS64 %s
+
+numentries = 8195
+for x in range(0, numentries):
+ print("@a%d = global i32 0, align 4 #0" % (x))
+
+print("define void @foo() {")
+print("entry:")
+for x in range(0, numentries):
+ print("store i32 1, i32* @a%d, align 4" % (x))
+print("ret void")
+print("}")
+
+print('attributes #0 = { "toc-data" }')
+
+# 32-bit assembly check
+# ASM32: la 4, a0[TD](2)
+# ASM32: la 4, a1[TD](2)
+
+# ASM32: la 4, a8191[TD](2)
+# ASM32: la 4, a8192[TD](2)
+# ASM32: la 4, a8193[TD](2)
+
+# 64-bit assembly check
+# ASM64: la 4, a0[TD](2)
+# ASM64: la 4, a1[TD](2)
+
+# ASM64: la 4, a8191[TD](2)
+# ASM64: la 4, a8192[TD](2)
+# ASM64: la 4, a8193[TD](2)
+
+# DIS32: fffc: 38 82 7f fc addi 4, 2, 32764
+# DIS32: 0000fffe: R_TOC (idx: [[#NFA+16391]]) a8191[TD]
+# DIS32: 10004: 38 82 80 00 addi 4, 2, -32768
+# DIS32: 00010006: R_TOC (idx: [[#NFA+16393]]) a8192[TD]
+# DIS32: 1000c: 38 82 80 04 addi 4, 2, -32764
+# DIS32: 0001000e: R_TOC (idx: [[#NFA+16395]]) a8193[TD]
+
+# DIS64: fffc: 38 82 7f fc addi 4, 2, 32764
+# DIS64: 0000fffe: R_TOC (idx: [[#NFA+16391]]) a8191[TD]
+# DIS64: 10004: 38 82 80 00 addi 4, 2, -32768
+# DIS64: 00010006: R_TOC (idx: [[#NFA+16393]]) a8192[TD]
+# DIS64: 1000c: 38 82 80 04 addi 4, 2, -32764
+# DIS64: 0001000e: R_TOC (idx: [[#NFA+16395]]) a8193[TD]
diff --git a/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-largeaccess.ll b/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-largeaccess.ll
new file mode 100644
index 0000000..eb16bae
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-largeaccess.ll
@@ -0,0 +1,632 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s \
+; RUN: | FileCheck %s --check-prefix=SMALL-LOCAL-DYNAMIC-SMALLCM64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: < %s | FileCheck %s \
+; RUN: --check-prefix=SMALL-LOCAL-DYNAMIC-LARGECM64
+
+; Test disassembly of object.
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff -xcoff-traceback-table=false \
+; RUN: --code-model=large -filetype=obj -o %t.o < %s
+; RUN: llvm-objdump -D -r --symbol-description %t.o | FileCheck -D#NFA=2 --check-prefix=DIS %s
+
+@ElementIntTLSv1 = thread_local(localdynamic) global [8187 x i32] zeroinitializer, align 4 ; Within 32K
+@ElementIntTLS2 = thread_local(localdynamic) global [4000 x i32] zeroinitializer, align 4
+@ElementIntTLS3 = thread_local(localdynamic) global [4000 x i32] zeroinitializer, align 4
+@ElementIntTLS4 = thread_local(localdynamic) global [4000 x i32] zeroinitializer, align 4
+@ElementIntTLS5 = thread_local(localdynamic) global [4000 x i32] zeroinitializer, align 4
+@ElementIntTLSv2 = thread_local(localdynamic) global [9000 x i32] zeroinitializer, align 4 ; Beyond 32K
+
+@ElementLongTLS6 = external thread_local(localdynamic) global [60 x i64], align 8
+@ElementLongTLS2 = thread_local(localdynamic) global [3000 x i64] zeroinitializer, align 8 ; Within 32K
+@MyTLSGDVar = thread_local global [800 x i64] zeroinitializer, align 8
+@ElementLongTLS3 = thread_local(localdynamic) global [3000 x i64] zeroinitializer, align 8
+@ElementLongTLS4 = thread_local(localdynamic) global [3000 x i64] zeroinitializer, align 8
+@ElementLongTLS5 = thread_local(localdynamic) global [3000 x i64] zeroinitializer, align 8
+@ElementLongTLS = thread_local(localdynamic) local_unnamed_addr global [7800 x i64] zeroinitializer, align 8 ; Beyond 32K
+
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull) #1
+
+; All accesses use a "faster" local-dynamic sequence directly off the module handle.
+; Exercise PPCXCOFFObjectWriter::getRelocTypeAndSignSize/fixup_ppc_half16.
+define signext i32 @test1() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: test1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r6, L..C1(r2) # target-flags(ppc-tlsld) @ElementIntTLS2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r7, L..C2(r2) # target-flags(ppc-tlsld) @ElementIntTLS3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r8, L..C3(r2) # target-flags(ppc-tlsld) @ElementIntTLS4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r9, L..C4(r2) # target-flags(ppc-tlsld) @ElementIntTLS5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r5, L..C5(r2) # target-flags(ppc-tlsld) @ElementIntTLSv1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r6, r3, r6
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r7, r3, r7
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r8, r3, r8
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r9, r3, r9
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stwux r4, r3, r5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r4, 24(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 320(r6)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 324(r7)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 88
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r4, 328(r8)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 332(r9)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 102
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: test1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r7, L..C2@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r8, L..C3@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r9, L..C4@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r7, L..C2@l(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r8, L..C3@l(r8)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r9, L..C4@l(r9)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r5, L..C1@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C5@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r6, L..C5@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r7, r3, r7
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r8, r3, r8
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r9, r3, r9
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r6, r3, r6
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stwux r4, r3, r5
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r4, 24(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 2
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 320(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 324(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 88
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r4, 328(r8)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 332(r9)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 102
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+entry:
+ %tls1 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLSv1)
+ store i32 1, ptr %tls1, align 4
+ %arrayidx1 = getelementptr inbounds [8187 x i32], ptr %tls1, i64 0, i64 6
+ store i32 4, ptr %arrayidx1, align 4
+ %tls2 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS2)
+ %arrayidx2 = getelementptr inbounds [4000 x i32], ptr %tls2, i64 0, i64 80
+ store i32 2, ptr %arrayidx2, align 4
+ %tls3 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS3)
+ %arrayidx3 = getelementptr inbounds [4000 x i32], ptr %tls3, i64 0, i64 81
+ store i32 3, ptr %arrayidx3, align 4
+ %tls4 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS4)
+ %arrayidx4 = getelementptr inbounds [4000 x i32], ptr %tls4, i64 0, i64 82
+ store i32 4, ptr %arrayidx4, align 4
+ %tls5 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS5)
+ %arrayidx5 = getelementptr inbounds [4000 x i32], ptr %tls5, i64 0, i64 83
+ store i32 88, ptr %arrayidx5, align 4
+ %load1 = load i32, ptr %tls1, align 4
+ %load2 = load i32, ptr %arrayidx1, align 4
+ %load3 = load i32, ptr %arrayidx2, align 4
+ %load4 = load i32, ptr %arrayidx3, align 4
+ %load5 = load i32, ptr %arrayidx4, align 4
+ %add = add i32 %load1, 88
+ %add6 = add i32 %add, %load2
+ %add8 = add i32 %add6, %load3
+ %add10 = add i32 %add8, %load4
+ %add12 = add i32 %add10, %load5
+ ret i32 %add12
+}
+
+; All accesses use a "faster" local-dynamic sequence directly off the module handle.
+; Exercise PPCXCOFFObjectWriter::getRelocTypeAndSignSize/fixup_ppc_half16ds.
+define i64 @test2() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: test2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C6(r2) # target-flags(ppc-tlsld) @ElementLongTLS6
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 212
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r4, r6, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r3, 424(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C7(r2) # target-flags(ppc-tlsld) @ElementLongTLS2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 203
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r4, 1200(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C8(r2) # target-flags(ppc-tlsgdm) @MyTLSGDVar
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C9(r2) # target-flags(ppc-tlsgd) @MyTLSGDVar
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_addr[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 44
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r4, 440(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C10(r2) # target-flags(ppc-tlsld) @ElementLongTLS3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 6
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r4, 2000(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C11(r2) # target-flags(ppc-tlsld) @ElementLongTLS4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 100
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r4, r6, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r3, 6800(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C12(r2) # target-flags(ppc-tlsld) @ElementLongTLS5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 882
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r4, 8400(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 1191
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: test2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r7, L..C6@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 212
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C6@l(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 424(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C7@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 203
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C7@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 1200(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C8@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r4, L..C9@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C8@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C9@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_addr[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 44
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 440(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C10@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 6
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C10@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 2000(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C11@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 100
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C11@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 6800(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C12@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 882
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C12@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 8400(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 1191
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+entry:
+ %tls1 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ElementLongTLS6)
+ %arrayidx = getelementptr inbounds [60 x i64], ptr %tls1, i64 0, i64 53
+ store i64 212, ptr %arrayidx, align 8
+ %tls2 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ElementLongTLS2)
+ %arrayidx1 = getelementptr inbounds [3000 x i64], ptr %tls2, i64 0, i64 150
+ store i64 203, ptr %arrayidx1, align 8
+ %tls3 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @MyTLSGDVar)
+ %arrayidx2 = getelementptr inbounds [800 x i64], ptr %tls3, i64 0, i64 55
+ store i64 44, ptr %arrayidx2, align 8
+ %tls4 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ElementLongTLS3)
+ %arrayidx3 = getelementptr inbounds [3000 x i64], ptr %tls4, i64 0, i64 250
+ store i64 6, ptr %arrayidx3, align 8
+ %tls5 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ElementLongTLS4)
+ %arrayidx4 = getelementptr inbounds [3000 x i64], ptr %tls5, i64 0, i64 850
+ store i64 100, ptr %arrayidx4, align 8
+ %tls6 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ElementLongTLS5)
+ %arrayidx5 = getelementptr inbounds [3000 x i64], ptr %tls6, i64 0, i64 1050
+ store i64 882, ptr %arrayidx5, align 8
+ %load1 = load i64, ptr %arrayidx1, align 8
+ %load2 = load i64, ptr %arrayidx3, align 8
+ %load3 = load i64, ptr %arrayidx4, align 8
+ %add = add i64 %load1, 882
+ %add9 = add i64 %add, %load2
+ %add11 = add i64 %add9, %load3
+ ret i64 %add11
+}
+
+; Example where one access uses the regular local-dynamic sequence from the TOC.
+define signext i32 @test3() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: test3:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r6, L..C1(r2) # target-flags(ppc-tlsld) @ElementIntTLS2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r7, L..C2(r2) # target-flags(ppc-tlsld) @ElementIntTLS3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r8, L..C3(r2) # target-flags(ppc-tlsld) @ElementIntTLS4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r9, L..C4(r2) # target-flags(ppc-tlsld) @ElementIntTLS5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r5, L..C13(r2) # target-flags(ppc-tlsld) @ElementIntTLSv2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r6, r3, r6
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r7, r3, r7
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r8, r3, r8
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r9, r3, r9
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stwux r4, r3, r5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r4, 24(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 320(r6)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 324(r7)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 88
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r4, 328(r8)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 332(r9)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 102
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: test3:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C13@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r7, L..C2@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r8, L..C3@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r9, L..C4@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r7, L..C2@l(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r8, L..C3@l(r8)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r9, L..C4@l(r9)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r5, L..C13@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C5@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r6, L..C5@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r7, r3, r7
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r8, r3, r8
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r9, r3, r9
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r6, r3, r6
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stwux r4, r3, r5
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r4, 24(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 2
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 320(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 324(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 88
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r4, 328(r8)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 332(r9)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 102
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+entry:
+ %tls1 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLSv2)
+ store i32 1, ptr %tls1, align 4
+ %arrayidx1 = getelementptr inbounds [9000 x i32], ptr %tls1, i64 0, i64 6
+ store i32 4, ptr %arrayidx1, align 4
+ %tls2 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS2)
+ %arrayidx2 = getelementptr inbounds [4000 x i32], ptr %tls2, i64 0, i64 80
+ store i32 2, ptr %arrayidx2, align 4
+ %tls3 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS3)
+ %arrayidx3 = getelementptr inbounds [4000 x i32], ptr %tls3, i64 0, i64 81
+ store i32 3, ptr %arrayidx3, align 4
+ %tls4 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS4)
+ %arrayidx4 = getelementptr inbounds [4000 x i32], ptr %tls4, i64 0, i64 82
+ store i32 4, ptr %arrayidx4, align 4
+ %tls5 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS5)
+ %arrayidx5 = getelementptr inbounds [4000 x i32], ptr %tls5, i64 0, i64 83
+ store i32 88, ptr %arrayidx5, align 4
+ %load1 = load i32, ptr %tls1, align 4
+ %load2 = load i32, ptr %arrayidx1, align 4
+ %load3 = load i32, ptr %arrayidx2, align 4
+ %load4 = load i32, ptr %arrayidx3, align 4
+ %load5 = load i32, ptr %arrayidx4, align 4
+ %add = add i32 %load1, 88
+ %add9 = add i32 %add, %load2
+ %add11 = add i32 %add9, %load3
+ %add13 = add i32 %add11, %load4
+ %add15 = add i32 %add13, %load5
+ ret i32 %add15
+}
+
+; DIS: file format aix5coff64-rs6000
+; DIS: Disassembly of section .text:
+; DIS: 0000000000000000 (idx: [[#NFA+9]]) .test1:
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mflr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stdu 1, -48(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 6, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+25]]) ElementIntTLSv1[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 0, 64(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 7, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+27]]) ElementIntTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 0(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 8, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+29]]) ElementIntTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 9, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+31]]) ElementIntTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 7, 16(7)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+27]]) ElementIntTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 8, 24(8)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+29]]) ElementIntTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 9, 32(9)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+31]]) ElementIntTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0x0
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+1]]) .__tls_get_mod[PR]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 5, 8(6)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+25]]) ElementIntTLSv1[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 6, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+33]]) ElementIntTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 1
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 6, 40(6)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+33]]) ElementIntTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 7, 3, 7
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 8, 3, 8
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 9, 3, 9
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 6, 3, 6
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stwux 4, 3, 5
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 4
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 4, 24(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 2
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 320(6)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 324(7)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 88
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 4, 328(8)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 332(9)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 102
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addi 1, 1, 48
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 0, 16(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mtlr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} blr
+
+; DIS: 0000000000000090 (idx: [[#NFA+11]]) .test2:
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mflr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stdu 1, -48(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 0, 64(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 7, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+35]]) ElementLongTLS6[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 0(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0x0
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+1]]) .__tls_get_mod[PR]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 212
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mr 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 48(7)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+35]]) ElementLongTLS6[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 3, 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 424(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+37]]) ElementLongTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 203
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 56(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+37]]) ElementLongTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 3, 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 1200(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+39]]) .MyTLSGDVar[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 4, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+41]]) MyTLSGDVar[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 64(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+39]]) .MyTLSGDVar[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 4, 72(4)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+41]]) MyTLSGDVar[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0x0
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+3]]) .__tls_get_addr[PR]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 44
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 440(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+43]]) ElementLongTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 6
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 80(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+43]]) ElementLongTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 3, 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 2000(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+45]]) ElementLongTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 100
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 88(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+45]]) ElementLongTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 3, 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 6800(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+47]]) ElementLongTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 882
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 96(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+47]]) ElementLongTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 3, 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 8400(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 1191
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addi 1, 1, 48
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 0, 16(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mtlr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} blr
+
+; DIS: 0000000000000140 (idx: [[#NFA+13]]) .test3:
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mflr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stdu 1, -48(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 6, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+49]]) ElementIntTLSv2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 0, 64(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 7, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+27]]) ElementIntTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 0(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 8, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+29]]) ElementIntTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 9, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+31]]) ElementIntTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 7, 16(7)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+27]]) ElementIntTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 8, 24(8)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+29]]) ElementIntTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 9, 32(9)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+31]]) ElementIntTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0x0
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+1]]) .__tls_get_mod[PR]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 5, 104(6)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+49]]) ElementIntTLSv2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 6, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+33]]) ElementIntTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 1
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 6, 40(6)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+33]]) ElementIntTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 7, 3, 7
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 8, 3, 8
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 9, 3, 9
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 6, 3, 6
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stwux 4, 3, 5
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 4
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 4, 24(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 2
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 320(6)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 324(7)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 88
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 4, 328(8)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 332(9)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 102
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addi 1, 1, 48
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 0, 16(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mtlr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} blr
+
+; DIS: Disassembly of section .data:
+
+; DIS: 00000000000001d0 (idx: 17) test1[DS]:
+; DIS-NEXT: 1d0: 00 00 00 00
+; DIS-NEXT: 00000000000001d0: R_POS (idx: [[#NFA+9]]) .test1
+; DIS-NEXT: 1d4: 00 00 00 00
+; DIS-NEXT: 1d8: 00 00 00 00
+; DIS-NEXT: 00000000000001d8: R_POS (idx: [[#NFA+21]]) TOC[TC0]
+; DIS-NEXT: 1dc: 00 00 02 18
+
+; DIS: 00000000000001e8 (idx: 19) test2[DS]:
+; DIS-NEXT: 1e8: 00 00 00 00
+; DIS-NEXT: 00000000000001e8: R_POS (idx: [[#NFA+11]]) .test2
+; DIS-NEXT: 1ec: 00 00 00 90
+; DIS-NEXT: 1f0: 00 00 00 00
+; DIS-NEXT: 00000000000001f0: R_POS (idx: [[#NFA+21]]) TOC[TC0]
+; DIS-NEXT: 1f4: 00 00 02 18
+
+; DIS: 0000000000000200 (idx: 21) test3[DS]:
+; DIS-NEXT: 200: 00 00 00 00
+; DIS-NEXT: 0000000000000200: R_POS (idx: [[#NFA+13]]) .test3
+; DIS-NEXT: 204: 00 00 01 40
+; DIS-NEXT: 208: 00 00 00 00
+; DIS-NEXT: 0000000000000208: R_POS (idx: [[#NFA+21]]) TOC[TC0]
+; DIS-NEXT: 20c: 00 00 02 18
+
+; DIS: 0000000000000218 (idx: 25) _$TLSML[TC]:
+; DIS-NEXT: 218: 00 00 00 00
+; DIS-NEXT: 0000000000000218: R_TLSML (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: 21c: 00 00 00 00
+
+; DIS: 0000000000000220 (idx: 27) ElementIntTLSv1[TE]:
+; DIS-NEXT: 220: 00 00 00 00
+; DIS-NEXT: 0000000000000220: R_TLS_LD (idx: [[#NFA+51]]) ElementIntTLSv1[TL]
+; DIS-NEXT: 224: 00 00 00 00
+
+; DIS: 0000000000000228 (idx: 29) ElementIntTLS3[TE]:
+; DIS-NEXT: 228: 00 00 00 00
+; DIS-NEXT: 0000000000000228: R_TLS_LD (idx: [[#NFA+55]]) ElementIntTLS3[TL]
+; DIS-NEXT: 22c: 00 00 be 6c
+
+; DIS: 0000000000000230 (idx: 31) ElementIntTLS4[TE]:
+; DIS-NEXT: 230: 00 00 00 00
+; DIS-NEXT: 0000000000000230: R_TLS_LD (idx: [[#NFA+57]]) ElementIntTLS4[TL]
+; DIS-NEXT: 234: 00 00 fc ec
+
+; DIS: 0000000000000238 (idx: 33) ElementIntTLS5[TE]:
+; DIS-NEXT: 238: 00 00 00 00
+; DIS-NEXT: 0000000000000238: R_TLS_LD (idx: [[#NFA+59]]) ElementIntTLS5[TL]
+; DIS-NEXT: 23c: 00 01 3b 6c
+
+; DIS: 0000000000000240 (idx: 35) ElementIntTLS2[TE]:
+; DIS-NEXT: 240: 00 00 00 00
+; DIS-NEXT: 0000000000000240: R_TLS_LD (idx: [[#NFA+53]]) ElementIntTLS2[TL]
+; DIS-NEXT: 244: 00 00 7f ec
+
+; DIS: 0000000000000248 (idx: 37) ElementLongTLS6[TE]:
+; DIS-NEXT: 248: 00 00 00 00
+; DIS-NEXT: 0000000000000248: R_TLS_LD (idx: [[#NFA+5]]) ElementLongTLS6[UL]
+; DIS-NEXT: 24c: 00 00 00 00
+
+; DIS: 0000000000000250 (idx: 39) ElementLongTLS2[TE]:
+; DIS-NEXT: 250: 00 00 00 00
+; DIS-NEXT: 0000000000000250: R_TLS_LD (idx: [[#NFA+63]]) ElementLongTLS2[TL]
+; DIS-NEXT: 254: 00 02 06 90
+
+; DIS: 0000000000000258 (idx: 41) .MyTLSGDVar[TE]:
+; DIS-NEXT: 258: 00 00 00 00
+; DIS-NEXT: 0000000000000258: R_TLSM (idx: [[#NFA+65]]) MyTLSGDVar[TL]
+; DIS-NEXT: 25c: 00 00 00 00
+
+; DIS: 0000000000000260 (idx: 43) MyTLSGDVar[TE]:
+; DIS-NEXT: 260: 00 00 00 00
+; DIS-NEXT: 0000000000000260: R_TLS (idx: [[#NFA+65]]) MyTLSGDVar[TL]
+; DIS-NEXT: 264: 00 02 64 50
+
+; DIS: 0000000000000268 (idx: 45) ElementLongTLS3[TE]:
+; DIS-NEXT: 268: 00 00 00 00
+; DIS-NEXT: 0000000000000268: R_TLS_LD (idx: [[#NFA+67]]) ElementLongTLS3[TL]
+; DIS-NEXT: 26c: 00 02 7d 50
+
+; DIS: 0000000000000270 (idx: 47) ElementLongTLS4[TE]:
+; DIS-NEXT: 270: 00 00 00 00
+; DIS-NEXT: 0000000000000270: R_TLS_LD (idx: [[#NFA+69]]) ElementLongTLS4[TL]
+; DIS-NEXT: 274: 00 02 db 10
+
+; DIS: 0000000000000278 (idx: 49) ElementLongTLS5[TE]:
+; DIS-NEXT: 278: 00 00 00 00
+; DIS-NEXT: 0000000000000278: R_TLS_LD (idx: [[#NFA+71]]) ElementLongTLS5[TL]
+; DIS-NEXT: 27c: 00 03 38 d0
+
+; DIS: 0000000000000280 (idx: 51) ElementIntTLSv2[TE]:
+; DIS-NEXT: 280: 00 00 00 00
+; DIS-NEXT: 0000000000000280: R_TLS_LD (idx: [[#NFA+61]]) ElementIntTLSv2[TL]
+; DIS-NEXT: 284: 00 01 79 ec
+
+; DIS: Disassembly of section .tdata:
+; DIS: 0000000000000000 (idx: [[#NFA+51]]) ElementIntTLSv1[TL]:
+; DIS: 0000000000007fec (idx: [[#NFA+53]]) ElementIntTLS2[TL]:
+; DIS: 000000000000be6c (idx: [[#NFA+55]]) ElementIntTLS3[TL]:
+; DIS: 000000000000fcec (idx: [[#NFA+57]]) ElementIntTLS4[TL]:
+; DIS: 0000000000013b6c (idx: [[#NFA+59]]) ElementIntTLS5[TL]:
+; DIS: 00000000000179ec (idx: [[#NFA+61]]) ElementIntTLSv2[TL]:
+; DIS: 0000000000020690 (idx: [[#NFA+63]]) ElementLongTLS2[TL]:
+; DIS: 0000000000026450 (idx: [[#NFA+65]]) MyTLSGDVar[TL]:
+; DIS: 0000000000027d50 (idx: [[#NFA+67]]) ElementLongTLS3[TL]:
+; DIS: 000000000002db10 (idx: [[#NFA+69]]) ElementLongTLS4[TL]:
+; DIS: 00000000000338d0 (idx: [[#NFA+71]]) ElementLongTLS5[TL]:
+; DIS: 0000000000039690 (idx: [[#NFA+73]]) ElementLongTLS[TL]:
diff --git a/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-types.ll b/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-types.ll
new file mode 100644
index 0000000..d996d86
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-types.ll
@@ -0,0 +1,1066 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s \
+; RUN: | FileCheck %s --check-prefix=SMALL-LOCAL-DYNAMIC-SMALLCM64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: < %s | FileCheck %s \
+; RUN: --check-prefix=SMALL-LOCAL-DYNAMIC-LARGECM64
+; RUN: llc -O0 -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s \
+; RUN: | FileCheck %s --check-prefix=SMALL-LOCAL-DYNAMIC-SMALLCM64-O0
+; RUN: llc -O0 -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: < %s | FileCheck %s \
+; RUN: --check-prefix=SMALL-LOCAL-DYNAMIC-LARGECM64-O0
+
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull) #1
+@tlv_int_init = local_unnamed_addr global i32 87, align 4
+
+@tlv_char = thread_local(localdynamic) global i8 1, align 1
+@tlv_short = thread_local(localdynamic) global i8 1, align 2
+@tlv_int = thread_local(localdynamic) global i32 1, align 4
+@internal_tlv_int = internal thread_local(localdynamic) global i32 1, align 4
+@tlv_long = thread_local(localdynamic) global i64 1, align 8
+@internal_tlv_long = internal thread_local(localdynamic) global i64 1, align 8
+@tlv_float = thread_local(localdynamic) global float 1.000000e+00, align 4
+@internal_tlv_double = internal thread_local(localdynamic) global double 1.000000e+00, align 8
+
+%struct.anon = type { i32 }
+@ThreadLocalStruct = thread_local(localdynamic) global %struct.anon zeroinitializer, align 1
+@a = thread_local(localdynamic) global [87 x i32] zeroinitializer, align 4
+
+define nonnull ptr @AddrTest1() local_unnamed_addr {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: AddrTest1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C1(r2) # target-flags(ppc-tlsld) @a
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r3, r3, 12
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: AddrTest1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C1@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r3, r3, 12
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: AddrTest1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C1(r2) # target-flags(ppc-tlsld) @a
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r3, r3, 12
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: AddrTest1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C0@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r3, r3, 12
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @a)
+ %arrayidx = getelementptr inbounds [87 x i32], ptr %tlv_addr, i64 0, i64 3
+ ret ptr %arrayidx
+}
+
+define signext i32 @testUnaligned() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testUnaligned:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C2(r2) # target-flags(ppc-tlsld) @ThreadLocalStruct
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwax r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testUnaligned:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C2@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C2@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwax r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testUnaligned:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C2(r2) # target-flags(ppc-tlsld) @ThreadLocalStruct
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwa r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testUnaligned:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C2@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C2@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwa r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = call align 1 ptr @llvm.threadlocal.address.p0(ptr align 1 @ThreadLocalStruct)
+ %x = getelementptr inbounds %struct.anon, ptr %tlv_addr, i32 0, i32 0
+ %value = load i32, ptr %x, align 1
+ ret i32 %value
+}
+
+define void @testChar(i8 noundef signext %x) {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testChar:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C3(r2) # target-flags(ppc-tlsld) @tlv_char
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stbx r6, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testChar:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r7, L..C3@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C3@l(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stbx r6, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testChar:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stw r3, 60(r1) # 4-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mr r4, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwz r3, 60(r1) # 4-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r5, L..C3(r2) # target-flags(ppc-tlsld) @tlv_char
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r4, r4, r5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stb r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testChar:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stw r3, 60(r1) # 4-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C3@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 48(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r5, 48(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mr r4, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwz r3, 60(r1) # 4-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r5, L..C3@l(r5)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r4, r4, r5
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stb r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 1 ptr @llvm.threadlocal.address.p0(ptr align 1 @tlv_char)
+ store i8 %x, ptr %tlv_addr, align 1
+ ret void
+}
+
+define void @testShort(i16 noundef signext %x) {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testShort:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C4(r2) # target-flags(ppc-tlsld) @tlv_short
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: sthx r6, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testShort:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r7, L..C4@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C4@l(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: sthx r6, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testShort:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stw r3, 60(r1) # 4-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mr r4, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwz r3, 60(r1) # 4-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r5, L..C4(r2) # target-flags(ppc-tlsld) @tlv_short
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r4, r4, r5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: sth r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testShort:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stw r3, 60(r1) # 4-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C4@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 48(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r5, 48(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mr r4, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwz r3, 60(r1) # 4-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r5, L..C4@l(r5)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r4, r4, r5
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: sth r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 2 ptr @llvm.threadlocal.address.p0(ptr align 2 @tlv_short)
+ store i16 %x, ptr %tlv_addr, align 2
+ ret void
+}
+
+define signext i32 @testInt1() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testInt1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C5(r2) # target-flags(ppc-tlsld) @tlv_int
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwax r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testInt1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C5@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C5@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwax r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testInt1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C5(r2) # target-flags(ppc-tlsld) @tlv_int
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwa r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testInt1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C5@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C5@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwa r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @tlv_int)
+ %value = load i32, ptr %tlv_addr, align 4
+ ret i32 %value
+}
+
+define signext i32 @testInt2() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testInt2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C6(r2) # target-flags(ppc-tlsld) @internal_tlv_int
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwzx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C7(r2) # @tlv_int_init
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwz r4, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r3, r4, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: extsw r3, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testInt2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C6@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C6@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwzx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r4, L..C7@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C7@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwz r4, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r4, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: extsw r3, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testInt2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C6(r2) # target-flags(ppc-tlsld) @internal_tlv_int
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwz r4, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C7(r2) # @tlv_int_init
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwz r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: extsw r3, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testInt2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C6@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C6@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwz r4, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C7@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C7@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwz r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: extsw r3, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @internal_tlv_int)
+ %tlv_val = load i32, ptr %tlv_addr, align 4
+ %global_val = load i32, ptr @tlv_int_init, align 4
+ %sum = add nsw i32 %global_val, %tlv_val
+ ret i32 %sum
+}
+
+define signext i64 @testLong1() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testLong1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C8(r2) # target-flags(ppc-tlsld) @tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ldx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testLong1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C8@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C8@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ldx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testLong1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C8(r2) # target-flags(ppc-tlsld) @tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testLong1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C8@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C8@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @tlv_long)
+ %value = load i64, ptr %tlv_addr, align 4
+ ret i64 %value
+}
+
+define void @testLong2(i64 noundef signext %x) {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testLong2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C9(r2) # target-flags(ppc-tlsld) @internal_tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ldx r5, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r5, r5, 9
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdx r5, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testLong2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C9@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C9@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ldx r5, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r5, r5, 9
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdx r5, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testLong2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C9(r2) # target-flags(ppc-tlsld) @internal_tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r4, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r3, r3, 9
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testLong2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C9@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C9@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r4, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r3, r3, 9
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @internal_tlv_long)
+ %value = load i64, ptr %tlv_addr, align 8
+ %add = add nsw i64 %value, 9
+ store i64 %add, ptr %tlv_addr, align 8
+ ret void
+}
+
+define i32 @testLong3() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testLong3:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C8(r2) # target-flags(ppc-tlsld) @tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ldx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testLong3:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C8@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C8@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ldx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testLong3:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C8(r2) # target-flags(ppc-tlsld) @tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testLong3:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C8@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C8@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @tlv_long)
+ %value = load i64, ptr %tlv_addr, align 8
+ %conv = trunc i64 %value to i32
+ ret i32 %conv
+}
+
+define void @testFloat1(float noundef %x) {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testFloat1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: vspltisw v2, 1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: vspltisw v3, 8
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: xvcvsxwdp vs0, vs34
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C10(r2) # target-flags(ppc-tlsld) @tlv_float
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lfsx f1, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: fadds f0, f1, f0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: xvcvsxwdp vs1, vs35
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stfsx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testFloat1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: vspltisw v2, 1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C10@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: vspltisw v3, 8
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: xvcvsxwdp vs0, vs34
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C10@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lfsx f1, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: fadds f0, f1, f0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: xvcvsxwdp vs1, vs35
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stfsx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testFloat1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C10(r2) # target-flags(ppc-tlsld) @tlv_float
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C11(r2) # %const.1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lfs f1, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C12(r2) # %const.0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lfs f1, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testFloat1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C10@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C10@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r4, L..C11@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C11@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lfs f1, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r4, L..C12@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C12@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lfs f1, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @tlv_float)
+ %value = load float, ptr %tlv_addr, align 4
+ %inc = fadd float %value, 1.000000e+00
+ %add = fadd float %inc, 8.000000e+00
+ store float %add, ptr %tlv_addr, align 4
+ ret void
+}
+
+define i32 @testFloat2() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testFloat2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C10(r2) # target-flags(ppc-tlsld) @tlv_float
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lfsx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r3, r1, 60
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwz r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testFloat2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C10@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C10@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lfsx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r3, r1, 60
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwz r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testFloat2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C10(r2) # target-flags(ppc-tlsld) @tlv_float
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: fctiwz f0, f0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stfd f0, 56(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwa r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testFloat2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C10@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 48(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 48(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C10@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: fctiwz f0, f0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stfd f0, 56(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwa r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @tlv_float)
+ %value = load float, ptr %tlv_addr, align 4
+ %conv = fptosi float %value to i32
+ ret i32 %conv
+}
+
+define void @testDouble1(double noundef %x) {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testDouble1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C11(r2) # target-flags(ppc-tlsld) @internal_tlv_double
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stfdx f1, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testDouble1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C11@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C11@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stfdx f1, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testDouble1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C13(r2) # target-flags(ppc-tlsld) @internal_tlv_double
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stxsdx f1, 0, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testDouble1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C13@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C13@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stxsdx f1, 0, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @internal_tlv_double)
+ store double %x, ptr %tlv_addr, align 8
+ ret void
+}
+
+define i32 @testDouble2() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testDouble2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C11(r2) # target-flags(ppc-tlsld) @internal_tlv_double
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lfdx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r3, r1, 60
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwz r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testDouble2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C11@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C11@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lfdx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r3, r1, 60
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwz r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testDouble2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C13(r2) # target-flags(ppc-tlsld) @internal_tlv_double
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lfdx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r3, r1, 52
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwz r3, 52(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testDouble2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 96(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C13@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C13@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lfdx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r3, r1, 68
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwz r3, 68(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 80
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @internal_tlv_double)
+ %value = load double, ptr %tlv_addr, align 8
+ %conv = fptosi double %value to i32
+ ret i32 %conv
+}
diff --git a/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-funcattr.ll b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-funcattr.ll
new file mode 100644
index 0000000..38b35dc
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-funcattr.ll
@@ -0,0 +1,105 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s \
+; RUN: | FileCheck %s --check-prefixes=COMMONCM,CHECK-SMALLCM64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: < %s | FileCheck %s --check-prefixes=COMMONCM,CHECK-LARGECM64
+
+@mySmallTLS = thread_local(localexec) global [7800 x i64] zeroinitializer, align 8 #0
+@mySmallTLS2 = thread_local(localexec) global [3000 x i64] zeroinitializer, align 8 #0
+@mySmallTLS3 = thread_local(localexec) global [3000 x i64] zeroinitializer, align 8
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull)
+
+; All accesses use a "faster" local-exec sequence directly off the thread pointer,
+; except for mySmallTLS, as this variable is over the 32KB size limit.
+define i64 @StoreLargeAccess1() #1 {
+; COMMONCM-LABEL: StoreLargeAccess1:
+; COMMONCM-NEXT: # %bb.0: # %entry
+; CHECK-SMALLCM64: ld r3, L..C0(r2) # target-flags(ppc-tprel) @mySmallTLS
+; CHECK-SMALLCM64-NEXT: li r4, 0
+; CHECK-SMALLCM64-NEXT: li r5, 23
+; CHECK-LARGECM64: addis r3, L..C0@u(r2)
+; CHECK-LARGECM64-NEXT: li r4, 0
+; CHECK-LARGECM64-NEXT: li r5, 23
+; CHECK-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; COMMONCM: ori r4, r4, 53328
+; COMMONCM-NEXT: add r3, r13, r3
+; COMMONCM-NEXT: stdx r5, r3, r4
+; COMMONCM-NEXT: li r3, 55
+; COMMONCM-NEXT: li r4, 64
+; COMMONCM-NEXT: std r3, (mySmallTLS2[TL]@le+696)-65536(r13)
+; COMMONCM-NEXT: li r3, 142
+; COMMONCM-NEXT: std r4, (mySmallTLS3[TL]@le+20000)-131072(r13)
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS)
+ %arrayidx = getelementptr inbounds i8, ptr %tls0, i32 53328
+ store i64 23, ptr %arrayidx, align 8
+ %tls1 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS2)
+ %arrayidx1 = getelementptr inbounds i8, ptr %tls1, i32 696
+ store i64 55, ptr %arrayidx1, align 8
+ %tls2 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS3)
+ %arrayidx2 = getelementptr inbounds i8, ptr %tls2, i32 20000
+ store i64 64, ptr %arrayidx2, align 8
+ %load1 = load i64, ptr %arrayidx, align 8
+ %load2 = load i64, ptr %arrayidx1, align 8
+ %add1 = add i64 %load1, 64
+ %add2 = add i64 %add1, %load2
+ ret i64 %add2
+}
+
+; Since this function does not have the 'aix-small-local-exec-tls' attribute,
+; only some local-exec variables should have the small-local-exec TLS access
+; sequence (as opposed to all of them).
+define i64 @StoreLargeAccess2() {
+; COMMONCM-LABEL: StoreLargeAccess2:
+; COMMONCM-NEXT: # %bb.0: # %entry
+; CHECK-SMALLCM64: ld r5, L..C0(r2) # target-flags(ppc-tprel) @mySmallTLS
+; CHECK-SMALLCM64-NEXT: li r3, 0
+; CHECK-SMALLCM64-NEXT: li r4, 23
+; CHECK-SMALLCM64-NEXT: ori r3, r3, 53328
+; CHECK-SMALLCM64-NEXT: add r5, r13, r5
+; CHECK-SMALLCM64-NEXT: stdx r4, r5, r3
+; CHECK-SMALLCM64-NEXT: ld r5, L..C1(r2) # target-flags(ppc-tprel) @mySmallTLS3
+; CHECK-SMALLCM64-NEXT: li r3, 55
+; CHECK-SMALLCM64-NEXT: li r4, 64
+; CHECK-SMALLCM64-NEXT: std r3, mySmallTLS2[TL]@le+696(r13)
+; CHECK-SMALLCM64-NEXT: li r3, 142
+; CHECK-SMALLCM64-NEXT: add r5, r13, r5
+; CHECK-SMALLCM64-NEXT: std r4, 20000(r5)
+; CHECK-LARGECM64: addis r3, L..C0@u(r2)
+; CHECK-LARGECM64-NEXT: li r4, 0
+; CHECK-LARGECM64-NEXT: li r5, 23
+; CHECK-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; CHECK-LARGECM64-NEXT: ori r4, r4, 53328
+; CHECK-LARGECM64-NEXT: add r3, r13, r3
+; CHECK-LARGECM64-NEXT: stdx r5, r3, r4
+; CHECK-LARGECM64-NEXT: addis r3, L..C1@u(r2)
+; CHECK-LARGECM64-NEXT: li r4, 55
+; CHECK-LARGECM64-NEXT: li r5, 64
+; CHECK-LARGECM64-NEXT: ld r3, L..C1@l(r3)
+; CHECK-LARGECM64-NEXT: std r4, mySmallTLS2[TL]@le+696(r13)
+; CHECK-LARGECM64-NEXT: add r3, r13, r3
+; CHECK-LARGECM64-NEXT: std r5, 20000(r3)
+; CHECK-LARGECM64-NEXT: li r3, 142
+; COMMONCM-NEXT: blr
+;
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS)
+ %arrayidx = getelementptr inbounds i8, ptr %tls0, i32 53328
+ store i64 23, ptr %arrayidx, align 8
+ %tls1 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS2)
+ %arrayidx1 = getelementptr inbounds i8, ptr %tls1, i32 696
+ store i64 55, ptr %arrayidx1, align 8
+ %tls2 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS3)
+ %arrayidx2 = getelementptr inbounds i8, ptr %tls2, i32 20000
+ store i64 64, ptr %arrayidx2, align 8
+ %load1 = load i64, ptr %arrayidx, align 8
+ %load2 = load i64, ptr %arrayidx1, align 8
+ %add1 = add i64 %load1, 64
+ %add2 = add i64 %add1, %load2
+ ret i64 %add2
+}
+
+attributes #0 = { "aix-small-tls" }
+attributes #1 = { "target-features"="+aix-small-local-exec-tls" }
diff --git a/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-loadaddr.ll b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-loadaddr.ll
new file mode 100644
index 0000000..c8537fb
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-loadaddr.ll
@@ -0,0 +1,222 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff -mattr=-aix-small-local-exec-tls \
+; RUN: < %s | FileCheck %s --check-prefixes=COMMONCM,SMALLCM64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: -mattr=-aix-small-local-exec-tls < %s | \
+; RUN: FileCheck %s --check-prefixes=COMMONCM,LARGECM64
+
+; Test that the 'aix-small-tls' global variable attribute generates the
+; optimized small-local-exec TLS sequence. Global variables without this
+; attribute should still generate a TOC-based local-exec access sequence.
+
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull)
+
+@a = thread_local(localexec) global [87 x i8] zeroinitializer, align 1 #0
+@a_noattr = thread_local(localexec) global [87 x i8] zeroinitializer, align 1
+@b = thread_local(localexec) global [87 x i16] zeroinitializer, align 2 #0
+@b_noattr = thread_local(localexec) global [87 x i16] zeroinitializer, align 2
+@c = thread_local(localexec) global [87 x i32] zeroinitializer, align 4 #0
+@c_noattr = thread_local(localexec) global [87 x i32] zeroinitializer, align 4
+@d = thread_local(localexec) global [87 x i64] zeroinitializer, align 8 #0
+@d_noattr = thread_local(localexec) global [87 x i64] zeroinitializer, align 8 #0
+
+@e = thread_local(localexec) global [87 x double] zeroinitializer, align 8 #0
+@e_noattr = thread_local(localexec) global [87 x double] zeroinitializer, align 8
+@f = thread_local(localexec) global [87 x float] zeroinitializer, align 4 #0
+@f_noattr = thread_local(localexec) global [87 x float] zeroinitializer, align 4
+
+define nonnull ptr @AddrTest1() {
+; COMMONCM-LABEL: AddrTest1:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, a[TL]@le+1
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 1 ptr @llvm.threadlocal.address.p0(ptr align 1 @a)
+ %arrayidx = getelementptr inbounds [87 x i8], ptr %tls0, i64 0, i64 1
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest1_NoAttr() {
+; SMALLCM64-LABEL: AddrTest1_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tprel) @a_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 1
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest1_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 1
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 1 ptr @llvm.threadlocal.address.p0(ptr align 1 @a_noattr)
+ %arrayidx = getelementptr inbounds [87 x i8], ptr %tls0, i64 0, i64 1
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest2() {
+; COMMONCM-LABEL: AddrTest2:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, b[TL]@le+4
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 2 ptr @llvm.threadlocal.address.p0(ptr align 2 @b)
+ %arrayidx = getelementptr inbounds [87 x i16], ptr %tls0, i64 0, i64 2
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest2_NoAttr() {
+; SMALLCM64-LABEL: AddrTest2_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C1(r2) # target-flags(ppc-tprel) @b_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 4
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest2_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C1@u(r2)
+; LARGECM64-NEXT: ld r3, L..C1@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 4
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 2 ptr @llvm.threadlocal.address.p0(ptr align 2 @b_noattr)
+ %arrayidx = getelementptr inbounds [87 x i16], ptr %tls0, i64 0, i64 2
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest3() {
+; COMMONCM-LABEL: AddrTest3:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, c[TL]@le+12
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @c)
+ %arrayidx = getelementptr inbounds [87 x i32], ptr %tls0, i64 0, i64 3
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest3_NoAttr() {
+; SMALLCM64-LABEL: AddrTest3_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C2(r2) # target-flags(ppc-tprel) @c_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 12
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest3_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C2@u(r2)
+; LARGECM64-NEXT: ld r3, L..C2@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 12
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @c_noattr)
+ %arrayidx = getelementptr inbounds [87 x i32], ptr %tls0, i64 0, i64 3
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest4() {
+; COMMONCM-LABEL: AddrTest4:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, c[TL]@le+56
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @c)
+ %arrayidx = getelementptr inbounds [87 x i64], ptr %tls0, i64 0, i64 7
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest4_NoAttr() {
+; SMALLCM64-LABEL: AddrTest4_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C2(r2) # target-flags(ppc-tprel) @c_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 56
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest4_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C2@u(r2)
+; LARGECM64-NEXT: ld r3, L..C2@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 56
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @c_noattr)
+ %arrayidx = getelementptr inbounds [87 x i64], ptr %tls0, i64 0, i64 7
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest5() {
+; COMMONCM-LABEL: AddrTest5:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, e[TL]@le+48
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @e)
+ %arrayidx = getelementptr inbounds [87 x double], ptr %tls0, i64 0, i64 6
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest5_NoAttr() {
+; SMALLCM64-LABEL: AddrTest5_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C3(r2) # target-flags(ppc-tprel) @e_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 48
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest5_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C3@u(r2)
+; LARGECM64-NEXT: ld r3, L..C3@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 48
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @e_noattr)
+ %arrayidx = getelementptr inbounds [87 x double], ptr %tls0, i64 0, i64 6
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest6() {
+; COMMONCM-LABEL: AddrTest6:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, f[TL]@le+16
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @f)
+ %arrayidx = getelementptr inbounds [87 x float], ptr %tls0, i64 0, i64 4
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest6_NoAttr() {
+; SMALLCM64-LABEL: AddrTest6_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C4(r2) # target-flags(ppc-tprel) @f_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 16
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest6_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C4@u(r2)
+; LARGECM64-NEXT: ld r3, L..C4@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 16
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @f_noattr)
+ %arrayidx = getelementptr inbounds [87 x float], ptr %tls0, i64 0, i64 4
+ ret ptr %arrayidx
+}
+
+attributes #0 = { "aix-small-tls" }
diff --git a/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-targetattr.ll b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-targetattr.ll
new file mode 100644
index 0000000..1e4a3b9
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-targetattr.ll
@@ -0,0 +1,53 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff -mattr=+aix-small-local-exec-tls < %s \
+; RUN: | FileCheck %s --check-prefixes=COMMONCM,SMALL-LOCAL-EXEC-SMALLCM64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: -mattr=+aix-small-local-exec-tls < %s | FileCheck %s \
+; RUN: --check-prefixes=COMMONCM,SMALL-LOCAL-EXEC-LARGECM64
+
+@mySmallTLS = thread_local(localexec) global [7800 x i64] zeroinitializer, align 8 #0
+@mySmallTLS2 = thread_local(localexec) global [3000 x i64] zeroinitializer, align 8 #0
+@mySmallTLS3 = thread_local(localexec) global [3000 x i64] zeroinitializer, align 8
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull)
+
+; Although some global variables are annotated with 'aix-small-tls', the
+; aix-small-local-exec-tls target attribute is turned on, so all accesses
+; use the "faster" local-exec sequence directly off the thread pointer.
+define i64 @StoreLargeAccess1() {
+; COMMONCM-LABEL: StoreLargeAccess1:
+; COMMONCM-NEXT: # %bb.0: # %entry
+; SMALL-LOCAL-EXEC-SMALLCM64: ld r3, L..C0(r2) # target-flags(ppc-tprel) @mySmallTLS
+; SMALL-LOCAL-EXEC-SMALLCM64-NEXT: li r4, 0
+; SMALL-LOCAL-EXEC-SMALLCM64-NEXT: li r5, 23
+; SMALL-LOCAL-EXEC-LARGECM64: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-EXEC-LARGECM64-NEXT: li r4, 0
+; SMALL-LOCAL-EXEC-LARGECM64-NEXT: li r5, 23
+; SMALL-LOCAL-EXEC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; COMMONCM: ori r4, r4, 53328
+; COMMONCM-NEXT: add r3, r13, r3
+; COMMONCM-NEXT: stdx r5, r3, r4
+; COMMONCM-NEXT: li r3, 55
+; COMMONCM-NEXT: li r4, 64
+; COMMONCM-NEXT: std r3, (mySmallTLS2[TL]@le+696)-65536(r13)
+; COMMONCM-NEXT: li r3, 142
+; COMMONCM-NEXT: std r4, (mySmallTLS3[TL]@le+20000)-131072(r13)
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS)
+ %arrayidx = getelementptr inbounds i8, ptr %tls0, i32 53328
+ store i64 23, ptr %arrayidx, align 8
+ %tls1 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS2)
+ %arrayidx1 = getelementptr inbounds i8, ptr %tls1, i32 696
+ store i64 55, ptr %arrayidx1, align 8
+ %tls2 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS3)
+ %arrayidx2 = getelementptr inbounds i8, ptr %tls2, i32 20000
+ store i64 64, ptr %arrayidx2, align 8
+ %load1 = load i64, ptr %arrayidx, align 8
+ %load2 = load i64, ptr %arrayidx1, align 8
+ %add1 = add i64 %load1, 64
+ %add2 = add i64 %add1, %load2
+ ret i64 %add2
+}
+
+attributes #0 = { "aix-small-tls" }
diff --git a/llvm/test/CodeGen/PowerPC/aix-xcoff-funcsect-explicitsect.ll b/llvm/test/CodeGen/PowerPC/aix-xcoff-funcsect-explicitsect.ll
new file mode 100644
index 0000000..4e94228
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-xcoff-funcsect-explicitsect.ll
@@ -0,0 +1,142 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr4 -mattr=-altivec -mtriple powerpc-ibm-aix-xcoff \
+; RUN: -xcoff-traceback-table=false -filetype=obj -function-sections -o %t.o < %s
+; RUN: llvm-readobj -s %t.o | FileCheck %s
+
+define dso_local signext i32 @foo1() section "sect" {
+entry:
+ ret i32 1
+}
+
+define dso_local signext i32 @foo2() section "sect2" {
+entry:
+ ret i32 2
+}
+
+define dso_local signext i32 @foo3() section "sect2" {
+entry:
+ ret i32 3
+}
+
+define dso_local signext i32 @foo4() {
+entry:
+ ret i32 4
+}
+
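+; With -function-sections, foo1 is placed in the explicit csect "sect", foo2 and
+; foo3 share the explicit csect "sect2", and foo4 (no explicit section) falls
+; back to its own .foo4 csect.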
+; CHECK: Symbol {{[{][[:space:]] *}}Index: [[#INDX:]]{{[[:space:]] *}}Name: sect
+; CHECK-NEXT: Value (RelocatableAddress): 0x0
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_HIDEXT (0x6B)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+1]]
+; CHECK-NEXT: SectionLen: 8
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 5
+; CHECK-NEXT: SymbolType: XTY_SD (0x1)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: Symbol {
+; CHECK-NEXT: Index: [[#INDX+2]]
+; CHECK-NEXT: Name: .foo1
+; CHECK-NEXT: Value (RelocatableAddress): 0x0
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_EXT (0x2)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+3]]
+; CHECK-NEXT: ContainingCsectSymbolIndex: [[#INDX]]
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 0
+; CHECK-NEXT: SymbolType: XTY_LD (0x2)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: Symbol {
+; CHECK-NEXT: Index: [[#INDX+4]]
+; CHECK-NEXT: Name: sect2
+; CHECK-NEXT: Value (RelocatableAddress): 0x20
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_HIDEXT (0x6B)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+5]]
+; CHECK-NEXT: SectionLen: 24
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 5
+; CHECK-NEXT: SymbolType: XTY_SD (0x1)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: Symbol {
+; CHECK-NEXT: Index: [[#INDX+6]]
+; CHECK-NEXT: Name: .foo2
+; CHECK-NEXT: Value (RelocatableAddress): 0x20
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_EXT (0x2)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+7]]
+; CHECK-NEXT: ContainingCsectSymbolIndex: [[#INDX+4]]
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 0
+; CHECK-NEXT: SymbolType: XTY_LD (0x2)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: Symbol {
+; CHECK-NEXT: Index: [[#INDX+8]]
+; CHECK-NEXT: Name: .foo3
+; CHECK-NEXT: Value (RelocatableAddress): 0x30
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_EXT (0x2)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+9]]
+; CHECK-NEXT: ContainingCsectSymbolIndex: [[#INDX+4]]
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 0
+; CHECK-NEXT: SymbolType: XTY_LD (0x2)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: Symbol {
+; CHECK-NEXT: Index: [[#INDX+10]]
+; CHECK-NEXT: Name: .foo4
+; CHECK-NEXT: Value (RelocatableAddress): 0x40
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_EXT (0x2)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+11]]
+; CHECK-NEXT: SectionLen: 8
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 5
+; CHECK-NEXT: SymbolType: XTY_SD (0x1)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
diff --git a/llvm/test/CodeGen/PowerPC/ctrloop-constrained-fp.ll b/llvm/test/CodeGen/PowerPC/ctrloop-constrained-fp.ll
index c1d1461..50ebe04 100644
--- a/llvm/test/CodeGen/PowerPC/ctrloop-constrained-fp.ll
+++ b/llvm/test/CodeGen/PowerPC/ctrloop-constrained-fp.ll
@@ -2,7 +2,7 @@
; RUN: llc -mtriple powerpc64le < %s | FileCheck %s
; Check constrained ops converted to call
-define void @test(ptr %cast) {
+define void @test(ptr %cast) strictfp {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %root
; CHECK-NEXT: mflr 0
@@ -51,7 +51,7 @@ for.body:
}
; Check constrained ops converted to native instruction
-define void @test2(ptr %cast) {
+define void @test2(ptr %cast) strictfp {
; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 255
diff --git a/llvm/test/CodeGen/PowerPC/fp-classify.ll b/llvm/test/CodeGen/PowerPC/fp-classify.ll
index 7de35b8..f527b3c 100644
--- a/llvm/test/CodeGen/PowerPC/fp-classify.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-classify.ll
@@ -57,30 +57,18 @@ entry:
define zeroext i1 @abs_isinfq(fp128 %x) {
; P8-LABEL: abs_isinfq:
; P8: # %bb.0: # %entry
-; P8-NEXT: mflr 0
-; P8-NEXT: stdu 1, -48(1)
-; P8-NEXT: std 0, 64(1)
-; P8-NEXT: .cfi_def_cfa_offset 48
-; P8-NEXT: .cfi_offset lr, 16
; P8-NEXT: xxswapd 0, 34
-; P8-NEXT: addi 3, 1, 32
+; P8-NEXT: addi 3, 1, -16
+; P8-NEXT: li 5, 32767
; P8-NEXT: stxvd2x 0, 0, 3
-; P8-NEXT: lbz 4, 47(1)
-; P8-NEXT: clrlwi 4, 4, 25
-; P8-NEXT: stb 4, 47(1)
-; P8-NEXT: lxvd2x 0, 0, 3
-; P8-NEXT: addis 3, 2, .LCPI2_0@toc@ha
-; P8-NEXT: addi 3, 3, .LCPI2_0@toc@l
-; P8-NEXT: xxswapd 34, 0
-; P8-NEXT: lxvd2x 0, 0, 3
-; P8-NEXT: xxswapd 35, 0
-; P8-NEXT: bl __eqkf2
-; P8-NEXT: nop
-; P8-NEXT: cntlzw 3, 3
-; P8-NEXT: srwi 3, 3, 5
-; P8-NEXT: addi 1, 1, 48
-; P8-NEXT: ld 0, 16(1)
-; P8-NEXT: mtlr 0
+; P8-NEXT: rldic 5, 5, 48, 1
+; P8-NEXT: ld 4, -8(1)
+; P8-NEXT: ld 3, -16(1)
+; P8-NEXT: clrldi 4, 4, 1
+; P8-NEXT: xor 4, 4, 5
+; P8-NEXT: or 3, 3, 4
+; P8-NEXT: cntlzd 3, 3
+; P8-NEXT: rldicl 3, 3, 58, 63
; P8-NEXT: blr
;
; P9-LABEL: abs_isinfq:
@@ -99,12 +87,13 @@ entry:
define zeroext i1 @abs_isinfornanf(float %x) {
; P8-LABEL: abs_isinfornanf:
; P8: # %bb.0: # %entry
-; P8-NEXT: addis 3, 2, .LCPI3_0@toc@ha
-; P8-NEXT: xsabsdp 0, 1
-; P8-NEXT: lfs 1, .LCPI3_0@toc@l(3)
-; P8-NEXT: li 3, 1
-; P8-NEXT: fcmpu 0, 0, 1
-; P8-NEXT: isellt 3, 0, 3
+; P8-NEXT: xscvdpspn 0, 1
+; P8-NEXT: lis 4, 32639
+; P8-NEXT: ori 4, 4, 65535
+; P8-NEXT: mffprwz 3, 0
+; P8-NEXT: clrlwi 3, 3, 1
+; P8-NEXT: sub 3, 4, 3
+; P8-NEXT: rldicl 3, 3, 1, 63
; P8-NEXT: blr
;
; P9-LABEL: abs_isinfornanf:
@@ -123,12 +112,15 @@ entry:
define zeroext i1 @abs_isinfornan(double %x) {
; P8-LABEL: abs_isinfornan:
; P8: # %bb.0: # %entry
-; P8-NEXT: addis 3, 2, .LCPI4_0@toc@ha
-; P8-NEXT: xsabsdp 0, 1
-; P8-NEXT: lfs 1, .LCPI4_0@toc@l(3)
-; P8-NEXT: li 3, 1
-; P8-NEXT: fcmpu 0, 0, 1
-; P8-NEXT: isellt 3, 0, 3
+; P8-NEXT: mffprd 3, 1
+; P8-NEXT: li 4, -33
+; P8-NEXT: rldicl 4, 4, 47, 1
+; P8-NEXT: sradi 5, 4, 63
+; P8-NEXT: clrldi 3, 3, 1
+; P8-NEXT: rldicl 6, 3, 1, 63
+; P8-NEXT: subc 3, 4, 3
+; P8-NEXT: adde 3, 6, 5
+; P8-NEXT: xori 3, 3, 1
; P8-NEXT: blr
;
; P9-LABEL: abs_isinfornan:
@@ -147,53 +139,18 @@ entry:
define zeroext i1 @abs_isinfornanq(fp128 %x) {
; P8-LABEL: abs_isinfornanq:
; P8: # %bb.0: # %entry
-; P8-NEXT: mflr 0
-; P8-NEXT: stdu 1, -112(1)
-; P8-NEXT: std 0, 128(1)
-; P8-NEXT: .cfi_def_cfa_offset 112
-; P8-NEXT: .cfi_offset lr, 16
-; P8-NEXT: .cfi_offset r30, -16
-; P8-NEXT: .cfi_offset v30, -48
-; P8-NEXT: .cfi_offset v31, -32
-; P8-NEXT: li 3, 64
; P8-NEXT: xxswapd 0, 34
-; P8-NEXT: std 30, 96(1) # 8-byte Folded Spill
-; P8-NEXT: stvx 30, 1, 3 # 16-byte Folded Spill
-; P8-NEXT: li 3, 80
-; P8-NEXT: stvx 31, 1, 3 # 16-byte Folded Spill
-; P8-NEXT: addi 3, 1, 48
+; P8-NEXT: addi 3, 1, -16
+; P8-NEXT: li 4, -3
; P8-NEXT: stxvd2x 0, 0, 3
-; P8-NEXT: lbz 4, 63(1)
-; P8-NEXT: clrlwi 4, 4, 25
-; P8-NEXT: stb 4, 63(1)
-; P8-NEXT: lxvd2x 0, 0, 3
-; P8-NEXT: addis 3, 2, .LCPI5_0@toc@ha
-; P8-NEXT: addi 3, 3, .LCPI5_0@toc@l
-; P8-NEXT: xxswapd 63, 0
-; P8-NEXT: lxvd2x 0, 0, 3
-; P8-NEXT: vmr 2, 31
-; P8-NEXT: xxswapd 62, 0
-; P8-NEXT: vmr 3, 30
-; P8-NEXT: bl __eqkf2
-; P8-NEXT: nop
-; P8-NEXT: cntlzw 3, 3
-; P8-NEXT: vmr 2, 31
-; P8-NEXT: vmr 3, 30
-; P8-NEXT: srwi 30, 3, 5
-; P8-NEXT: bl __unordkf2
-; P8-NEXT: nop
-; P8-NEXT: cntlzw 3, 3
-; P8-NEXT: li 4, 80
-; P8-NEXT: lvx 31, 1, 4 # 16-byte Folded Reload
-; P8-NEXT: li 4, 64
-; P8-NEXT: srwi 3, 3, 5
-; P8-NEXT: lvx 30, 1, 4 # 16-byte Folded Reload
+; P8-NEXT: rldicl 4, 4, 47, 1
+; P8-NEXT: ld 3, -8(1)
+; P8-NEXT: sradi 5, 4, 63
+; P8-NEXT: clrldi 3, 3, 1
+; P8-NEXT: rldicl 6, 3, 1, 63
+; P8-NEXT: subc 3, 4, 3
+; P8-NEXT: adde 3, 6, 5
; P8-NEXT: xori 3, 3, 1
-; P8-NEXT: or 3, 3, 30
-; P8-NEXT: ld 30, 96(1) # 8-byte Folded Reload
-; P8-NEXT: addi 1, 1, 112
-; P8-NEXT: ld 0, 16(1)
-; P8-NEXT: mtlr 0
; P8-NEXT: blr
;
; P9-LABEL: abs_isinfornanq:
diff --git a/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir b/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir
index 3a312d2..f3ef95b 100644
--- a/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir
+++ b/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir
@@ -130,7 +130,7 @@ body: |
%22:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @c
%10:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @e
%13:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @a
- %14:g8rc_and_g8rc_nox0 = ADDItocL killed %13, @a, implicit $x2
+ %14:g8rc_and_g8rc_nox0 = ADDItocL8 killed %13, @a, implicit $x2
bb.2.while.body:
successors: %bb.4(0x30000000), %bb.3(0x50000000)
diff --git a/llvm/test/CodeGen/PowerPC/rldimi.ll b/llvm/test/CodeGen/PowerPC/rldimi.ll
index 322975f..78ea9aa 100644
--- a/llvm/test/CodeGen/PowerPC/rldimi.ll
+++ b/llvm/test/CodeGen/PowerPC/rldimi.ll
@@ -59,8 +59,8 @@ entry:
ret i64 %8
}
-define i64 @rldimi_intrinsic(i64 %a) {
-; CHECK-LABEL: rldimi_intrinsic:
+define i64 @rldimi4(i64 %a) {
+; CHECK-LABEL: rldimi4:
; CHECK: # %bb.0:
; CHECK-NEXT: rldimi 3, 3, 8, 0
; CHECK-NEXT: rldimi 3, 3, 16, 0
@@ -72,4 +72,71 @@ define i64 @rldimi_intrinsic(i64 %a) {
ret i64 %r3
}
+define i64 @rldimi5(i64 %a, i64 %b) {
+; CHECK-LABEL: rldimi5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: rldimi 4, 3, 8, 40
+; CHECK-NEXT: mr 3, 4
+; CHECK-NEXT: blr
+ %r = call i64 @llvm.ppc.rldimi(i64 %a, i64 %b, i32 8, i64 16776960) ; 0xffff << 8
+ ret i64 %r
+}
+
+define i64 @rldimi6(i64 %a, i64 %b) {
+; CHECK-LABEL: rldimi6:
+; CHECK: # %bb.0:
+; CHECK-NEXT: rotldi 3, 3, 1
+; CHECK-NEXT: rldimi 4, 3, 7, 41
+; CHECK-NEXT: mr 3, 4
+; CHECK-NEXT: blr
+ %r = call i64 @llvm.ppc.rldimi(i64 %a, i64 %b, i32 8, i64 8388480) ; 0xffff << 7
+ ret i64 %r
+}
+
+define i64 @rldimi7(i64 %a, i64 %b) {
+; CHECK-LABEL: rldimi7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: rotldi 3, 3, 63
+; CHECK-NEXT: rldimi 4, 3, 9, 39
+; CHECK-NEXT: mr 3, 4
+; CHECK-NEXT: blr
+ %r = call i64 @llvm.ppc.rldimi(i64 %a, i64 %b, i32 8, i64 33553920) ; 0xffff << 9
+ ret i64 %r
+}
+
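+; Degenerate masks: with a zero mask only %b survives, so rldimi8 and rldimi9
+; become a register move; with an all-ones mask the result is just %a rotated
+; by the shift amount, so rldimi10 needs no code and rldimi11 is a rotldi.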
+define i64 @rldimi8(i64 %a, i64 %b) {
+; CHECK-LABEL: rldimi8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mr 3, 4
+; CHECK-NEXT: blr
+ %r = call i64 @llvm.ppc.rldimi(i64 %a, i64 %b, i32 0, i64 0)
+ ret i64 %r
+}
+
+define i64 @rldimi9(i64 %a, i64 %b) {
+; CHECK-LABEL: rldimi9:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mr 3, 4
+; CHECK-NEXT: blr
+ %r = call i64 @llvm.ppc.rldimi(i64 %a, i64 %b, i32 63, i64 0)
+ ret i64 %r
+}
+
+define i64 @rldimi10(i64 %a, i64 %b) {
+; CHECK-LABEL: rldimi10:
+; CHECK: # %bb.0:
+; CHECK-NEXT: blr
+ %r = call i64 @llvm.ppc.rldimi(i64 %a, i64 %b, i32 0, i64 -1)
+ ret i64 %r
+}
+
+define i64 @rldimi11(i64 %a, i64 %b) {
+; CHECK-LABEL: rldimi11:
+; CHECK: # %bb.0:
+; CHECK-NEXT: rotldi 3, 3, 8
+; CHECK-NEXT: blr
+ %r = call i64 @llvm.ppc.rldimi(i64 %a, i64 %b, i32 8, i64 -1)
+ ret i64 %r
+}
+
declare i64 @llvm.ppc.rldimi(i64, i64, i32 immarg, i64 immarg)
diff --git a/llvm/test/CodeGen/PowerPC/rlwimi.ll b/llvm/test/CodeGen/PowerPC/rlwimi.ll
index 8b126cd..8da7695 100644
--- a/llvm/test/CodeGen/PowerPC/rlwimi.ll
+++ b/llvm/test/CodeGen/PowerPC/rlwimi.ll
@@ -107,11 +107,51 @@ entry:
define i32 @test9(i32 %a, i32 %b) {
; CHECK-LABEL: test9:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: rlwimi 3, 4, 8, 20, 26
+; CHECK-NEXT: rlwimi 4, 3, 8, 20, 26
+; CHECK-NEXT: mr 3, 4
; CHECK-NEXT: blr
entry:
%r = call i32 @llvm.ppc.rlwimi(i32 %a, i32 %b, i32 8, i32 4064)
ret i32 %r
}
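+; Degenerate masks: an all-ones mask makes the result just %a rotated by the
+; shift (test10, test11), a zero mask leaves only %b (test12), and test13 uses
+; a normal mask with a zero shift.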
+define i32 @test10(i32 %a, i32 %b) {
+; CHECK-LABEL: test10:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: blr
+entry:
+ %r = call i32 @llvm.ppc.rlwimi(i32 %a, i32 %b, i32 0, i32 -1)
+ ret i32 %r
+}
+
+define i32 @test11(i32 %a, i32 %b) {
+; CHECK-LABEL: test11:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: rotlwi 3, 3, 8
+; CHECK-NEXT: blr
+entry:
+ %r = call i32 @llvm.ppc.rlwimi(i32 %a, i32 %b, i32 8, i32 -1)
+ ret i32 %r
+}
+
+define i32 @test12(i32 %a, i32 %b) {
+; CHECK-LABEL: test12:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: mr 3, 4
+; CHECK-NEXT: blr
+entry:
+ %r = call i32 @llvm.ppc.rlwimi(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %r
+}
+
+define i32 @test13(i32 %a, i32 %b) {
+; CHECK-LABEL: test13:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: rlwimi 3, 4, 0, 27, 19
+; CHECK-NEXT: blr
+entry:
+ %r = call i32 @llvm.ppc.rlwimi(i32 %a, i32 %b, i32 0, i32 4064)
+ ret i32 %r
+}
+
declare i32 @llvm.ppc.rlwimi(i32, i32, i32 immarg, i32 immarg)
diff --git a/llvm/test/CodeGen/PowerPC/rlwinm.ll b/llvm/test/CodeGen/PowerPC/rlwinm.ll
index c6d4e5b..363eb17 100644
--- a/llvm/test/CodeGen/PowerPC/rlwinm.ll
+++ b/llvm/test/CodeGen/PowerPC/rlwinm.ll
@@ -97,4 +97,24 @@ entry:
ret i32 %r
}
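+; A zero mask makes rlwnm produce 0 (test10), and an all-ones mask makes it a
+; plain rotate (test11).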
+define i32 @test10(i32 %a, i32 %s) {
+; CHECK-LABEL: test10:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li 3, 0
+; CHECK-NEXT: blr
+entry:
+ %r = call i32 @llvm.ppc.rlwnm(i32 %a, i32 %s, i32 0)
+ ret i32 %r
+}
+
+define i32 @test11(i32 %a, i32 %s) {
+; CHECK-LABEL: test11:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: rotlw 3, 3, 4
+; CHECK-NEXT: blr
+entry:
+ %r = call i32 @llvm.ppc.rlwnm(i32 %a, i32 %s, i32 -1)
+ ret i32 %r
+}
+
declare i32 @llvm.ppc.rlwnm(i32, i32, i32 immarg)
diff --git a/llvm/test/CodeGen/PowerPC/scalar-double-ldst.ll b/llvm/test/CodeGen/PowerPC/scalar-double-ldst.ll
index 6f68679..798637b 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-double-ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-double-ldst.ll
@@ -7281,3 +7281,61 @@ entry:
store double %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
+
+define dso_local void @st_reversed_double_from_i8(ptr %ptr) {
+; CHECK-P10-LABEL: st_reversed_double_from_i8:
+; CHECK-P10: # %bb.0: # %entry
+; CHECK-P10-NEXT: li r4, 8
+; CHECK-P10-NEXT: lxsibzx f0, 0, r3
+; CHECK-P10-NEXT: xxspltidp vs2, -1023410176
+; CHECK-P10-NEXT: lxsibzx f1, r3, r4
+; CHECK-P10-NEXT: xscvuxddp f0, f0
+; CHECK-P10-NEXT: xscvuxddp f1, f1
+; CHECK-P10-NEXT: xsadddp f0, f0, f2
+; CHECK-P10-NEXT: xsadddp f1, f1, f2
+; CHECK-P10-NEXT: stfd f1, 0(r3)
+; CHECK-P10-NEXT: stfd f0, 8(r3)
+; CHECK-P10-NEXT: blr
+;
+; CHECK-P9-LABEL: st_reversed_double_from_i8:
+; CHECK-P9: # %bb.0: # %entry
+; CHECK-P9-NEXT: li r4, 8
+; CHECK-P9-NEXT: lxsibzx f0, 0, r3
+; CHECK-P9-NEXT: lxsibzx f1, r3, r4
+; CHECK-P9-NEXT: addis r4, r2, .LCPI300_0@toc@ha
+; CHECK-P9-NEXT: lfs f2, .LCPI300_0@toc@l(r4)
+; CHECK-P9-NEXT: xscvuxddp f0, f0
+; CHECK-P9-NEXT: xscvuxddp f1, f1
+; CHECK-P9-NEXT: xsadddp f0, f0, f2
+; CHECK-P9-NEXT: xsadddp f1, f1, f2
+; CHECK-P9-NEXT: stfd f0, 8(r3)
+; CHECK-P9-NEXT: stfd f1, 0(r3)
+; CHECK-P9-NEXT: blr
+;
+; CHECK-P8-LABEL: st_reversed_double_from_i8:
+; CHECK-P8: # %bb.0: # %entry
+; CHECK-P8-NEXT: lbz r4, 0(r3)
+; CHECK-P8-NEXT: lbz r5, 8(r3)
+; CHECK-P8-NEXT: mtfprwz f0, r4
+; CHECK-P8-NEXT: mtfprwz f1, r5
+; CHECK-P8-NEXT: addis r4, r2, .LCPI300_0@toc@ha
+; CHECK-P8-NEXT: lfs f2, .LCPI300_0@toc@l(r4)
+; CHECK-P8-NEXT: xscvuxddp f0, f0
+; CHECK-P8-NEXT: xscvuxddp f1, f1
+; CHECK-P8-NEXT: xsadddp f0, f0, f2
+; CHECK-P8-NEXT: xsadddp f1, f1, f2
+; CHECK-P8-NEXT: stfd f1, 0(r3)
+; CHECK-P8-NEXT: stfd f0, 8(r3)
+; CHECK-P8-NEXT: blr
+entry:
+ %idx = getelementptr inbounds i8, ptr %ptr, i64 8
+ %i0 = load i8, ptr %ptr, align 1
+ %i1 = load i8, ptr %idx, align 1
+ %f0 = uitofp i8 %i0 to double
+ %f1 = uitofp i8 %i1 to double
+ %a0 = fadd double %f0, -1.280000e+02
+ %a1 = fadd double %f1, -1.280000e+02
+ store double %a1, ptr %ptr, align 8
+ store double %a0, ptr %idx, align 8
+ ret void
+}
diff --git a/llvm/test/CodeGen/PowerPC/scalar-float-ldst.ll b/llvm/test/CodeGen/PowerPC/scalar-float-ldst.ll
index 824dd4c..f396057 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-float-ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-float-ldst.ll
@@ -7271,3 +7271,61 @@ entry:
store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
+
+define dso_local void @st_reversed_float_from_i8(ptr %ptr) {
+; CHECK-P10-LABEL: st_reversed_float_from_i8:
+; CHECK-P10: # %bb.0: # %entry
+; CHECK-P10-NEXT: li r4, 8
+; CHECK-P10-NEXT: lxsibzx f0, 0, r3
+; CHECK-P10-NEXT: xxspltidp vs2, -1023410176
+; CHECK-P10-NEXT: lxsibzx f1, r3, r4
+; CHECK-P10-NEXT: xscvuxdsp f0, f0
+; CHECK-P10-NEXT: xscvuxdsp f1, f1
+; CHECK-P10-NEXT: xsaddsp f0, f0, f2
+; CHECK-P10-NEXT: xsaddsp f1, f1, f2
+; CHECK-P10-NEXT: stfs f0, 8(r3)
+; CHECK-P10-NEXT: stfs f1, 0(r3)
+; CHECK-P10-NEXT: blr
+;
+; CHECK-P9-LABEL: st_reversed_float_from_i8:
+; CHECK-P9: # %bb.0: # %entry
+; CHECK-P9-NEXT: li r4, 8
+; CHECK-P9-NEXT: lxsibzx f0, 0, r3
+; CHECK-P9-NEXT: lxsibzx f1, r3, r4
+; CHECK-P9-NEXT: addis r4, r2, .LCPI300_0@toc@ha
+; CHECK-P9-NEXT: lfs f2, .LCPI300_0@toc@l(r4)
+; CHECK-P9-NEXT: xscvuxdsp f0, f0
+; CHECK-P9-NEXT: xscvuxdsp f1, f1
+; CHECK-P9-NEXT: xsaddsp f0, f0, f2
+; CHECK-P9-NEXT: xsaddsp f1, f1, f2
+; CHECK-P9-NEXT: stfs f0, 8(r3)
+; CHECK-P9-NEXT: stfs f1, 0(r3)
+; CHECK-P9-NEXT: blr
+;
+; CHECK-P8-LABEL: st_reversed_float_from_i8:
+; CHECK-P8: # %bb.0: # %entry
+; CHECK-P8-NEXT: lbz r4, 0(r3)
+; CHECK-P8-NEXT: lbz r5, 8(r3)
+; CHECK-P8-NEXT: mtfprwz f0, r4
+; CHECK-P8-NEXT: mtfprwz f1, r5
+; CHECK-P8-NEXT: addis r4, r2, .LCPI300_0@toc@ha
+; CHECK-P8-NEXT: lfs f2, .LCPI300_0@toc@l(r4)
+; CHECK-P8-NEXT: xscvuxdsp f0, f0
+; CHECK-P8-NEXT: xscvuxdsp f1, f1
+; CHECK-P8-NEXT: xsaddsp f0, f0, f2
+; CHECK-P8-NEXT: xsaddsp f1, f1, f2
+; CHECK-P8-NEXT: stfs f1, 0(r3)
+; CHECK-P8-NEXT: stfs f0, 8(r3)
+; CHECK-P8-NEXT: blr
+entry:
+ %idx = getelementptr inbounds i8, ptr %ptr, i64 8
+ %i0 = load i8, ptr %ptr, align 1
+ %i1 = load i8, ptr %idx, align 1
+ %f0 = uitofp i8 %i0 to float
+ %f1 = uitofp i8 %i1 to float
+ %a0 = fadd float %f0, -1.280000e+02
+ %a1 = fadd float %f1, -1.280000e+02
+ store float %a1, ptr %ptr, align 8
+ store float %a0, ptr %idx, align 8
+ ret void
+}
diff --git a/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll b/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll
index c8278e5..8748767 100644
--- a/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll
+++ b/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll
@@ -29,9 +29,7 @@ define dso_local signext i32 @main(i32 signext %argc, ptr nocapture readnone %ar
; CHECK-NEXT: nop
; CHECK-NEXT: # kill: def $r3 killed $r3 killed $x3
; CHECK-NEXT: cmpwi 3, 0
-; CHECK-NEXT: crmove 20, 10
; CHECK-NEXT: crorc 20, 10, 2
-; CHECK-NEXT: crmove 21, 2
; CHECK-NEXT: bc 4, 20, .LBB0_4
; CHECK-NEXT: # %bb.2: # %if.end5
; CHECK-NEXT: addis 3, 2, .L.str@toc@ha
@@ -76,11 +74,9 @@ define dso_local signext i32 @main(i32 signext %argc, ptr nocapture readnone %ar
; BE-NEXT: addi 3, 31, 128
; BE-NEXT: bl _setjmp
; BE-NEXT: nop
-; BE-NEXT: crmove 20, 10
; BE-NEXT: # kill: def $r3 killed $r3 killed $x3
; BE-NEXT: cmpwi 3, 0
; BE-NEXT: crorc 20, 10, 2
-; BE-NEXT: crmove 21, 2
; BE-NEXT: bc 4, 20, .LBB0_4
; BE-NEXT: # %bb.2: # %if.end5
; BE-NEXT: addis 3, 2, .L.str@toc@ha
diff --git a/llvm/test/CodeGen/PowerPC/toc-data-large-array.ll b/llvm/test/CodeGen/PowerPC/toc-data-large-array.ll
new file mode 100644
index 0000000..90f40d9
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/toc-data-large-array.ll
@@ -0,0 +1,16 @@
+; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s --check-prefix CHECK-ERROR
+; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s --check-prefix CHECK-ERROR
+
+@a = global [5 x i16] zeroinitializer, align 2 #0
+
+; Function Attrs: noinline
+define i16 @foo() #1 {
+entry:
+ %0 = load i16, ptr @a, align 2
+ ret i16 %0
+}
+
+attributes #0 = { "toc-data" }
+attributes #1 = { noinline }
+
+; CHECK-ERROR: LLVM ERROR: A GlobalVariable with size larger than a TOC entry is not currently supported by the toc data transformation.
diff --git a/llvm/test/CodeGen/PowerPC/toc-data-large-array2.ll b/llvm/test/CodeGen/PowerPC/toc-data-large-array2.ll
new file mode 100644
index 0000000..f870e99
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/toc-data-large-array2.ll
@@ -0,0 +1,8 @@
+; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s --check-prefix CHECK-ERROR
+; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s --check-prefix CHECK-ERROR
+
+@a = global [5 x i16] zeroinitializer, align 2 #0
+
+attributes #0 = { "toc-data" }
+
+; CHECK-ERROR: LLVM ERROR: A GlobalVariable with size larger than a TOC entry is not currently supported by the toc data transformation.
diff --git a/llvm/test/CodeGen/PowerPC/toc-data-struct-array.ll b/llvm/test/CodeGen/PowerPC/toc-data-struct-array.ll
new file mode 100644
index 0000000..a5c9a8b
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/toc-data-struct-array.ll
@@ -0,0 +1,110 @@
+; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s | FileCheck %s --check-prefix CHECK
+; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s | FileCheck %s --check-prefix CHECK
+
+; RUN: llc -filetype=obj -mtriple powerpc-ibm-aix-xcoff < %s -o %t32.o
+; RUN: llvm-readobj %t32.o --syms | FileCheck %s --check-prefix=OBJ32
+; RUN: llc -filetype=obj -mtriple powerpc64-ibm-aix-xcoff < %s -o %t64.o
+; RUN: llvm-readobj %t64.o --syms | FileCheck %s --check-prefix=OBJ64
+
+%struct.small_struct = type { i16 }
+
+@a = global %struct.small_struct zeroinitializer, align 2 #0
+@b = global [2 x i16] zeroinitializer, align 2 #0
+
+; Function Attrs: noinline
+define i16 @foo() #1 {
+entry:
+ %0 = load i16, ptr @a, align 2
+ %1 = load i16, ptr @b, align 2
+ %add = add nsw i16 %0, %1
+ ret i16 %add
+}
+
+attributes #0 = { "toc-data" }
+attributes #1 = { noinline }
+
+; CHECK: .toc
+; CHECK-NEXT: .csect a[TD],2
+; CHECK-NEXT: .globl a[TD] # @a
+; CHECK-NEXT: .align 1
+; CHECK-NEXT: .space 2
+; CHECK-NEXT: .csect b[TD],2
+; CHECK-NEXT: .globl b[TD] # @b
+; CHECK-NEXT: .align 1
+; CHECK-NEXT: .space 4
+
+; OBJ32: Symbol {
+; OBJ32: Name: a
+; OBJ32-NEXT: Value (RelocatableAddress): 0x3C
+; OBJ32-NEXT: Section: .data
+; OBJ32-NEXT: Type: 0x0
+; OBJ32-NEXT: StorageClass: C_EXT (0x2)
+; OBJ32-NEXT: NumberOfAuxEntries: 1
+; OBJ32-NEXT: CSECT Auxiliary Entry {
+; OBJ32-NEXT: Index: {{[0-9]+}}
+; OBJ32-NEXT: SectionLen: 2
+; OBJ32-NEXT: ParameterHashIndex: 0x0
+; OBJ32-NEXT: TypeChkSectNum: 0x0
+; OBJ32-NEXT: SymbolAlignmentLog2: 2
+; OBJ32-NEXT: SymbolType: XTY_SD (0x1)
+; OBJ32-NEXT: StorageMappingClass: XMC_TD (0x10)
+; OBJ32-NEXT: StabInfoIndex: 0x0
+; OBJ32-NEXT: StabSectNum: 0x0
+; OBJ32-NEXT: }
+; OBJ32-NEXT: }
+; OBJ32-NEXT: Symbol {
+; OBJ32: Name: b
+; OBJ32-NEXT: Value (RelocatableAddress): 0x40
+; OBJ32-NEXT: Section: .data
+; OBJ32-NEXT: Type: 0x0
+; OBJ32-NEXT: StorageClass: C_EXT (0x2)
+; OBJ32-NEXT: NumberOfAuxEntries: 1
+; OBJ32-NEXT: CSECT Auxiliary Entry {
+; OBJ32-NEXT: Index: {{[0-9]+}}
+; OBJ32-NEXT: SectionLen: 4
+; OBJ32-NEXT: ParameterHashIndex: 0x0
+; OBJ32-NEXT: TypeChkSectNum: 0x0
+; OBJ32-NEXT: SymbolAlignmentLog2: 2
+; OBJ32-NEXT: SymbolType: XTY_SD (0x1)
+; OBJ32-NEXT: StorageMappingClass: XMC_TD (0x10)
+; OBJ32-NEXT: StabInfoIndex: 0x0
+; OBJ32-NEXT: StabSectNum: 0x0
+; OBJ32-NEXT: }
+; OBJ32-NEXT: }
+
+; OBJ64: Symbol {
+; OBJ64: Name: a
+; OBJ64-NEXT: Value (RelocatableAddress): 0x48
+; OBJ64-NEXT: Section: .data
+; OBJ64-NEXT: Type: 0x0
+; OBJ64-NEXT: StorageClass: C_EXT (0x2)
+; OBJ64-NEXT: NumberOfAuxEntries: 1
+; OBJ64-NEXT: CSECT Auxiliary Entry {
+; OBJ64-NEXT: Index: {{[0-9]+}}
+; OBJ64-NEXT: SectionLen: 2
+; OBJ64-NEXT: ParameterHashIndex: 0x0
+; OBJ64-NEXT: TypeChkSectNum: 0x0
+; OBJ64-NEXT: SymbolAlignmentLog2: 2
+; OBJ64-NEXT: SymbolType: XTY_SD (0x1)
+; OBJ64-NEXT: StorageMappingClass: XMC_TD (0x10)
+; OBJ64-NEXT: Auxiliary Type: AUX_CSECT (0xFB)
+; OBJ64-NEXT: }
+; OBJ64-NEXT: }
+; OBJ64-NEXT: Symbol {
+; OBJ64: Name: b
+; OBJ64-NEXT: Value (RelocatableAddress): 0x4C
+; OBJ64-NEXT: Section: .data
+; OBJ64-NEXT: Type: 0x0
+; OBJ64-NEXT: StorageClass: C_EXT (0x2)
+; OBJ64-NEXT: NumberOfAuxEntries: 1
+; OBJ64-NEXT: CSECT Auxiliary Entry {
+; OBJ64-NEXT: Index: {{[0-9]+}}
+; OBJ64-NEXT: SectionLen: 4
+; OBJ64-NEXT: ParameterHashIndex: 0x0
+; OBJ64-NEXT: TypeChkSectNum: 0x0
+; OBJ64-NEXT: SymbolAlignmentLog2: 2
+; OBJ64-NEXT: SymbolType: XTY_SD (0x1)
+; OBJ64-NEXT: StorageMappingClass: XMC_TD (0x10)
+; OBJ64-NEXT: Auxiliary Type: AUX_CSECT (0xFB)
+; OBJ64-NEXT: }
+; OBJ64-NEXT: }
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir
new file mode 100644
index 0000000..42bf321
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir
@@ -0,0 +1,345 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s
+
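+# Check that G_SELECT on scalable vector operands is selected to a
+# PseudoVMERGE_VVM pseudo of the matching LMUL, with the mask copied into $v0.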
+---
+name: select_nxv1i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 2 x s8>) = G_SELECT %0(<vscale x 2 x s1>), %1, %1
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: select_nxv4i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 8 x s8>) = G_SELECT %0(<vscale x 8 x s1>), %1, %1
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: select_nxv16i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 32 x s8>) = G_SELECT %0(<vscale x 32 x s1>), %1, %1
+ $v8m4 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: select_nxv64i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv64i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv64i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 1 x s16>) = G_SELECT %0(<vscale x 1 x s1>), %1, %1
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: select_nxv2i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 4 x s16>) = G_SELECT %0(<vscale x 4 x s1>), %1, %1
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: select_nxv8i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV32I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV64I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 16 x s16>) = G_SELECT %0(<vscale x 16 x s1>), %1, %1
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: select_nxv32i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv32i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv32i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 1 x s32>) = G_SELECT %0(<vscale x 1 x s1>), %1, %1
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: select_nxv2i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i32
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV32I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv2i32
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV64I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 4 x s32>) = G_SELECT %0(<vscale x 4 x s1>), %1, %1
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: select_nxv8i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i32
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV32I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv8i32
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV64I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 16 x s32>) = G_SELECT %0(<vscale x 16 x s1>), %1, %1
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: select_nxv1i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i64
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
+ ; RV32I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv1i64
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
+ ; RV64I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 2 x s64>) = G_SELECT %0(<vscale x 2 x s1>), %1, %1
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: select_nxv4i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i64
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
+ ; RV32I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv4i64
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
+ ; RV64I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 8 x s64>) = G_SELECT %0(<vscale x 8 x s1>), %1, %1
+ $v8m8 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale32.mir
new file mode 100644
index 0000000..27dfb3f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale32.mir
@@ -0,0 +1,300 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v,+m -run-pass=instruction-select \
+# RUN: -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
+
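+# Each test computes a multiple of vscale (vlenb / 8), so selection turns
+# G_READ_VLENB plus the shift/multiply arithmetic into PseudoReadVLENB
+# followed by SRLI/SLLI and, for non-power-of-two factors, a MUL.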
+---
+name: test_1_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_1_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 3
+ %0:gprb(s32) = G_LSHR %1, %2(s32)
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_2_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_2_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 2
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 2
+ %0:gprb(s32) = G_LSHR %1, %2(s32)
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_3_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_3_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 3
+ %3:gprb(s32) = G_LSHR %1, %2(s32)
+ %4:gprb(s32) = G_CONSTANT i32 3
+ %0:gprb(s32) = G_MUL %3, %4
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_4_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_4_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 1
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 1
+ %0:gprb(s32) = G_LSHR %1, %2(s32)
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_8_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_8_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: $x10 = COPY [[PseudoReadVLENB]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s32) = G_READ_VLENB
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_16_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_16_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[PseudoReadVLENB]], 1
+ ; CHECK-NEXT: $x10 = COPY [[SLLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 1
+ %0:gprb(s32) = G_SHL %1, %2(s32)
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_40_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_40_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 5
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[PseudoReadVLENB]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 5
+ %0:gprb(s32) = G_MUL %1, %2
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_1_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_1_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 1
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_2_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_2_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 2
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 2
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_3_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_3_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 3
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_4_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_4_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 4
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 4
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_8_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_8_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 8
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_16_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_16_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 16
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 16
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_40_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_40_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 40
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 40
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale64.mir
new file mode 100644
index 0000000..4a96be2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale64.mir
@@ -0,0 +1,139 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v,+m -run-pass=instruction-select \
+# RUN: -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
+
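+# Same vscale arithmetic as vscale32.mir, selected on riscv64: PseudoReadVLENB
+# plus SRLI/SLLI, with a MUL for non-power-of-two factors.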
+---
+name: test_1
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_1
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 3
+ %2:gprb(s64) = G_LSHR %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_2
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_2
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 2
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 2
+ %2:gprb(s64) = G_LSHR %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_3
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_3
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 3
+ %2:gprb(s64) = G_LSHR %0, %1(s64)
+ %3:gprb(s64) = G_CONSTANT i64 3
+ %4:gprb(s64) = G_MUL %2, %3
+ $x10 = COPY %4(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_4
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 1
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 1
+ %2:gprb(s64) = G_LSHR %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_8
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: $x10 = COPY [[PseudoReadVLENB]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ $x10 = COPY %0(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_16
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[PseudoReadVLENB]], 1
+ ; CHECK-NEXT: $x10 = COPY [[SLLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 1
+ %2:gprb(s64) = G_SHL %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_40
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_40
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 5
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[PseudoReadVLENB]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 5
+ %2:gprb(s64) = G_MUL %0, %1
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/trap.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/trap.mir
index 11789a0..5f52030 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/trap.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/trap.mir
@@ -14,7 +14,7 @@ body: |
; CHECK-LABEL: name: test_trap
; CHECK: UNIMP
; CHECK-NEXT: PseudoRET
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
PseudoRET
...
@@ -28,7 +28,7 @@ body: |
; CHECK-LABEL: name: test_debugtrap
; CHECK: EBREAK
; CHECK-NEXT: PseudoRET
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.debugtrap)
+ G_DEBUGTRAP
PseudoRET
...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll
new file mode 100644
index 0000000..31b3c3f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll
@@ -0,0 +1,948 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
+
+define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx1i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx1i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 1 x i8>, ptr %pa
+ ret <vscale x 1 x i8> %va
+}
+
+define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i8>, ptr %pa
+ ret <vscale x 2 x i8> %va
+}
+
+define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx4i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 4 x i8>, ptr %pa
+ ret <vscale x 4 x i8> %va
+}
+
+define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx8i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx8i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 8 x i8>, ptr %pa
+ ret <vscale x 8 x i8> %va
+}
+
+define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx16i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx16i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 16 x i8>, ptr %pa
+ ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 32 x i8> @vload_nx32i8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx32i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: vload_nx32i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %va = load <vscale x 32 x i8>, ptr %pa
+ ret <vscale x 32 x i8> %va
+}
+
+define <vscale x 64 x i8> @vload_nx64i8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx64i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: vload_nx64i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %va = load <vscale x 64 x i8>, ptr %pa
+ ret <vscale x 64 x i8> %va
+}
+
+define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx1i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx1i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 1 x i16>, ptr %pa
+ ret <vscale x 1 x i16> %va
+}
+
+define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i16>, ptr %pa
+ ret <vscale x 2 x i16> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx4i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 4 x i16>, ptr %pa
+ ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx8i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx8i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 8 x i16>, ptr %pa
+ ret <vscale x 8 x i16> %va
+}
+
+define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx16i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: vload_nx16i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ ; RV64-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %va = load <vscale x 16 x i16>, ptr %pa
+ ret <vscale x 16 x i16> %va
+}
+
+define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx32i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ ; RV32-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: vload_nx32i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %va = load <vscale x 32 x i16>, ptr %pa
+ ret <vscale x 32 x i16> %va
+}
+
+define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx1i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx1i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 1 x i32>, ptr %pa
+ ret <vscale x 1 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i32>, ptr %pa
+ ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx4i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 4 x i32>, ptr %pa
+ ret <vscale x 4 x i32> %va
+}
+
+define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx8i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: vload_nx8i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ ; RV64-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %va = load <vscale x 8 x i32>, ptr %pa
+ ret <vscale x 8 x i32> %va
+}
+
+define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx16i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ ; RV32-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: vload_nx16i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %va = load <vscale x 16 x i32>, ptr %pa
+ ret <vscale x 16 x i32> %va
+}
+
+define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx1i64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx1i64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 1 x i64>, ptr %pa
+ ret <vscale x 1 x i64> %va
+}
+
+define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx2i64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 2 x i64>, ptr %pa
+ ret <vscale x 2 x i64> %va
+}
+
+define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: vload_nx4i64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ ; RV64-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %va = load <vscale x 4 x i64>, ptr %pa
+ ret <vscale x 4 x i64> %va
+}
+
+define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx8i64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ ; RV32-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: vload_nx8i64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %va = load <vscale x 8 x i64>, ptr %pa
+ ret <vscale x 8 x i64> %va
+}
+
+define <vscale x 16 x i8> @vload_nx16i8_align1(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx16i8_align1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx16i8_align1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 16 x i8>, ptr %pa, align 1
+ ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 16 x i8> @vload_nx16i8_align2(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx16i8_align2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx16i8_align2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 16 x i8>, ptr %pa, align 2
+ ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 16 x i8> @vload_nx16i8_align16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx16i8_align16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx16i8_align16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 16 x i8>, ptr %pa, align 16
+ ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 16 x i8> @vload_nx16i8_align64(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx16i8_align64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx16i8_align64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 16 x i8>, ptr %pa, align 64
+ ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16_align1(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i16_align1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 1)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx4i16_align1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 1)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 4 x i16>, ptr %pa, align 1
+ ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16_align2(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i16_align2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx4i16_align2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 4 x i16>, ptr %pa, align 2
+ ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16_align4(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i16_align4
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx4i16_align4
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 4 x i16>, ptr %pa, align 4
+ ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16_align8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i16_align8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx4i16_align8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 4 x i16>, ptr %pa, align 8
+ ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16_align16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx4i16_align16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx4i16_align16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 4 x i16>, ptr %pa, align 16
+ ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32_align2(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i32_align2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 2)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i32_align2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 2)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i32>, ptr %pa, align 2
+ ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32_align4(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i32_align4
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i32_align4
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i32>, ptr %pa, align 4
+ ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32_align8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i32_align8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i32_align8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i32>, ptr %pa, align 8
+ ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32_align16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i32_align16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i32_align16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i32>, ptr %pa, align 16
+ ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32_align256(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i32_align256
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2i32_align256
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 2 x i32>, ptr %pa, align 256
+ ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 2 x i64> @vload_nx2i64_align4(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i64_align4
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 4)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx2i64_align4
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 4)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 2 x i64>, ptr %pa, align 4
+ ret <vscale x 2 x i64> %va
+}
+
+define <vscale x 2 x i64> @vload_nx2i64_align8(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i64_align8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx2i64_align8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 2 x i64>, ptr %pa, align 8
+ ret <vscale x 2 x i64> %va
+}
+
+define <vscale x 2 x i64> @vload_nx2i64_align16(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i64_align16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx2i64_align16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 2 x i64>, ptr %pa, align 16
+ ret <vscale x 2 x i64> %va
+}
+
+define <vscale x 2 x i64> @vload_nx2i64_align32(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2i64_align32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: vload_nx2i64_align32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 2 x i64>, ptr %pa, align 32
+ ret <vscale x 2 x i64> %va
+}
+
+define <vscale x 1 x ptr> @vload_nx1ptr(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx1ptr
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx1ptr
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %va = load <vscale x 1 x ptr>, ptr %pa
+ ret <vscale x 1 x ptr> %va
+}
+
+define <vscale x 2 x ptr> @vload_nx2ptr(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx2ptr
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: vload_nx2ptr
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x p0>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %va = load <vscale x 2 x ptr>, ptr %pa
+ ret <vscale x 2 x ptr> %va
+}
+
+define <vscale x 8 x ptr> @vload_nx8ptr(ptr %pa) {
+ ; RV32-LABEL: name: vload_nx8ptr
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: vload_nx8ptr
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x p0>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %va = load <vscale x 8 x ptr>, ptr %pa
+ ret <vscale x 8 x ptr> %va
+}
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
index d169eb3..b3c62df 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
@@ -89,10 +89,12 @@ body: |
; CHECK-NEXT: %yhi:_(s32) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %xlo, %ylo
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %ylo
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %xhi, %yhi
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%xlo:_(s32) = COPY $x10
%xhi:_(s32) = COPY $x11
@@ -121,10 +123,12 @@ body: |
; CHECK-NEXT: %hi2:_(s32) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%lo1:_(s32) = COPY $x10
%hi1:_(s32) = COPY $x11
@@ -152,6 +156,7 @@ body: |
; CHECK-NEXT: %hi2:_(s32) = COPY $x15
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %mid1, %mid2
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), %mid1
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
@@ -159,11 +164,13 @@ body: |
; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[OR]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
- ; CHECK-NEXT: $x12 = COPY [[ADD4]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD4]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: $x12 = COPY [[COPY2]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s32) = COPY $x10
%mid1:_(s32) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
index f394e4d..6e76bb0 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
@@ -121,10 +121,12 @@ body: |
; CHECK-NEXT: %y01:_(s64) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %x00, %y00
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %y00
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %x01, %y01
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ADD2]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%x00:_(s64) = COPY $x10
%x01:_(s64) = COPY $x11
@@ -153,10 +155,12 @@ body: |
; CHECK-NEXT: %hi2:_(s64) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ADD2]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%lo1:_(s64) = COPY $x10
%hi1:_(s64) = COPY $x11
@@ -184,6 +188,7 @@ body: |
; CHECK-NEXT: %hi2:_(s64) = COPY $x15
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %mid1, %mid2
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), %mid1
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
@@ -194,14 +199,16 @@ body: |
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC2]], [[AND]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ADD2]](s64)
; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[AND1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
- ; CHECK-NEXT: $x12 = COPY [[ADD4]](s64)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ADD4]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
+ ; CHECK-NEXT: $x12 = COPY [[COPY2]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s64) = COPY $x10
%mid1:_(s64) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv32.mir
index c348ec6..9227e65 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv32.mir
@@ -92,7 +92,8 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[ADD]](s32), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[COPY1]](s32), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ICMP1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s32)
; CHECK-NEXT: $x11 = COPY [[XOR]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s32) = COPY $x10
@@ -119,21 +120,23 @@ body: |
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY2]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]]
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[ADD2]](s32), [[COPY1]]
- ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[COPY1]]
- ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY]]
+ ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[COPY5]](s32), [[COPY1]]
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY5]](s32), [[COPY1]]
+ ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY4]](s32), [[COPY]]
; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s32), [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[COPY3]](s32), [[C1]]
; CHECK-NEXT: [[ICMP5:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY3]](s32), [[C1]]
; CHECK-NEXT: [[ICMP6:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY2]](s32), [[C]]
; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s32), [[ICMP6]], [[ICMP4]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[SELECT1]], [[SELECT]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY4]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY5]](s32)
; CHECK-NEXT: $x12 = COPY [[XOR]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%2:_(s32) = COPY $x10
@@ -241,7 +244,8 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[SUB]](s32), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), [[COPY1]](s32), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ICMP1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[SUB]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s32)
; CHECK-NEXT: $x11 = COPY [[XOR]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s32) = COPY $x10
@@ -377,7 +381,8 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s32)
; CHECK-NEXT: $x11 = COPY [[ICMP]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s32) = COPY $x10
@@ -404,14 +409,16 @@ body: |
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY2]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]]
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD2]](s32), [[COPY3]]
- ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[COPY3]]
- ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY5]](s32), [[COPY3]]
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY5]](s32), [[COPY3]]
+ ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY4]](s32), [[COPY2]]
; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s32), [[ICMP3]], [[ICMP1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY4]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY5]](s32)
; CHECK-NEXT: $x12 = COPY [[SELECT]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%2:_(s32) = COPY $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir
index 5506f52..8acaff5 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir
@@ -125,8 +125,9 @@ body: |
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[TRUNC]], [[TRUNC1]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[XOR]](s32)
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
; CHECK-NEXT: $x11 = COPY [[ANYEXT]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s64) = COPY $x10
@@ -261,8 +262,9 @@ body: |
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[TRUNC]], [[TRUNC1]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[SUB]](s64)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[XOR]](s32)
- ; CHECK-NEXT: $x10 = COPY [[SUB]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
; CHECK-NEXT: $x11 = COPY [[ANYEXT]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s64) = COPY $x10
@@ -364,7 +366,8 @@ body: |
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ZEXT]](s64), [[AND]]
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY2]](s32)
; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
; CHECK-NEXT: $x11 = COPY [[ICMP]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
@@ -393,7 +396,8 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), [[COPY1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
; CHECK-NEXT: $x11 = COPY [[ICMP]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s64) = COPY $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv32.mir
index a890a41..354fc10 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv32.mir
@@ -50,8 +50,8 @@ body: |
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32I-NEXT: [[AND8:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C13]]
; RV32I-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND8]], [[C14]]
; RV32I-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND8]], [[C14]]
; RV32I-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C15]](s32)
; RV32I-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C16]], [[LSHR6]]
@@ -129,8 +129,8 @@ body: |
; RV32I-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
; RV32I-NEXT: [[AND9:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C15]]
; RV32I-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C16]]
; RV32I-NEXT: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C16]]
; RV32I-NEXT: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; RV32I-NEXT: [[AND10:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C18]]
; RV32I-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND10]], [[C17]](s32)
@@ -201,8 +201,8 @@ body: |
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C10]]
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C11]]
; RV32I-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C11]]
; RV32I-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C12]](s32)
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C13]], [[LSHR8]]
@@ -267,8 +267,8 @@ body: |
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C11]]
; RV32I-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C12]]
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C12]]
; RV32I-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C13]](s32)
; RV32I-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C14]], [[LSHR8]]
@@ -306,8 +306,8 @@ body: |
; RV32I-NEXT: [[C26:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ADD4]], [[C26]]
; RV32I-NEXT: [[C27:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C27]]
; RV32I-NEXT: [[C28:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C27]]
; RV32I-NEXT: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C28]](s32)
; RV32I-NEXT: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C29]], [[LSHR17]]
@@ -389,8 +389,8 @@ body: |
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32I-NEXT: [[AND8:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C13]]
; RV32I-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND8]], [[C14]]
; RV32I-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND8]], [[C14]]
; RV32I-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C15]](s32)
; RV32I-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C16]], [[LSHR6]]
@@ -468,8 +468,8 @@ body: |
; RV32I-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
; RV32I-NEXT: [[AND9:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C15]]
; RV32I-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C16]]
; RV32I-NEXT: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C16]]
; RV32I-NEXT: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; RV32I-NEXT: [[AND10:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C18]]
; RV32I-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND10]], [[C17]](s32)
@@ -540,8 +540,8 @@ body: |
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C10]]
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C11]]
; RV32I-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C11]]
; RV32I-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C12]](s32)
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C13]], [[LSHR8]]
@@ -606,8 +606,8 @@ body: |
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C11]]
; RV32I-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C12]]
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C12]]
; RV32I-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C13]](s32)
; RV32I-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C14]], [[LSHR8]]
@@ -645,8 +645,8 @@ body: |
; RV32I-NEXT: [[C26:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ADD4]], [[C26]]
; RV32I-NEXT: [[C27:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C27]]
; RV32I-NEXT: [[C28:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C27]]
; RV32I-NEXT: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C28]](s32)
; RV32I-NEXT: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C29]], [[LSHR17]]
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
index add8a56..38a4b9c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
@@ -283,8 +283,8 @@ body: |
; RV64I-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C11]]
; RV64I-NEXT: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C12]]
; RV64I-NEXT: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C12]]
; RV64I-NEXT: [[LSHR9:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C13]](s64)
; RV64I-NEXT: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C14]], [[LSHR9]]
@@ -583,8 +583,8 @@ body: |
; RV64I-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C11]]
; RV64I-NEXT: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C12]]
; RV64I-NEXT: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C12]]
; RV64I-NEXT: [[LSHR9:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C13]](s64)
; RV64I-NEXT: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C14]], [[LSHR9]]
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv32.mir
index d4eb5eb..c64669c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv32.mir
@@ -35,8 +35,8 @@ body: |
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32I-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND5]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND5]], [[C8]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C9]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -90,8 +90,8 @@ body: |
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
; RV32I-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND5]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND5]], [[C8]]
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; RV32I-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C10]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[AND6]], [[C9]](s32)
@@ -143,8 +143,8 @@ body: |
; RV32I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -190,8 +190,8 @@ body: |
; RV32I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32I-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C8]](s32)
@@ -210,8 +210,8 @@ body: |
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ADD3]], [[C13]]
; RV32I-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C14]]
; RV32I-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C14]]
; RV32I-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C15]](s32)
; RV32I-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[LSHR7]], [[LSHR3]]
; RV32I-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
index e2434ba..196b367 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
@@ -205,8 +205,8 @@ body: |
; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C5]]
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C6]]
; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C6]]
; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C7]](s64)
; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv32.mir
index 19555a7..372beca 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv32.mir
@@ -39,8 +39,8 @@ body: |
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32I-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C10]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -98,8 +98,8 @@ body: |
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
; RV32I-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; RV32I-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C11]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[AND7]], [[C10]](s32)
@@ -155,8 +155,8 @@ body: |
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C6]]
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C7]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C8]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -208,8 +208,8 @@ body: |
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C8]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C9]](s32)
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[LSHR3]], [[C10]]
@@ -234,8 +234,8 @@ body: |
; RV32I-NEXT: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND9:%[0-9]+]]:_(s32) = G_AND [[ADD6]], [[C17]]
; RV32I-NEXT: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C18]]
; RV32I-NEXT: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C18]]
; RV32I-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C19]](s32)
; RV32I-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[ADD3]], [[LSHR7]]
; RV32I-NEXT: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
@@ -304,8 +304,8 @@ body: |
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32I-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C10]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -363,8 +363,8 @@ body: |
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
; RV32I-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; RV32I-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C11]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[AND7]], [[C10]](s32)
@@ -420,8 +420,8 @@ body: |
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C6]]
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C7]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C8]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -473,8 +473,8 @@ body: |
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C8]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C9]](s32)
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[LSHR3]], [[C10]]
@@ -499,8 +499,8 @@ body: |
; RV32I-NEXT: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND9:%[0-9]+]]:_(s32) = G_AND [[ADD6]], [[C17]]
; RV32I-NEXT: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C18]]
; RV32I-NEXT: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C18]]
; RV32I-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C19]](s32)
; RV32I-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[ADD3]], [[LSHR7]]
; RV32I-NEXT: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
index e030e3c..e51a214 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
@@ -221,8 +221,8 @@ body: |
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[C6]]
; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND4]], [[C7]]
; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND4]], [[C7]]
; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C8]](s64)
; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
@@ -457,8 +457,8 @@ body: |
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[C6]]
; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND4]], [[C7]]
; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND4]], [[C7]]
; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C8]](s64)
; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv32.mir
index 433d6e6..ec2dc56 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv32.mir
@@ -162,8 +162,10 @@ body: |
; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH %lo1, %lo2
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL1]], [[MUL2]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[MUL2]]
- ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[UMULH]]
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[UMULH]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[UMULH]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ADD1]](s32)
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ICMP]], [[ICMP1]]
; CHECK-NEXT: [[MUL3:%[0-9]+]]:_(s32) = G_MUL %hi1, %lo2
; CHECK-NEXT: [[MUL4:%[0-9]+]]:_(s32) = G_MUL %mid1, %mid2
@@ -171,13 +173,18 @@ body: |
; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH %mid1, %lo2
; CHECK-NEXT: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH %lo1, %mid2
; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[MUL3]], [[MUL4]]
- ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[MUL5]]
- ; CHECK-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[UMULH1]]
- ; CHECK-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD5]], [[UMULH2]]
- ; CHECK-NEXT: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[ADD6]], [[ADD2]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD3]](s32)
+ ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[MUL5]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ADD4]](s32)
+ ; CHECK-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[COPY3]], [[UMULH1]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD5]](s32)
+ ; CHECK-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[COPY4]], [[UMULH2]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD6]](s32)
+ ; CHECK-NEXT: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[COPY5]], [[ADD2]]
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ADD7]](s32)
; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD1]](s32)
- ; CHECK-NEXT: $x12 = COPY [[ADD7]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: $x12 = COPY [[COPY6]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s32) = COPY $x10
%mid1:_(s32) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv64.mir
index 09e002e..39d9c5b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv64.mir
@@ -194,8 +194,10 @@ body: |
; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH %lo1, %lo2
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MUL1]], [[MUL2]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), [[MUL2]]
- ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ADD]], [[UMULH]]
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[UMULH]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), [[UMULH]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ADD1]](s64)
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ICMP]], [[ICMP1]]
; CHECK-NEXT: [[MUL3:%[0-9]+]]:_(s64) = G_MUL %hi1, %lo2
; CHECK-NEXT: [[MUL4:%[0-9]+]]:_(s64) = G_MUL %mid1, %mid2
@@ -203,13 +205,18 @@ body: |
; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(s64) = G_UMULH %mid1, %lo2
; CHECK-NEXT: [[UMULH2:%[0-9]+]]:_(s64) = G_UMULH %lo1, %mid2
; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD [[MUL3]], [[MUL4]]
- ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[MUL5]]
- ; CHECK-NEXT: [[ADD5:%[0-9]+]]:_(s64) = G_ADD [[ADD4]], [[UMULH1]]
- ; CHECK-NEXT: [[ADD6:%[0-9]+]]:_(s64) = G_ADD [[ADD5]], [[UMULH2]]
- ; CHECK-NEXT: [[ADD7:%[0-9]+]]:_(s64) = G_ADD [[ADD6]], [[ADD2]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ADD3]](s64)
+ ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[MUL5]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[ADD4]](s64)
+ ; CHECK-NEXT: [[ADD5:%[0-9]+]]:_(s64) = G_ADD [[COPY3]], [[UMULH1]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD5]](s64)
+ ; CHECK-NEXT: [[ADD6:%[0-9]+]]:_(s64) = G_ADD [[COPY4]], [[UMULH2]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY [[ADD6]](s64)
+ ; CHECK-NEXT: [[ADD7:%[0-9]+]]:_(s64) = G_ADD [[COPY5]], [[ADD2]]
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY [[ADD7]](s64)
; CHECK-NEXT: $x10 = COPY [[MUL]](s64)
- ; CHECK-NEXT: $x11 = COPY [[ADD1]](s64)
- ; CHECK-NEXT: $x12 = COPY [[ADD7]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
+ ; CHECK-NEXT: $x12 = COPY [[COPY6]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s64) = COPY $x10
%mid1:_(s64) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir
index f9eda12..16542f58 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir
@@ -14,7 +14,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY1]](p0) :: (load (p0))
- ; CHECK-NEXT: G_STORE [[COPY]](p0), [[LOAD]](p0) :: (store (p0))
+ ; CHECK-NEXT: G_STORE [[LOAD]](p0), [[COPY]](p0) :: (store (p0))
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(p0) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-bitcast.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-bitcast.mir
new file mode 100644
index 0000000..7b5d568
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-bitcast.mir
@@ -0,0 +1,356 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: bitcast_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s1>) = G_BITCAST [[DEF]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_BITCAST %0(<vscale x 1 x s8>)
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 1 x s16>) = G_BITCAST [[DEF]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s16>) = G_BITCAST %0(<vscale x 2 x s8>)
+ $v8 = COPY %1(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s16>) = G_BITCAST [[DEF]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s16>) = G_BITCAST %0(<vscale x 4 x s8>)
+ $v8 = COPY %1(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 64 x s1>) = G_BITCAST [[DEF]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 64 x s1>) = G_BITCAST %0(<vscale x 8 x s8>)
+ $v8 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv16i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s32>) = G_BITCAST [[DEF]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s32>) = G_BITCAST %0(<vscale x 16 x s8>)
+ $v8m2 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: bitcast_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv32i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 16 x s16>) = G_BITCAST [[DEF]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: $v8m4 = COPY [[BITCAST]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s16>) = G_BITCAST %0(<vscale x 32 x s8>)
+ $v8m4 = COPY %1(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: bitcast_nxv64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv64i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s64>) = G_BITCAST [[DEF]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: $v8m8 = COPY [[BITCAST]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s64>) = G_BITCAST %0(<vscale x 64 x s8>)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: bitcast_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s8>) = G_BITCAST [[DEF]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s8>) = G_BITCAST %0(<vscale x 1 x s16>)
+ $v8 = COPY %1(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 1 x s32>) = G_BITCAST [[DEF]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s32>) = G_BITCAST %0(<vscale x 2 x s16>)
+ $v8 = COPY %1(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv4i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 1 x s64>) = G_BITCAST [[DEF]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s64>) = G_BITCAST %0(<vscale x 4 x s16>)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv8i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s64>) = G_BITCAST [[DEF]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s64>) = G_BITCAST %0(<vscale x 8 x s16>)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: bitcast_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv16i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s32>) = G_BITCAST [[DEF]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: $v8m4 = COPY [[BITCAST]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s32>) = G_BITCAST %0(<vscale x 16 x s16>)
+ $v8m4 = COPY %1(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: bitcast_nxv32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv32i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s64>) = G_BITCAST [[DEF]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: $v8m8 = COPY [[BITCAST]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s64>) = G_BITCAST %0(<vscale x 32 x s16>)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: bitcast_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv1i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s16>) = G_BITCAST [[DEF]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s16>) = G_BITCAST %0(<vscale x 1 x s32>)
+ $v8 = COPY %1(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv2i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s16>) = G_BITCAST [[DEF]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s16>) = G_BITCAST %0(<vscale x 2 x s32>)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv4i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s16>) = G_BITCAST [[DEF]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s16>) = G_BITCAST %0(<vscale x 4 x s32>)
+ $v8m2 = COPY %1(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: bitcast_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv8i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s64>) = G_BITCAST [[DEF]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: $v8m4 = COPY [[BITCAST]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s64>) = G_BITCAST %0(<vscale x 8 x s32>)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: bitcast_nxv16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv16i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 32 x s16>) = G_BITCAST [[DEF]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: $v8m8 = COPY [[BITCAST]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 32 x s16>) = G_BITCAST %0(<vscale x 16 x s32>)
+ $v8m8 = COPY %1(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: bitcast_nxv1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv1i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s32>) = G_BITCAST [[DEF]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s32>) = G_BITCAST %0(<vscale x 1 x s64>)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: bitcast_nxv2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv2i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s32>) = G_BITCAST [[DEF]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s32>) = G_BITCAST %0(<vscale x 2 x s64>)
+ $v8m2 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: bitcast_nxv4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv4i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 16 x s16>) = G_BITCAST [[DEF]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: $v8m4 = COPY [[BITCAST]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s16>) = G_BITCAST %0(<vscale x 4 x s64>)
+ $v8m4 = COPY %1(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: bitcast_nxv8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: bitcast_nxv8i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 64 x s8>) = G_BITCAST [[DEF]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: $v8m8 = COPY [[BITCAST]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 64 x s8>) = G_BITCAST %0(<vscale x 8 x s64>)
+ $v8m8 = COPY %1(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-implicit-def.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-implicit-def.mir
new file mode 100644
index 0000000..8ee4086
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-implicit-def.mir
@@ -0,0 +1,410 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: implicitdef_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv16i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv32i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv32i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv64i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv64i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 64 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv16i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv32i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv32i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv64i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv64i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv4i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv8i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv16i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv32i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv32i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv1i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv2i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv4i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv8i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv16i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv16i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv1i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv2i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv4i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv4i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv8i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv8i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-select.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-select.mir
new file mode 100644
index 0000000..6e1d4aa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-select.mir
@@ -0,0 +1,400 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: select_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s8>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s8>), %2(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s8>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s8>), %2(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s8>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s8>), %2(<vscale x 4 x s8>)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s8>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s8>), %2(<vscale x 8 x s8>)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv16i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s8>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s8>), %2(<vscale x 16 x s8>)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv32i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv32i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s8>) = G_SELECT %1(<vscale x 32 x s1>), %2(<vscale x 32 x s8>), %2(<vscale x 32 x s8>)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv64i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv64i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 64 x s8>) = G_SELECT %1(<vscale x 64 x s1>), %2(<vscale x 64 x s8>), %2(<vscale x 64 x s8>)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s16>), %2(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s16>), %2(<vscale x 2 x s16>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv4i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s16>), %2(<vscale x 4 x s16>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv8i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s16>), %2(<vscale x 8 x s16>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv16i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s16>), %2(<vscale x 16 x s16>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv32i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv32i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_SELECT %1(<vscale x 32 x s1>), %2(<vscale x 32 x s16>), %2(<vscale x 32 x s16>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv1i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s32>), %2(<vscale x 1 x s32>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv2i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s32>), %2(<vscale x 2 x s32>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv4i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s32>), %2(<vscale x 4 x s32>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv8i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s32>), %2(<vscale x 8 x s32>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv16i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv16i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s32>), %2(<vscale x 16 x s32>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv1i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s64>), %2(<vscale x 1 x s64>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv2i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s64>), %2(<vscale x 2 x s64>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv4i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv4i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s64>), %2(<vscale x 4 x s64>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv8i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv8i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s64>), %2(<vscale x 8 x s64>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv32.mir
new file mode 100644
index 0000000..899f795
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv32.mir
@@ -0,0 +1,228 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v,+m -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: test_1_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_1_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 1
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_2_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_2_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 2
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_3_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_3_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 3
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_4_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_4_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 4
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_8_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_8_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: $x10 = COPY [[READ_VLENB]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 8
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_16_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_16_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[SHL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 16
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_40_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_40_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[READ_VLENB]], [[C]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 40
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+
+---
+name: test_1_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_1_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 1
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_2_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_2_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 2
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_3_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_3_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 3
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_4_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_4_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 4
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_8_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_8_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 8
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_16_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_16_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 16
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_40_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_40_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 40
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv64.mir
new file mode 100644
index 0000000..c0453a0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv64.mir
@@ -0,0 +1,110 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v,+m -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: test_1
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_1
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 1
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_2
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_2
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 2
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_3
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_3
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 3
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_4
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_4
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 4
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_8
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: $x10 = COPY [[READ_VLENB]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 8
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_16
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[SHL]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 16
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_40
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_40
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[READ_VLENB]], [[C]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 40
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir
new file mode 100644
index 0000000..ef1e355
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir
@@ -0,0 +1,425 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
+---
+name: implicitdef_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv32i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv32i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv64i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv64i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv16i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv16i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv32i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv32i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv1i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv1i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv2i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv2i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv4i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv4i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv8i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv8i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv16i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv16i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv1i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv1i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv2i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv2i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv4i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv4i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv8i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv8i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir
new file mode 100644
index 0000000..4dc077a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir
@@ -0,0 +1,558 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
+
+---
+name: select_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s8>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s8>), %2(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s8>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s8>), %2(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s8>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s8>), %2(<vscale x 4 x s8>)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s8>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s8>), %2(<vscale x 8 x s8>)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s8>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s8>), %2(<vscale x 16 x s8>)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv32i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv32i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s8>) = G_SELECT %1(<vscale x 32 x s1>), %2(<vscale x 32 x s8>), %2(<vscale x 32 x s8>)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv64i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv64i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 64 x s8>) = G_SELECT %1(<vscale x 64 x s1>), %2(<vscale x 64 x s8>), %2(<vscale x 64 x s8>)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s16>), %2(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s16>), %2(<vscale x 2 x s16>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s16>), %2(<vscale x 4 x s16>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s16>), %2(<vscale x 8 x s16>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv16i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv16i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s16>), %2(<vscale x 16 x s16>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv32i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv32i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_SELECT %1(<vscale x 32 x s1>), %2(<vscale x 32 x s16>), %2(<vscale x 32 x s16>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv1i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s32>), %2(<vscale x 1 x s32>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv2i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s32>), %2(<vscale x 2 x s32>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv4i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s32>), %2(<vscale x 4 x s32>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv8i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s32>), %2(<vscale x 8 x s32>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv16i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv16i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s32>), %2(<vscale x 16 x s32>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv1i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s64>), %2(<vscale x 1 x s64>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv2i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s64>), %2(<vscale x 2 x s64>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv4i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s64>), %2(<vscale x 4 x s64>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv8i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s64>), %2(<vscale x 8 x s64>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv32.mir
new file mode 100644
index 0000000..ae3bb0a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv32.mir
@@ -0,0 +1,48 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck %s
+
+---
+name: test_s32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:gprb(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:gprb(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s32) = G_READ_VLENB
+ %2:_(s32) = G_CONSTANT i32 3
+ %0:_(s32) = G_LSHR %1, %2(s32)
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_s64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:gprb(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:gprb(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gprb(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:_(s32) = G_READ_VLENB
+ %18:_(s32) = G_CONSTANT i32 3
+ %2:_(s32) = G_LSHR %17, %18(s32)
+ %15:_(s32) = G_CONSTANT i32 1
+ %9:_(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv64.mir
new file mode 100644
index 0000000..a7446d9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv64.mir
@@ -0,0 +1,25 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck %s
+
+---
+name: test
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:gprb(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:gprb(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s64) = G_READ_VLENB
+ %2:_(s64) = G_CONSTANT i64 3
+ %0:_(s64) = G_LSHR %1, %2(s64)
+ $x10 = COPY %0(s64)
+ PseudoRET implicit $x10
+
+...
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
index 7b110e5..d55adf3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
@@ -17,6 +17,12 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel -mattr=+d -target-abi lp64d \
; RUN: -verify-machineinstrs \
; RUN: | FileCheck -check-prefixes=RV64,LP64D %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv32 -global-isel \
+; RUN: -frame-pointer=all -target-abi ilp32 -verify-machineinstrs \
+; RUN: | FileCheck -check-prefixes=RV32-WITHFP %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel \
+; RUN: -frame-pointer=all -target-abi lp64 -verify-machineinstrs \
+; RUN: | FileCheck -check-prefixes=RV64-WITHFP %s
; The same vararg calling convention is used for ilp32/ilp32f/ilp32d and for
; lp64/lp64f/lp64d. Different CHECK lines are required due to slight
@@ -79,6 +85,67 @@ define i32 @va1(ptr %fmt, ...) {
; RV64-NEXT: lw a0, 0(a0)
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va1:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: .cfi_offset ra, -36
+; RV32-WITHFP-NEXT: .cfi_offset s0, -40
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 32
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va1:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: .cfi_offset ra, -72
+; RV64-WITHFP-NEXT: .cfi_offset s0, -80
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 64
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: lw a0, -20(s0)
+; RV64-WITHFP-NEXT: lwu a1, -24(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: slli a0, a0, 32
+; RV64-WITHFP-NEXT: or a0, a0, a1
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: srli a2, a1, 32
+; RV64-WITHFP-NEXT: sw a1, -24(s0)
+; RV64-WITHFP-NEXT: sw a2, -20(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%argp.cur = load ptr, ptr %va, align 4
@@ -131,6 +198,58 @@ define i32 @va1_va_arg(ptr %fmt, ...) nounwind {
; RV64-NEXT: lw a0, 0(a0)
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va1_va_arg:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va1_va_arg:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -24(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%1 = va_arg ptr %va, i32
@@ -212,6 +331,78 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; RV64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 96
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va1_va_arg_alloca:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -16(s0)
+; RV32-WITHFP-NEXT: lw a0, -16(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw s1, 0(a0)
+; RV32-WITHFP-NEXT: addi a0, s1, 15
+; RV32-WITHFP-NEXT: andi a0, a0, -16
+; RV32-WITHFP-NEXT: sub a0, sp, a0
+; RV32-WITHFP-NEXT: mv sp, a0
+; RV32-WITHFP-NEXT: call notdead
+; RV32-WITHFP-NEXT: mv a0, s1
+; RV32-WITHFP-NEXT: addi sp, s0, -16
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va1_va_arg_alloca:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -32(s0)
+; RV64-WITHFP-NEXT: ld a0, -32(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -32(s0)
+; RV64-WITHFP-NEXT: lw s1, 0(a0)
+; RV64-WITHFP-NEXT: slli a0, s1, 32
+; RV64-WITHFP-NEXT: srli a0, a0, 32
+; RV64-WITHFP-NEXT: addi a0, a0, 15
+; RV64-WITHFP-NEXT: andi a0, a0, -16
+; RV64-WITHFP-NEXT: sub a0, sp, a0
+; RV64-WITHFP-NEXT: mv sp, a0
+; RV64-WITHFP-NEXT: call notdead
+; RV64-WITHFP-NEXT: mv a0, s1
+; RV64-WITHFP-NEXT: addi sp, s0, -32
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%1 = va_arg ptr %va, i32
@@ -273,6 +464,36 @@ define void @va1_caller() nounwind {
; LP64D-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LP64D-NEXT: addi sp, sp, 16
; LP64D-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va1_caller:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -16
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: lui a3, 261888
+; RV32-WITHFP-NEXT: li a4, 2
+; RV32-WITHFP-NEXT: li a2, 0
+; RV32-WITHFP-NEXT: call va1
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va1_caller:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -16
+; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 16
+; RV64-WITHFP-NEXT: lui a0, %hi(.LCPI3_0)
+; RV64-WITHFP-NEXT: ld a1, %lo(.LCPI3_0)(a0)
+; RV64-WITHFP-NEXT: li a2, 2
+; RV64-WITHFP-NEXT: call va1
+; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 16
+; RV64-WITHFP-NEXT: ret
%1 = call i32 (ptr, ...) @va1(ptr undef, double 1.0, i32 2)
ret void
}
@@ -395,6 +616,59 @@ define i64 @va2(ptr %fmt, ...) nounwind {
; RV64-NEXT: ld a0, 0(a1)
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va2:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 7
+; RV32-WITHFP-NEXT: andi a1, a0, -8
+; RV32-WITHFP-NEXT: addi a0, a0, 8
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a1)
+; RV32-WITHFP-NEXT: lw a1, 4(a1)
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va2:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: addi a1, a0, 7
+; RV64-WITHFP-NEXT: andi a1, a1, -8
+; RV64-WITHFP-NEXT: addi a0, a0, 15
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, 0(a1)
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%argp.cur = load ptr, ptr %va
@@ -459,6 +733,61 @@ define i64 @va2_va_arg(ptr %fmt, ...) nounwind {
; RV64-NEXT: srli a0, a0, 32
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va2_va_arg:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: li a1, 0
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va2_va_arg:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -24(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: slli a0, a0, 32
+; RV64-WITHFP-NEXT: srli a0, a0, 32
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%1 = va_arg ptr %va, i32
@@ -487,6 +816,32 @@ define void @va2_caller() nounwind {
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va2_caller:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -16
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: li a1, 1
+; RV32-WITHFP-NEXT: call va2
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va2_caller:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -16
+; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 16
+; RV64-WITHFP-NEXT: li a1, 1
+; RV64-WITHFP-NEXT: call va2
+; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 16
+; RV64-WITHFP-NEXT: ret
%1 = call i64 (ptr, ...) @va2(ptr undef, i32 1)
ret void
}
@@ -617,6 +972,61 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
; RV64-NEXT: add a0, a1, a0
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va3:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 20(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 16(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 24
+; RV32-WITHFP-NEXT: sw a3, 4(s0)
+; RV32-WITHFP-NEXT: sw a4, 8(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: sw a5, 12(s0)
+; RV32-WITHFP-NEXT: sw a6, 16(s0)
+; RV32-WITHFP-NEXT: sw a7, 20(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 7
+; RV32-WITHFP-NEXT: andi a3, a0, -8
+; RV32-WITHFP-NEXT: addi a0, a0, 8
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a4, 0(a3)
+; RV32-WITHFP-NEXT: lw a3, 4(a3)
+; RV32-WITHFP-NEXT: add a0, a1, a4
+; RV32-WITHFP-NEXT: sltu a1, a0, a4
+; RV32-WITHFP-NEXT: add a2, a2, a3
+; RV32-WITHFP-NEXT: add a1, a2, a1
+; RV32-WITHFP-NEXT: lw ra, 20(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 16(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va3:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -80
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a2, 0(s0)
+; RV64-WITHFP-NEXT: sd a3, 8(s0)
+; RV64-WITHFP-NEXT: sd a4, 16(s0)
+; RV64-WITHFP-NEXT: mv a0, s0
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: sd a5, 24(s0)
+; RV64-WITHFP-NEXT: sd a6, 32(s0)
+; RV64-WITHFP-NEXT: sd a7, 40(s0)
+; RV64-WITHFP-NEXT: addi a2, a0, 7
+; RV64-WITHFP-NEXT: andi a2, a2, -8
+; RV64-WITHFP-NEXT: addi a0, a0, 15
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, 0(a2)
+; RV64-WITHFP-NEXT: add a0, a1, a0
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 80
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%argp.cur = load ptr, ptr %va
@@ -682,6 +1092,61 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
; RV64-NEXT: add a0, a1, a0
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va3_va_arg:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 20(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 16(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 24
+; RV32-WITHFP-NEXT: sw a3, 4(s0)
+; RV32-WITHFP-NEXT: sw a4, 8(s0)
+; RV32-WITHFP-NEXT: sw a5, 12(s0)
+; RV32-WITHFP-NEXT: sw a6, 16(s0)
+; RV32-WITHFP-NEXT: sw a7, 20(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a3, a0, 4
+; RV32-WITHFP-NEXT: sw a3, -12(s0)
+; RV32-WITHFP-NEXT: lw a3, 0(a0)
+; RV32-WITHFP-NEXT: add a0, a1, a3
+; RV32-WITHFP-NEXT: sltu a1, a0, a3
+; RV32-WITHFP-NEXT: add a1, a2, a1
+; RV32-WITHFP-NEXT: lw ra, 20(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 16(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va3_va_arg:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -80
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a2, 0(s0)
+; RV64-WITHFP-NEXT: sd a3, 8(s0)
+; RV64-WITHFP-NEXT: sd a4, 16(s0)
+; RV64-WITHFP-NEXT: sd a5, 24(s0)
+; RV64-WITHFP-NEXT: sd a6, 32(s0)
+; RV64-WITHFP-NEXT: sd a7, 40(s0)
+; RV64-WITHFP-NEXT: mv a0, s0
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a2, a0, 4
+; RV64-WITHFP-NEXT: sd a2, -24(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: slli a0, a0, 32
+; RV64-WITHFP-NEXT: srli a0, a0, 32
+; RV64-WITHFP-NEXT: add a0, a1, a0
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 80
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%1 = va_arg ptr %va, i32
@@ -718,6 +1183,39 @@ define void @va3_caller() nounwind {
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va3_caller:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -16
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: lui a0, 5
+; RV32-WITHFP-NEXT: addi a3, a0, -480
+; RV32-WITHFP-NEXT: li a0, 2
+; RV32-WITHFP-NEXT: li a1, 1111
+; RV32-WITHFP-NEXT: li a2, 0
+; RV32-WITHFP-NEXT: call va3
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va3_caller:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -16
+; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 16
+; RV64-WITHFP-NEXT: lui a0, 5
+; RV64-WITHFP-NEXT: addiw a2, a0, -480
+; RV64-WITHFP-NEXT: li a0, 2
+; RV64-WITHFP-NEXT: li a1, 1111
+; RV64-WITHFP-NEXT: call va3
+; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 16
+; RV64-WITHFP-NEXT: ret
%1 = call i64 (i32, i64, ...) @va3(i32 2, i64 1111, i32 20000)
ret void
}
@@ -745,9 +1243,8 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV32-NEXT: addi a1, a0, 4
; RV32-NEXT: sw a1, 4(sp)
; RV32-NEXT: lw a1, 4(sp)
-; RV32-NEXT: mv a2, sp
; RV32-NEXT: lw s0, 0(a0)
-; RV32-NEXT: sw a2, 0(a1)
+; RV32-NEXT: sw a1, 0(sp)
; RV32-NEXT: lw a0, 0(sp)
; RV32-NEXT: call notdead
; RV32-NEXT: lw a0, 4(sp)
@@ -796,9 +1293,8 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV64-NEXT: addi a1, a0, 4
; RV64-NEXT: sd a1, 8(sp)
; RV64-NEXT: ld a1, 8(sp)
-; RV64-NEXT: mv a2, sp
; RV64-NEXT: lw s0, 0(a0)
-; RV64-NEXT: sd a2, 0(a1)
+; RV64-NEXT: sd a1, 0(sp)
; RV64-NEXT: lw a0, 4(sp)
; RV64-NEXT: lwu a1, 0(sp)
; RV64-NEXT: slli a0, a0, 32
@@ -829,6 +1325,115 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 96
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va4_va_copy:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -64
+; RV32-WITHFP-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 32
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -16(s0)
+; RV32-WITHFP-NEXT: lw a0, -16(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw s1, 0(a0)
+; RV32-WITHFP-NEXT: sw a1, -20(s0)
+; RV32-WITHFP-NEXT: lw a0, -20(s0)
+; RV32-WITHFP-NEXT: call notdead
+; RV32-WITHFP-NEXT: lw a0, -16(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: addi a1, a1, 3
+; RV32-WITHFP-NEXT: andi a1, a1, -4
+; RV32-WITHFP-NEXT: addi a2, a1, 4
+; RV32-WITHFP-NEXT: sw a2, -16(s0)
+; RV32-WITHFP-NEXT: lw a2, -16(s0)
+; RV32-WITHFP-NEXT: lw a1, 0(a1)
+; RV32-WITHFP-NEXT: addi a2, a2, 3
+; RV32-WITHFP-NEXT: andi a2, a2, -4
+; RV32-WITHFP-NEXT: addi a3, a2, 4
+; RV32-WITHFP-NEXT: sw a3, -16(s0)
+; RV32-WITHFP-NEXT: lw a2, 0(a2)
+; RV32-WITHFP-NEXT: add a0, a0, s1
+; RV32-WITHFP-NEXT: add a1, a1, a2
+; RV32-WITHFP-NEXT: add a0, a0, a1
+; RV32-WITHFP-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 64
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va4_va_copy:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -112
+; RV64-WITHFP-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 48
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -32(s0)
+; RV64-WITHFP-NEXT: ld a0, -32(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -32(s0)
+; RV64-WITHFP-NEXT: ld a1, -32(s0)
+; RV64-WITHFP-NEXT: lw s1, 0(a0)
+; RV64-WITHFP-NEXT: sd a1, -40(s0)
+; RV64-WITHFP-NEXT: lw a0, -36(s0)
+; RV64-WITHFP-NEXT: lwu a1, -40(s0)
+; RV64-WITHFP-NEXT: slli a0, a0, 32
+; RV64-WITHFP-NEXT: or a0, a0, a1
+; RV64-WITHFP-NEXT: call notdead
+; RV64-WITHFP-NEXT: ld a0, -32(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -32(s0)
+; RV64-WITHFP-NEXT: ld a1, -32(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: addi a1, a1, 3
+; RV64-WITHFP-NEXT: andi a1, a1, -4
+; RV64-WITHFP-NEXT: addi a2, a1, 4
+; RV64-WITHFP-NEXT: sd a2, -32(s0)
+; RV64-WITHFP-NEXT: ld a2, -32(s0)
+; RV64-WITHFP-NEXT: lw a1, 0(a1)
+; RV64-WITHFP-NEXT: addi a2, a2, 3
+; RV64-WITHFP-NEXT: andi a2, a2, -4
+; RV64-WITHFP-NEXT: addi a3, a2, 4
+; RV64-WITHFP-NEXT: sd a3, -32(s0)
+; RV64-WITHFP-NEXT: lw a2, 0(a2)
+; RV64-WITHFP-NEXT: add a0, a0, s1
+; RV64-WITHFP-NEXT: add a1, a1, a2
+; RV64-WITHFP-NEXT: addw a0, a0, a1
+; RV64-WITHFP-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 112
+; RV64-WITHFP-NEXT: ret
%vargs = alloca ptr
%wargs = alloca ptr
call void @llvm.va_start(ptr %vargs)
@@ -899,6 +1504,60 @@ define i32 @va6_no_fixed_args(...) nounwind {
; RV64-NEXT: lw a0, 0(a0)
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va6_no_fixed_args:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: sw a0, 0(s0)
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: mv a0, s0
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va6_no_fixed_args:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a0, 0(s0)
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: mv a0, s0
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -24(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%1 = va_arg ptr %va, i32
@@ -993,6 +1652,85 @@ define i32 @va_large_stack(ptr %fmt, ...) {
; RV64-NEXT: addiw a1, a1, 336
; RV64-NEXT: add sp, sp, a1
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va_large_stack:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -2032
+; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 2032
+; RV32-WITHFP-NEXT: sw ra, 1996(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 1992(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: .cfi_offset ra, -36
+; RV32-WITHFP-NEXT: .cfi_offset s0, -40
+; RV32-WITHFP-NEXT: addi s0, sp, 2000
+; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 32
+; RV32-WITHFP-NEXT: lui a0, 24414
+; RV32-WITHFP-NEXT: addi a0, a0, -1728
+; RV32-WITHFP-NEXT: sub sp, sp, a0
+; RV32-WITHFP-NEXT: lui a0, 24414
+; RV32-WITHFP-NEXT: addi a0, a0, 272
+; RV32-WITHFP-NEXT: sub a0, s0, a0
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: addi a1, s0, 4
+; RV32-WITHFP-NEXT: sw a1, 0(a0)
+; RV32-WITHFP-NEXT: lw a1, 0(a0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a2, a1, 4
+; RV32-WITHFP-NEXT: sw a2, 0(a0)
+; RV32-WITHFP-NEXT: lw a0, 0(a1)
+; RV32-WITHFP-NEXT: lui a1, 24414
+; RV32-WITHFP-NEXT: addi a1, a1, -1728
+; RV32-WITHFP-NEXT: add sp, sp, a1
+; RV32-WITHFP-NEXT: lw ra, 1996(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 1992(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 2032
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va_large_stack:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -2032
+; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 2032
+; RV64-WITHFP-NEXT: sd ra, 1960(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 1952(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: .cfi_offset ra, -72
+; RV64-WITHFP-NEXT: .cfi_offset s0, -80
+; RV64-WITHFP-NEXT: addi s0, sp, 1968
+; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 64
+; RV64-WITHFP-NEXT: lui a0, 24414
+; RV64-WITHFP-NEXT: addiw a0, a0, -1680
+; RV64-WITHFP-NEXT: sub sp, sp, a0
+; RV64-WITHFP-NEXT: lui a0, 24414
+; RV64-WITHFP-NEXT: addiw a0, a0, 288
+; RV64-WITHFP-NEXT: sub a0, s0, a0
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: addi a1, s0, 8
+; RV64-WITHFP-NEXT: sd a1, 0(a0)
+; RV64-WITHFP-NEXT: lw a1, 4(a0)
+; RV64-WITHFP-NEXT: lwu a2, 0(a0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: slli a1, a1, 32
+; RV64-WITHFP-NEXT: or a1, a1, a2
+; RV64-WITHFP-NEXT: addi a2, a1, 4
+; RV64-WITHFP-NEXT: srli a3, a2, 32
+; RV64-WITHFP-NEXT: sw a2, 0(a0)
+; RV64-WITHFP-NEXT: sw a3, 4(a0)
+; RV64-WITHFP-NEXT: lw a0, 0(a1)
+; RV64-WITHFP-NEXT: lui a1, 24414
+; RV64-WITHFP-NEXT: addiw a1, a1, -1680
+; RV64-WITHFP-NEXT: add sp, sp, a1
+; RV64-WITHFP-NEXT: ld ra, 1960(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 1952(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 2032
+; RV64-WITHFP-NEXT: ret
%large = alloca [ 100000000 x i8 ]
%va = alloca ptr
call void @llvm.va_start(ptr %va)
@@ -1004,5 +1742,193 @@ define i32 @va_large_stack(ptr %fmt, ...) {
ret i32 %1
}
+define i32 @va_vprintf(ptr %fmt, ptr %arg_start) {
+; RV32-LABEL: va_vprintf:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: lw a0, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: lw a0, 8(sp)
+; RV32-NEXT: addi a0, a0, 3
+; RV32-NEXT: andi a0, a0, -4
+; RV32-NEXT: addi a1, a0, 4
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lw a0, 0(a0)
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: va_vprintf:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd a1, 8(sp)
+; RV64-NEXT: ld a0, 8(sp)
+; RV64-NEXT: sd a0, 0(sp)
+; RV64-NEXT: ld a0, 0(sp)
+; RV64-NEXT: addi a0, a0, 3
+; RV64-NEXT: andi a0, a0, -4
+; RV64-NEXT: addi a1, a0, 4
+; RV64-NEXT: sd a1, 0(sp)
+; RV64-NEXT: lw a0, 0(a0)
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va_vprintf:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -16
+; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 16
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: .cfi_offset ra, -4
+; RV32-WITHFP-NEXT: .cfi_offset s0, -8
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 0
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: sw a0, -16(s0)
+; RV32-WITHFP-NEXT: lw a0, -16(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va_vprintf:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -32
+; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 32
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: .cfi_offset ra, -8
+; RV64-WITHFP-NEXT: .cfi_offset s0, -16
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 0
+; RV64-WITHFP-NEXT: sd a1, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: sd a0, -32(s0)
+; RV64-WITHFP-NEXT: ld a0, -32(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -32(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 32
+; RV64-WITHFP-NEXT: ret
+ %args = alloca ptr
+ %args_cp = alloca ptr
+ store ptr %arg_start, ptr %args
+ call void @llvm.va_copy(ptr %args_cp, ptr %args)
+ %width = va_arg ptr %args_cp, i32
+ call void @llvm.va_end(ptr %args_cp)
+ ret i32 %width
+}
-
+define i32 @va_printf(ptr %fmt, ...) {
+; RV32-LABEL: va_printf:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -36
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a2, 24(sp)
+; RV32-NEXT: sw a3, 28(sp)
+; RV32-NEXT: sw a4, 32(sp)
+; RV32-NEXT: addi a1, sp, 20
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lw a1, 8(sp)
+; RV32-NEXT: sw a5, 36(sp)
+; RV32-NEXT: sw a6, 40(sp)
+; RV32-NEXT: sw a7, 44(sp)
+; RV32-NEXT: call va_vprintf
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: va_printf:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: .cfi_def_cfa_offset 80
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -72
+; RV64-NEXT: sd a1, 24(sp)
+; RV64-NEXT: sd a2, 32(sp)
+; RV64-NEXT: sd a3, 40(sp)
+; RV64-NEXT: sd a4, 48(sp)
+; RV64-NEXT: addi a1, sp, 24
+; RV64-NEXT: sd a1, 0(sp)
+; RV64-NEXT: ld a1, 0(sp)
+; RV64-NEXT: sd a5, 56(sp)
+; RV64-NEXT: sd a6, 64(sp)
+; RV64-NEXT: sd a7, 72(sp)
+; RV64-NEXT: call va_vprintf
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va_printf:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: .cfi_offset ra, -36
+; RV32-WITHFP-NEXT: .cfi_offset s0, -40
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 32
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: addi a1, s0, 4
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a1, -12(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: call va_vprintf
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va_printf:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: .cfi_offset ra, -72
+; RV64-WITHFP-NEXT: .cfi_offset s0, -80
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 64
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: addi a1, s0, 8
+; RV64-WITHFP-NEXT: sd a1, -24(s0)
+; RV64-WITHFP-NEXT: ld a1, -24(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: call va_vprintf
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
+ %args = alloca ptr
+ call void @llvm.va_start(ptr %args)
+ %arg_start = load ptr, ptr %args
+ %ret_val = call i32 @va_vprintf(ptr %fmt, ptr %arg_start)
+ call void @llvm.va_end(ptr %args)
+ ret i32 %ret_val
+}
diff --git a/llvm/test/CodeGen/RISCV/allow-check.ll b/llvm/test/CodeGen/RISCV/allow-check.ll
new file mode 100644
index 0000000..0ddb526
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/allow-check.ll
@@ -0,0 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 -global-isel=0 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=riscv32 -global-isel=1 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=riscv32 -global-isel=0 -fast-isel=1 | FileCheck %s
+
+; RUN: llc < %s -mtriple=riscv64 -global-isel=0 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -global-isel=1 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -global-isel=0 -fast-isel=1 | FileCheck %s
+
+define i1 @test_runtime() local_unnamed_addr {
+; CHECK-LABEL: test_runtime:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: ret
+entry:
+ %allow = call i1 @llvm.allow.runtime.check(metadata !"test_check")
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.runtime.check(metadata) nounwind
+
+define i1 @test_ubsan() local_unnamed_addr {
+; CHECK-LABEL: test_ubsan:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: ret
+entry:
+ %allow = call i1 @llvm.allow.ubsan.check(i8 7)
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.ubsan.check(i8) nounwind
diff --git a/llvm/test/CodeGen/RISCV/attributes-module-flag.ll b/llvm/test/CodeGen/RISCV/attributes-module-flag.ll
new file mode 100644
index 0000000..4580539
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/attributes-module-flag.ll
@@ -0,0 +1,17 @@
+; RUN: llc -mtriple=riscv32 %s -o - | FileCheck %s --check-prefix=RV32
+; RUN: llc -mtriple=riscv64 %s -o - | FileCheck %s --check-prefix=RV64
+
+; Test generation of ELF attribute from module metadata
+
+; RV32: .attribute 5, "rv32i2p1_m2p0_zba1p0"
+; RV64: .attribute 5, "rv64i2p1_m2p0_zba1p0"
+
+define i32 @addi(i32 %a) {
+ %1 = add i32 %a, 1
+ ret i32 %1
+}
+
+!llvm.module.flags = !{!0}
+
+!0 = !{i32 6, !"riscv-isa", !1}
+!1 = !{!"rv64i2p1_m2p0", !"rv64i2p1_zba1p0"}
diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
index 455e6e5..549d531 100644
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -1160,8 +1160,6 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: beqz a0, .LBB10_2
; RV32I-NEXT: # %bb.1: # %cond.false
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
@@ -1189,12 +1187,11 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB10_2:
; RV32I-NEXT: li a0, 32
@@ -1205,8 +1202,6 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV64I-NEXT: sext.w a1, a0
; RV64I-NEXT: beqz a1, .LBB10_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -1232,14 +1227,13 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB10_2:
; RV64I-NEXT: li a0, 32
@@ -1354,19 +1348,16 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
define i64 @test_ctlz_i64(i64 %a) nounwind {
; RV32I-LABEL: test_ctlz_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: lui a2, 349525
+; RV32I-NEXT: addi a4, a2, 1365
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a3, a2, 819
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi a2, a2, -241
+; RV32I-NEXT: bnez a1, .LBB11_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -1377,28 +1368,26 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s4, a2, 1365
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s5, a1, 819
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s6, a1, -241
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s3, a1, 257
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: or a0, s2, a0
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB11_2:
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -1409,43 +1398,27 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: bnez s0, .LBB11_2
-; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a0, a0, 32
-; RV32I-NEXT: j .LBB11_3
-; RV32I-NEXT: .LBB11_2:
-; RV32I-NEXT: srli a0, s1, 24
-; RV32I-NEXT: .LBB11_3:
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctlz_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB11_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -1481,14 +1454,13 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB11_2:
; RV64I-NEXT: li a0, 64
@@ -1831,8 +1803,6 @@ define i16 @test_ctlz_i16_zero_undef(i16 %a) nounwind {
define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
; RV32I-LABEL: test_ctlz_i32_zero_undef:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
@@ -1860,18 +1830,15 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctlz_i32_zero_undef:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -1897,14 +1864,13 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32M-LABEL: test_ctlz_i32_zero_undef:
@@ -2005,19 +1971,16 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
; RV32I-LABEL: test_ctlz_i64_zero_undef:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: lui a2, 349525
+; RV32I-NEXT: addi a4, a2, 1365
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a3, a2, 819
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi a2, a2, -241
+; RV32I-NEXT: bnez a1, .LBB15_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -2028,28 +1991,26 @@ define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s4, a2, 1365
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s5, a1, 819
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s6, a1, -241
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s3, a1, 257
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: or a0, s2, a0
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB15_2:
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -2060,41 +2021,25 @@ define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: bnez s0, .LBB15_2
-; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a0, a0, 32
-; RV32I-NEXT: j .LBB15_3
-; RV32I-NEXT: .LBB15_2:
-; RV32I-NEXT: srli a0, s1, 24
-; RV32I-NEXT: .LBB15_3:
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctlz_i64_zero_undef:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -2130,14 +2075,13 @@ define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32M-LABEL: test_ctlz_i64_zero_undef:
@@ -2464,8 +2408,6 @@ define i16 @test_ctpop_i16(i16 %a) nounwind {
define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV32I-LABEL: test_ctpop_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: lui a2, 349525
; RV32I-NEXT: addi a2, a2, 1365
@@ -2482,18 +2424,15 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctpop_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: addiw a2, a2, 1365
@@ -2508,14 +2447,13 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32M-LABEL: test_ctpop_i32:
@@ -2578,8 +2516,6 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
;
; RV32XTHEADBB-LABEL: test_ctpop_i32:
; RV32XTHEADBB: # %bb.0:
-; RV32XTHEADBB-NEXT: addi sp, sp, -16
-; RV32XTHEADBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32XTHEADBB-NEXT: srli a1, a0, 1
; RV32XTHEADBB-NEXT: lui a2, 349525
; RV32XTHEADBB-NEXT: addi a2, a2, 1365
@@ -2596,18 +2532,15 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV32XTHEADBB-NEXT: lui a1, 61681
; RV32XTHEADBB-NEXT: addi a1, a1, -241
; RV32XTHEADBB-NEXT: and a0, a0, a1
-; RV32XTHEADBB-NEXT: lui a1, 4112
-; RV32XTHEADBB-NEXT: addi a1, a1, 257
-; RV32XTHEADBB-NEXT: call __mulsi3
+; RV32XTHEADBB-NEXT: slli a1, a0, 8
+; RV32XTHEADBB-NEXT: add a0, a0, a1
+; RV32XTHEADBB-NEXT: slli a1, a0, 16
+; RV32XTHEADBB-NEXT: add a0, a0, a1
; RV32XTHEADBB-NEXT: srli a0, a0, 24
-; RV32XTHEADBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: addi sp, sp, 16
; RV32XTHEADBB-NEXT: ret
;
; RV64XTHEADBB-LABEL: test_ctpop_i32:
; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: addi sp, sp, -16
-; RV64XTHEADBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64XTHEADBB-NEXT: srli a1, a0, 1
; RV64XTHEADBB-NEXT: lui a2, 349525
; RV64XTHEADBB-NEXT: addiw a2, a2, 1365
@@ -2622,14 +2555,13 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV64XTHEADBB-NEXT: srli a1, a0, 4
; RV64XTHEADBB-NEXT: add a0, a0, a1
; RV64XTHEADBB-NEXT: lui a1, 61681
-; RV64XTHEADBB-NEXT: addiw a1, a1, -241
+; RV64XTHEADBB-NEXT: addi a1, a1, -241
; RV64XTHEADBB-NEXT: and a0, a0, a1
-; RV64XTHEADBB-NEXT: lui a1, 4112
-; RV64XTHEADBB-NEXT: addiw a1, a1, 257
-; RV64XTHEADBB-NEXT: call __muldi3
+; RV64XTHEADBB-NEXT: slli a1, a0, 8
+; RV64XTHEADBB-NEXT: add a0, a0, a1
+; RV64XTHEADBB-NEXT: slli a1, a0, 16
+; RV64XTHEADBB-NEXT: add a0, a0, a1
; RV64XTHEADBB-NEXT: srliw a0, a0, 24
-; RV64XTHEADBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64XTHEADBB-NEXT: addi sp, sp, 16
; RV64XTHEADBB-NEXT: ret
%1 = call i32 @llvm.ctpop.i32(i32 %a)
ret i32 %1
@@ -2638,65 +2570,48 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
define i64 @test_ctpop_i64(i64 %a) nounwind {
; RV32I-LABEL: test_ctpop_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s2, a2, 1365
-; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: sub a1, a1, a0
-; RV32I-NEXT: lui a0, 209715
-; RV32I-NEXT: addi s3, a0, 819
-; RV32I-NEXT: and a0, a1, s3
+; RV32I-NEXT: srli a2, a1, 1
+; RV32I-NEXT: lui a3, 349525
+; RV32I-NEXT: addi a3, a3, 1365
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a2, a2, 819
+; RV32I-NEXT: and a4, a1, a2
; RV32I-NEXT: srli a1, a1, 2
-; RV32I-NEXT: and a1, a1, s3
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s4, a1, -241
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s1, a1, 257
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli s5, a0, 24
-; RV32I-NEXT: srli a0, s0, 1
-; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: sub s0, s0, a0
-; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: srli s0, s0, 2
-; RV32I-NEXT: and a1, s0, s3
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: add a1, a4, a1
+; RV32I-NEXT: srli a4, a1, 4
+; RV32I-NEXT: add a1, a1, a4
+; RV32I-NEXT: lui a4, 61681
+; RV32I-NEXT: addi a4, a4, -241
+; RV32I-NEXT: and a1, a1, a4
+; RV32I-NEXT: slli a5, a1, 8
+; RV32I-NEXT: add a1, a1, a5
+; RV32I-NEXT: slli a5, a1, 16
+; RV32I-NEXT: add a1, a1, a5
+; RV32I-NEXT: srli a1, a1, 24
+; RV32I-NEXT: srli a5, a0, 1
+; RV32I-NEXT: and a3, a5, a3
+; RV32I-NEXT: sub a0, a0, a3
+; RV32I-NEXT: and a3, a0, a2
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: add a0, a3, a0
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: slli a2, a0, 8
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 16
+; RV32I-NEXT: add a0, a0, a2
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: add a0, a0, s5
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctpop_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: addiw a2, a2, 1365
@@ -2719,14 +2634,13 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32M-LABEL: test_ctpop_i64:
@@ -2814,65 +2728,48 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
;
; RV32XTHEADBB-LABEL: test_ctpop_i64:
; RV32XTHEADBB: # %bb.0:
-; RV32XTHEADBB-NEXT: addi sp, sp, -32
-; RV32XTHEADBB-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: mv s0, a0
-; RV32XTHEADBB-NEXT: srli a0, a1, 1
-; RV32XTHEADBB-NEXT: lui a2, 349525
-; RV32XTHEADBB-NEXT: addi s2, a2, 1365
-; RV32XTHEADBB-NEXT: and a0, a0, s2
-; RV32XTHEADBB-NEXT: sub a1, a1, a0
-; RV32XTHEADBB-NEXT: lui a0, 209715
-; RV32XTHEADBB-NEXT: addi s3, a0, 819
-; RV32XTHEADBB-NEXT: and a0, a1, s3
+; RV32XTHEADBB-NEXT: srli a2, a1, 1
+; RV32XTHEADBB-NEXT: lui a3, 349525
+; RV32XTHEADBB-NEXT: addi a3, a3, 1365
+; RV32XTHEADBB-NEXT: and a2, a2, a3
+; RV32XTHEADBB-NEXT: sub a1, a1, a2
+; RV32XTHEADBB-NEXT: lui a2, 209715
+; RV32XTHEADBB-NEXT: addi a2, a2, 819
+; RV32XTHEADBB-NEXT: and a4, a1, a2
; RV32XTHEADBB-NEXT: srli a1, a1, 2
-; RV32XTHEADBB-NEXT: and a1, a1, s3
-; RV32XTHEADBB-NEXT: add a0, a0, a1
-; RV32XTHEADBB-NEXT: srli a1, a0, 4
-; RV32XTHEADBB-NEXT: add a0, a0, a1
-; RV32XTHEADBB-NEXT: lui a1, 61681
-; RV32XTHEADBB-NEXT: addi s4, a1, -241
-; RV32XTHEADBB-NEXT: and a0, a0, s4
-; RV32XTHEADBB-NEXT: lui a1, 4112
-; RV32XTHEADBB-NEXT: addi s1, a1, 257
-; RV32XTHEADBB-NEXT: mv a1, s1
-; RV32XTHEADBB-NEXT: call __mulsi3
-; RV32XTHEADBB-NEXT: srli s5, a0, 24
-; RV32XTHEADBB-NEXT: srli a0, s0, 1
-; RV32XTHEADBB-NEXT: and a0, a0, s2
-; RV32XTHEADBB-NEXT: sub s0, s0, a0
-; RV32XTHEADBB-NEXT: and a0, s0, s3
-; RV32XTHEADBB-NEXT: srli s0, s0, 2
-; RV32XTHEADBB-NEXT: and a1, s0, s3
-; RV32XTHEADBB-NEXT: add a0, a0, a1
-; RV32XTHEADBB-NEXT: srli a1, a0, 4
-; RV32XTHEADBB-NEXT: add a0, a0, a1
-; RV32XTHEADBB-NEXT: and a0, a0, s4
-; RV32XTHEADBB-NEXT: mv a1, s1
-; RV32XTHEADBB-NEXT: call __mulsi3
+; RV32XTHEADBB-NEXT: and a1, a1, a2
+; RV32XTHEADBB-NEXT: add a1, a4, a1
+; RV32XTHEADBB-NEXT: srli a4, a1, 4
+; RV32XTHEADBB-NEXT: add a1, a1, a4
+; RV32XTHEADBB-NEXT: lui a4, 61681
+; RV32XTHEADBB-NEXT: addi a4, a4, -241
+; RV32XTHEADBB-NEXT: and a1, a1, a4
+; RV32XTHEADBB-NEXT: slli a5, a1, 8
+; RV32XTHEADBB-NEXT: add a1, a1, a5
+; RV32XTHEADBB-NEXT: slli a5, a1, 16
+; RV32XTHEADBB-NEXT: add a1, a1, a5
+; RV32XTHEADBB-NEXT: srli a1, a1, 24
+; RV32XTHEADBB-NEXT: srli a5, a0, 1
+; RV32XTHEADBB-NEXT: and a3, a5, a3
+; RV32XTHEADBB-NEXT: sub a0, a0, a3
+; RV32XTHEADBB-NEXT: and a3, a0, a2
+; RV32XTHEADBB-NEXT: srli a0, a0, 2
+; RV32XTHEADBB-NEXT: and a0, a0, a2
+; RV32XTHEADBB-NEXT: add a0, a3, a0
+; RV32XTHEADBB-NEXT: srli a2, a0, 4
+; RV32XTHEADBB-NEXT: add a0, a0, a2
+; RV32XTHEADBB-NEXT: and a0, a0, a4
+; RV32XTHEADBB-NEXT: slli a2, a0, 8
+; RV32XTHEADBB-NEXT: add a0, a0, a2
+; RV32XTHEADBB-NEXT: slli a2, a0, 16
+; RV32XTHEADBB-NEXT: add a0, a0, a2
; RV32XTHEADBB-NEXT: srli a0, a0, 24
-; RV32XTHEADBB-NEXT: add a0, a0, s5
+; RV32XTHEADBB-NEXT: add a0, a0, a1
; RV32XTHEADBB-NEXT: li a1, 0
-; RV32XTHEADBB-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: addi sp, sp, 32
; RV32XTHEADBB-NEXT: ret
;
; RV64XTHEADBB-LABEL: test_ctpop_i64:
; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: addi sp, sp, -16
-; RV64XTHEADBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64XTHEADBB-NEXT: srli a1, a0, 1
; RV64XTHEADBB-NEXT: lui a2, 349525
; RV64XTHEADBB-NEXT: addiw a2, a2, 1365
@@ -2895,14 +2792,13 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
; RV64XTHEADBB-NEXT: slli a2, a1, 32
; RV64XTHEADBB-NEXT: add a1, a1, a2
; RV64XTHEADBB-NEXT: and a0, a0, a1
-; RV64XTHEADBB-NEXT: lui a1, 4112
-; RV64XTHEADBB-NEXT: addiw a1, a1, 257
-; RV64XTHEADBB-NEXT: slli a2, a1, 32
-; RV64XTHEADBB-NEXT: add a1, a1, a2
-; RV64XTHEADBB-NEXT: call __muldi3
+; RV64XTHEADBB-NEXT: slli a1, a0, 8
+; RV64XTHEADBB-NEXT: add a0, a0, a1
+; RV64XTHEADBB-NEXT: slli a1, a0, 16
+; RV64XTHEADBB-NEXT: add a0, a0, a1
+; RV64XTHEADBB-NEXT: slli a1, a0, 32
+; RV64XTHEADBB-NEXT: add a0, a0, a1
; RV64XTHEADBB-NEXT: srli a0, a0, 56
-; RV64XTHEADBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64XTHEADBB-NEXT: addi sp, sp, 16
; RV64XTHEADBB-NEXT: ret
%1 = call i64 @llvm.ctpop.i64(i64 %a)
ret i64 %1
diff --git a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
index adf6144..9ae30e6 100644
--- a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
+++ b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
@@ -602,19 +602,16 @@ define signext i32 @ctlz(i64 %b) nounwind {
;
; RV32I-LABEL: ctlz:
; RV32I: # %bb.0: # %entry
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: lui a2, 349525
+; RV32I-NEXT: addi a4, a2, 1365
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a3, a2, 819
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi a2, a2, -241
+; RV32I-NEXT: bnez a1, .LBB7_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -625,28 +622,26 @@ define signext i32 @ctlz(i64 %b) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s4, a2, 1365
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s5, a1, 819
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s6, a1, -241
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s3, a1, 257
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: or a0, s2, a0
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: andi a0, a0, 63
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB7_2:
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -657,41 +652,25 @@ define signext i32 @ctlz(i64 %b) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: bnez s0, .LBB7_2
-; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi s1, a0, 32
-; RV32I-NEXT: j .LBB7_3
-; RV32I-NEXT: .LBB7_2:
-; RV32I-NEXT: srli s1, s1, 24
-; RV32I-NEXT: .LBB7_3: # %entry
-; RV32I-NEXT: andi a0, s1, 63
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: andi a0, a0, 63
; RV32I-NEXT: ret
;
; RV64I-LABEL: ctlz:
; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -727,15 +706,14 @@ define signext i32 @ctlz(i64 %b) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a0, a0, 2
; RV64I-NEXT: srli a0, a0, 58
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/double-arith-strict.ll b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
index 1861755..2333693 100644
--- a/llvm/test/CodeGen/RISCV/double-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
@@ -24,21 +24,7 @@ define double @fadd_d(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fadd_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fadd_d:
@@ -76,21 +62,7 @@ define double @fsub_d(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fsub_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsub.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsub_d:
@@ -128,21 +100,7 @@ define double @fmul_d(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fmul_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmul.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmul_d:
@@ -180,21 +138,7 @@ define double @fdiv_d(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fdiv_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fdiv.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fdiv_d:
@@ -232,17 +176,7 @@ define double @fsqrt_d(double %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fsqrt_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsqrt.d a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsqrt_d:
@@ -398,25 +332,7 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fmadd_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmadd_d:
@@ -463,27 +379,9 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fmsub_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fmsub.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmsub_d:
@@ -572,28 +470,10 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fnmadd_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d:
@@ -701,28 +581,10 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fnmadd_d_2:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a2, a0, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d_2:
@@ -829,27 +691,9 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fnmsub_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmsub_d:
@@ -932,27 +776,9 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fnmsub_d_2:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a2, a0, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmsub_d_2:
diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
index 82ddf06..a2093f5 100644
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -25,21 +25,7 @@ define double @fadd_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fadd_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fadd_d:
@@ -76,21 +62,7 @@ define double @fsub_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fsub_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsub.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsub_d:
@@ -127,21 +99,7 @@ define double @fmul_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmul_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmul.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmul_d:
@@ -178,21 +136,7 @@ define double @fdiv_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fdiv_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fdiv.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fdiv_d:
@@ -231,17 +175,7 @@ define double @fsqrt_d(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fsqrt_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsqrt.d a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsqrt_d:
@@ -280,21 +214,7 @@ define double @fsgnj_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fsgnj_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsgnj.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsgnj_d:
@@ -335,15 +255,9 @@ define i32 @fneg_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fneg_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: fneg.d a2, a0
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fneg_d:
@@ -401,21 +315,7 @@ define double @fsgnjn_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fsgnjn_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsgnjn.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsgnjn_d:
@@ -464,23 +364,9 @@ define double @fabs_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fabs_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: fabs.d a2, a0
; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fabs_d:
@@ -532,21 +418,7 @@ define double @fmin_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmin_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmin_d:
@@ -585,21 +457,7 @@ define double @fmax_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmax_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmax_d:
@@ -638,25 +496,7 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmadd_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmadd_d:
@@ -702,27 +542,9 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmsub_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fmsub.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmsub_d:
@@ -811,28 +633,10 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmadd_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d:
@@ -940,28 +744,10 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmadd_d_2:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a2, a0, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d_2:
@@ -1060,27 +846,9 @@ define double @fnmadd_d_3(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmadd_d_3:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lui a2, 524288
; RV32IZFINXZDINX-NEXT: xor a1, a1, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d_3:
@@ -1127,27 +895,9 @@ define double @fnmadd_nsz(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmadd_nsz:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lui a2, 524288
; RV32IZFINXZDINX-NEXT: xor a1, a1, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_nsz:
@@ -1202,27 +952,9 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmsub_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmsub_d:
@@ -1305,27 +1037,9 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmsub_d_2:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a2, a0, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmsub_d_2:
@@ -1403,25 +1117,7 @@ define double @fmadd_d_contract(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmadd_d_contract:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmadd_d_contract:
@@ -1482,27 +1178,9 @@ define double @fmsub_d_contract(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmsub_d_contract:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fmsub.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmsub_d_contract:
@@ -1601,29 +1279,11 @@ define double @fnmadd_d_contract(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmadd_d_contract:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d_contract:
@@ -1749,28 +1409,10 @@ define double @fnmsub_d_contract(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fnmsub_d_contract:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmsub_d_contract:
diff --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
index 55bf95a..99835ff 100644
--- a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
@@ -141,21 +141,7 @@ define double @fcopysign_fneg(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcopysign_fneg:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsgnjn.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64I-LABEL: fcopysign_fneg:
diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
index 2c5505e..035228e 100644
--- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
@@ -89,23 +89,13 @@ define void @br_fcmp_oeq(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_oeq:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB1_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB1_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_oeq:
@@ -155,23 +145,13 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_oeq_alt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB2_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB2_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_oeq_alt:
@@ -218,23 +198,13 @@ define void @br_fcmp_ogt(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ogt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB3_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB3_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ogt:
@@ -281,23 +251,13 @@ define void @br_fcmp_oge(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_oge:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB4_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB4_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_oge:
@@ -344,23 +304,13 @@ define void @br_fcmp_olt(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_olt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB5_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB5_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_olt:
@@ -407,23 +357,13 @@ define void @br_fcmp_ole(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ole:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB6_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB6_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ole:
@@ -474,25 +414,15 @@ define void @br_fcmp_one(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_one:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: or a0, a0, a4
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB7_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB7_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_one:
@@ -545,25 +475,15 @@ define void @br_fcmp_ord(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ord:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB8_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB8_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ord:
@@ -616,25 +536,15 @@ define void @br_fcmp_ueq(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ueq:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: or a0, a0, a4
; RV32IZFINXZDINX-NEXT: beqz a0, .LBB9_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB9_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ueq:
@@ -683,23 +593,13 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ugt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: beqz a0, .LBB10_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB10_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ugt:
@@ -746,23 +646,13 @@ define void @br_fcmp_uge(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_uge:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: beqz a0, .LBB11_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB11_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_uge:
@@ -809,23 +699,13 @@ define void @br_fcmp_ult(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ult:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: beqz a0, .LBB12_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB12_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ult:
@@ -872,23 +752,13 @@ define void @br_fcmp_ule(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ule:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: beqz a0, .LBB13_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB13_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ule:
@@ -935,23 +805,13 @@ define void @br_fcmp_une(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_une:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: beqz a0, .LBB14_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB14_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_une:
@@ -1002,25 +862,15 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: br_fcmp_uno:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
; RV32IZFINXZDINX-NEXT: beqz a0, .LBB15_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
-; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB15_2: # %if.then
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_uno:
diff --git a/llvm/test/CodeGen/RISCV/double-calling-conv.ll b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
index d46256b..57aaa4c 100644
--- a/llvm/test/CodeGen/RISCV/double-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
@@ -28,21 +28,7 @@ define double @callee_double_inreg(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: callee_double_inreg:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
%1 = fadd double %a, %b
ret double %1
@@ -106,22 +92,11 @@ define double @callee_double_split_reg_stack(i32 %a, i64 %b, i64 %c, double %d,
;
; RV32IZFINXZDINX-LABEL: callee_double_split_reg_stack:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: lw a0, 16(sp)
-; RV32IZFINXZDINX-NEXT: sw a7, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a6, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv a0, a7
+; RV32IZFINXZDINX-NEXT: lw a1, 0(sp)
+; RV32IZFINXZDINX-NEXT: mv a3, a6
+; RV32IZFINXZDINX-NEXT: mv a2, a5
; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
%1 = fadd double %d, %e
ret double %1
@@ -190,17 +165,11 @@ define double @callee_double_stack(i64 %a, i64 %b, i64 %c, i64 %d, double %e, do
;
; RV32IZFINXZDINX-LABEL: callee_double_stack:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: lw a0, 24(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 28(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 16(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 20(sp)
-; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
+; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
+; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: ret
%1 = fadd double %e, %f
ret double %1
diff --git a/llvm/test/CodeGen/RISCV/double-convert-strict.ll b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
index 967b119..13bcafb 100644
--- a/llvm/test/CodeGen/RISCV/double-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
@@ -28,13 +28,7 @@ define float @fcvt_s_d(double %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_s_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.s.d a0, a0
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_s_d:
@@ -72,13 +66,7 @@ define double @fcvt_d_s(float %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_s:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.s a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_s:
@@ -116,13 +104,7 @@ define i32 @fcvt_w_d(double %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_d:
@@ -162,13 +144,7 @@ define i32 @fcvt_wu_d(double %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_d:
@@ -210,15 +186,9 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
; RV32IZFINXZDINX-NEXT: seqz a1, a0
; RV32IZFINXZDINX-NEXT: add a0, a0, a1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
@@ -263,13 +233,7 @@ define double @fcvt_d_w(i32 %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w:
@@ -309,14 +273,8 @@ define double @fcvt_d_w_load(ptr %p) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_load:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: lw a0, 0(a0)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_load:
@@ -357,13 +315,7 @@ define double @fcvt_d_wu(i32 %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu:
@@ -409,14 +361,8 @@ define double @fcvt_d_wu_load(ptr %p) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_load:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: lw a0, 0(a0)
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_load:
@@ -661,13 +607,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_i8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_i8:
@@ -705,13 +645,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i8:
@@ -749,13 +683,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_i16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_i16:
@@ -793,13 +721,7 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i16:
diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index 3700a18..da882ca 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -20,13 +20,7 @@ define float @fcvt_s_d(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_s_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.s.d a0, a0
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_s_d:
@@ -63,13 +57,7 @@ define double @fcvt_d_s(float %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_s:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.s a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_s:
@@ -106,13 +94,7 @@ define i32 @fcvt_w_d(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_d:
@@ -153,17 +135,11 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_d_sat:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rtz
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_d_sat:
@@ -287,13 +263,7 @@ define i32 @fcvt_wu_d(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_d:
@@ -334,15 +304,9 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
; RV32IZFINXZDINX-NEXT: seqz a1, a0
; RV32IZFINXZDINX-NEXT: add a0, a0, a1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
@@ -402,17 +366,11 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_d_sat:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rtz
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_d_sat:
@@ -512,13 +470,7 @@ define double @fcvt_d_w(i32 %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w:
@@ -557,14 +509,8 @@ define double @fcvt_d_w_load(ptr %p) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_load:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: lw a0, 0(a0)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_load:
@@ -605,13 +551,7 @@ define double @fcvt_d_wu(i32 %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu:
@@ -656,14 +596,8 @@ define double @fcvt_d_wu_load(ptr %p) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_load:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: lw a0, 0(a0)
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_load:
@@ -749,47 +683,41 @@ define i64 @fcvt_l_d(double %a) nounwind {
define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV32IFD-LABEL: fcvt_l_d_sat:
; RV32IFD: # %bb.0: # %start
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: lui a0, %hi(.LCPI12_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI12_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI12_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB12_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB12_2
; RV32IFD-NEXT: # %bb.1: # %start
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB12_2: # %start
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB12_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI12_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI12_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB12_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB12_4: # %start
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_l_d_sat:
@@ -803,50 +731,44 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_l_d_sat:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI12_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI12_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI12_0)(a2)
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI12_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI12_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI12_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB12_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB12_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %start
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB12_2: # %start
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB12_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI12_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI12_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI12_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB12_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB12_4: # %start
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_l_d_sat:
@@ -1057,18 +979,17 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_lu_d_sat:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: mv s1, a1
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
-; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s2, a2
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: fle.d a0, a2, s0
+; RV32IZFINXZDINX-NEXT: neg s2, a0
+; RV32IZFINXZDINX-NEXT: mv a0, s0
; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI14_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI14_0+4)(a2)
@@ -1079,11 +1000,11 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: or a0, a2, a0
; RV32IZFINXZDINX-NEXT: and a1, s2, a1
; RV32IZFINXZDINX-NEXT: or a1, a2, a1
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_lu_d_sat:
@@ -1185,21 +1106,7 @@ define i64 @fmv_x_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmv_x_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmv_x_d:
@@ -1334,13 +1241,13 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
; RV32IFD-LABEL: fmv_d_x:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw a3, 4(sp)
-; RV32IFD-NEXT: sw a2, 0(sp)
-; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: sw a0, 8(sp)
-; RV32IFD-NEXT: fld fa5, 0(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld fa5, 8(sp)
+; RV32IFD-NEXT: sw a2, 8(sp)
+; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld fa4, 8(sp)
-; RV32IFD-NEXT: fadd.d fa0, fa4, fa5
+; RV32IFD-NEXT: fadd.d fa0, fa5, fa4
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
@@ -1353,21 +1260,7 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmv_d_x:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw a3, 20(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 16(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 28(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 24(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 16(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 20(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 24(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 28(sp)
-; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmv_d_x:
@@ -1406,13 +1299,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_i8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_i8:
@@ -1449,13 +1336,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i8:
@@ -1492,13 +1373,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_i16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_i16:
@@ -1535,13 +1410,7 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i16:
@@ -1731,13 +1600,7 @@ define signext i16 @fcvt_w_s_i16(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_s_i16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_s_i16:
@@ -1797,24 +1660,18 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_s_sat_i16:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI26_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI26_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI26_0)(a2)
; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI26_1)
; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI26_1+4)(a4)
; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI26_1)(a4)
-; RV32IZFINXZDINX-NEXT: fmax.d a2, a0, a2
-; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
-; RV32IZFINXZDINX-NEXT: neg a0, a0
-; RV32IZFINXZDINX-NEXT: fmin.d a2, a2, a4
-; RV32IZFINXZDINX-NEXT: fcvt.w.d a1, a2, rtz
-; RV32IZFINXZDINX-NEXT: and a0, a0, a1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: feq.d a6, a0, a0
+; RV32IZFINXZDINX-NEXT: neg a6, a6
+; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a2
+; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a4
+; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
+; RV32IZFINXZDINX-NEXT: and a0, a6, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_s_sat_i16:
@@ -1948,13 +1805,7 @@ define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_s_i16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_s_i16:
@@ -2006,11 +1857,6 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_s_sat_i16:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI28_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI28_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI28_0)(a2)
@@ -2018,7 +1864,6 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a4
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_s_sat_i16:
@@ -2130,13 +1975,7 @@ define signext i8 @fcvt_w_s_i8(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_s_i8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_s_i8:
@@ -2196,24 +2035,18 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_s_sat_i8:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI30_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI30_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI30_0)(a2)
; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI30_1)
; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI30_1+4)(a4)
; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI30_1)(a4)
-; RV32IZFINXZDINX-NEXT: fmax.d a2, a0, a2
-; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
-; RV32IZFINXZDINX-NEXT: neg a0, a0
-; RV32IZFINXZDINX-NEXT: fmin.d a2, a2, a4
-; RV32IZFINXZDINX-NEXT: fcvt.w.d a1, a2, rtz
-; RV32IZFINXZDINX-NEXT: and a0, a0, a1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: feq.d a6, a0, a0
+; RV32IZFINXZDINX-NEXT: neg a6, a6
+; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a2
+; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a4
+; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
+; RV32IZFINXZDINX-NEXT: and a0, a6, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_s_sat_i8:
@@ -2344,13 +2177,7 @@ define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_s_i8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_s_i8:
@@ -2404,11 +2231,6 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_s_sat_i8:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI32_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI32_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI32_0)(a2)
@@ -2416,7 +2238,6 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a4
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_s_sat_i8:
@@ -2532,17 +2353,11 @@ define zeroext i32 @fcvt_wu_d_sat_zext(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_d_sat_zext:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rtz
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_d_sat_zext:
@@ -2647,17 +2462,11 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_w_d_sat_sext:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rtz
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_d_sat_sext:
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
index 3ae2e99..e864d8f 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
@@ -24,17 +24,7 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_oeq:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_oeq:
@@ -78,20 +68,11 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_ogt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a1, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a4
-; RV32IZFINXZDINX-NEXT: csrw fflags, a1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a5, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a4, a2, a0
+; RV32IZFINXZDINX-NEXT: csrw fflags, a5
+; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ogt:
@@ -138,20 +119,11 @@ define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_oge:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a1, fflags
-; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a4
-; RV32IZFINXZDINX-NEXT: csrw fflags, a1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a5, fflags
+; RV32IZFINXZDINX-NEXT: fle.d a4, a2, a0
+; RV32IZFINXZDINX-NEXT: csrw fflags, a5
+; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_oge:
@@ -200,20 +172,11 @@ define i32 @fcmp_olt(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_olt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a1, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a0, a4, a2
-; RV32IZFINXZDINX-NEXT: csrw fflags, a1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a5, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
+; RV32IZFINXZDINX-NEXT: csrw fflags, a5
+; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_olt:
@@ -260,20 +223,11 @@ define i32 @fcmp_ole(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_ole:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a1, fflags
-; RV32IZFINXZDINX-NEXT: fle.d a0, a4, a2
-; RV32IZFINXZDINX-NEXT: csrw fflags, a1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a5, fflags
+; RV32IZFINXZDINX-NEXT: fle.d a4, a0, a2
+; RV32IZFINXZDINX-NEXT: csrw fflags, a5
+; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ole:
@@ -327,25 +281,16 @@ define i32 @fcmp_one(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_one:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a1, a4, a2
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a6, a2, a4
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: or a0, a6, a1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a5, a0, a2
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a6, a2, a0
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: or a4, a6, a5
+; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_one:
@@ -430,19 +375,9 @@ define i32 @fcmp_ord(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_ord:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ord:
@@ -495,26 +430,17 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_ueq:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a1, a4, a2
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a6, a2, a4
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: or a0, a6, a1
-; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a5, a0, a2
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a6, a2, a0
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: or a4, a6, a5
+; RV32IZFINXZDINX-NEXT: xori a4, a4, 1
+; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ueq:
@@ -602,21 +528,12 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_ugt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: fle.d a1, a4, a2
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: xori a0, a1, 1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: fle.d a5, a0, a2
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
+; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ugt:
@@ -665,21 +582,12 @@ define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_uge:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a1, a4, a2
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: xori a0, a1, 1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a5, a0, a2
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
+; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_uge:
@@ -730,21 +638,12 @@ define i32 @fcmp_ult(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_ult:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: fle.d a1, a2, a4
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: xori a0, a1, 1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: fle.d a5, a2, a0
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
+; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ult:
@@ -793,21 +692,12 @@ define i32 @fcmp_ule(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_ule:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: csrr a0, fflags
-; RV32IZFINXZDINX-NEXT: flt.d a1, a2, a4
-; RV32IZFINXZDINX-NEXT: csrw fflags, a0
-; RV32IZFINXZDINX-NEXT: xori a0, a1, 1
-; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: csrr a4, fflags
+; RV32IZFINXZDINX-NEXT: flt.d a5, a2, a0
+; RV32IZFINXZDINX-NEXT: csrw fflags, a4
+; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
+; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
+; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ule:
@@ -853,18 +743,8 @@ define i32 @fcmp_une(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_une:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_une:
@@ -908,20 +788,10 @@ define i32 @fcmp_uno(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmp_uno:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_uno:
@@ -966,19 +836,9 @@ define i32 @fcmps_oeq(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_oeq:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a4, a2, a0
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: and a0, a0, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_oeq:
@@ -1021,17 +881,7 @@ define i32 @fcmps_ogt(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_ogt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ogt:
@@ -1071,17 +921,7 @@ define i32 @fcmps_oge(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_oge:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_oge:
@@ -1123,17 +963,7 @@ define i32 @fcmps_olt(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_olt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_olt:
@@ -1173,17 +1003,7 @@ define i32 @fcmps_ole(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_ole:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ole:
@@ -1225,19 +1045,9 @@ define i32 @fcmps_one(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_one:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: or a0, a0, a4
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_one:
@@ -1315,19 +1125,9 @@ define i32 @fcmps_ord(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_ord:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ord:
@@ -1372,20 +1172,10 @@ define i32 @fcmps_ueq(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_ueq:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: or a0, a0, a4
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ueq:
@@ -1463,18 +1253,8 @@ define i32 @fcmps_ugt(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_ugt:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ugt:
@@ -1516,18 +1296,8 @@ define i32 @fcmps_uge(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_uge:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_uge:
@@ -1571,18 +1341,8 @@ define i32 @fcmps_ult(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_ult:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ult:
@@ -1624,18 +1384,8 @@ define i32 @fcmps_ule(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_ule:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ule:
@@ -1679,20 +1429,10 @@ define i32 @fcmps_une(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_une:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a4, a2, a0
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: and a0, a0, a4
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_une:
@@ -1738,20 +1478,10 @@ define i32 @fcmps_uno(double %a, double %b) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fcmps_uno:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_uno:
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll
index 64a154f..1e609f8 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll
@@ -45,17 +45,7 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_oeq:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_oeq:
@@ -95,17 +85,7 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_ogt:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_ogt:
@@ -145,17 +125,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_oge:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: fle.d a0, a2, a0
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_oge:
@@ -197,17 +167,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_olt:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a0, a2
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_olt:
@@ -247,17 +207,7 @@ define i32 @fcmp_ole(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_ole:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_ole:
@@ -299,19 +249,9 @@ define i32 @fcmp_one(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_one:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; CHECKRV32IZFINXZDINX-NEXT: or a0, a0, a4
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_one:
@@ -389,19 +329,9 @@ define i32 @fcmp_ord(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_ord:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; CHECKRV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; CHECKRV32IZFINXZDINX-NEXT: and a0, a0, a2
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_ord:
@@ -446,20 +376,10 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_ueq:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; CHECKRV32IZFINXZDINX-NEXT: or a0, a0, a4
; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_ueq:
@@ -537,18 +457,8 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_ugt:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_ugt:
@@ -590,18 +500,8 @@ define i32 @fcmp_uge(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_uge:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a0, a2
; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_uge:
@@ -645,18 +545,8 @@ define i32 @fcmp_ult(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_ult:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: fle.d a0, a2, a0
; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_ult:
@@ -698,18 +588,8 @@ define i32 @fcmp_ule(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_ule:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_ule:
@@ -751,18 +631,8 @@ define i32 @fcmp_une(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_une:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_une:
@@ -806,20 +676,10 @@ define i32 @fcmp_uno(double %a, double %b) nounwind {
;
; CHECKRV32IZFINXZDINX-LABEL: fcmp_uno:
; CHECKRV32IZFINXZDINX: # %bb.0:
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; CHECKRV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; CHECKRV32IZFINXZDINX-NEXT: and a0, a0, a2
; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16
; CHECKRV32IZFINXZDINX-NEXT: ret
;
; CHECKRV64IZFINXZDINX-LABEL: fcmp_uno:
diff --git a/llvm/test/CodeGen/RISCV/double-imm.ll b/llvm/test/CodeGen/RISCV/double-imm.ll
index 9254369..74d4acc 100644
--- a/llvm/test/CodeGen/RISCV/double-imm.ll
+++ b/llvm/test/CodeGen/RISCV/double-imm.ll
@@ -54,20 +54,10 @@ define double @double_imm_op(double %a) nounwind {
;
; CHECKRV32ZDINX-LABEL: double_imm_op:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: lui a2, %hi(.LCPI1_0)
; CHECKRV32ZDINX-NEXT: lw a3, %lo(.LCPI1_0+4)(a2)
; CHECKRV32ZDINX-NEXT: lw a2, %lo(.LCPI1_0)(a2)
; CHECKRV32ZDINX-NEXT: fadd.d a0, a0, a2
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: double_imm_op:
@@ -153,24 +143,18 @@ define dso_local double @negzero_sel(i16 noundef %a, double noundef %d) nounwind
;
; CHECKRV32ZDINX-LABEL: negzero_sel:
; CHECKRV32ZDINX: # %bb.0: # %entry
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a1, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a2, 12(sp)
-; CHECKRV32ZDINX-NEXT: slli a2, a0, 16
-; CHECKRV32ZDINX-NEXT: fcvt.d.w a0, zero
-; CHECKRV32ZDINX-NEXT: beqz a2, .LBB4_2
+; CHECKRV32ZDINX-NEXT: slli a0, a0, 16
+; CHECKRV32ZDINX-NEXT: fcvt.d.w a4, zero
+; CHECKRV32ZDINX-NEXT: beqz a0, .LBB4_2
; CHECKRV32ZDINX-NEXT: # %bb.1: # %entry
-; CHECKRV32ZDINX-NEXT: fneg.d a0, a0
+; CHECKRV32ZDINX-NEXT: fneg.d a2, a4
; CHECKRV32ZDINX-NEXT: j .LBB4_3
; CHECKRV32ZDINX-NEXT: .LBB4_2:
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
+; CHECKRV32ZDINX-NEXT: mv a3, a2
+; CHECKRV32ZDINX-NEXT: mv a2, a1
; CHECKRV32ZDINX-NEXT: .LBB4_3: # %entry
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
+; CHECKRV32ZDINX-NEXT: mv a0, a2
+; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: negzero_sel:
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
index c574f64..3821586 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
@@ -28,17 +28,7 @@ define double @sqrt_f64(double %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: sqrt_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsqrt.d a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: sqrt_f64:
@@ -299,22 +289,12 @@ define double @sincos_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX-NEXT: mv s0, a1
; RV32IZFINXZDINX-NEXT: mv s1, a0
; RV32IZFINXZDINX-NEXT: call sin
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s3, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s2, a0
+; RV32IZFINXZDINX-NEXT: mv s3, a1
; RV32IZFINXZDINX-NEXT: mv a0, s1
; RV32IZFINXZDINX-NEXT: mv a1, s0
; RV32IZFINXZDINX-NEXT: call cos
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, s2, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -765,25 +745,7 @@ define double @fma_f64(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fma_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fma_f64:
@@ -822,25 +784,7 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: fmuladd_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmuladd_f64:
@@ -1455,13 +1399,7 @@ define iXLen @lrint_f64(double %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: lrint_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: lrint_f64:
@@ -1505,13 +1443,7 @@ define iXLen @lround_f64(double %a) nounwind strictfp {
;
; RV32IZFINXZDINX-LABEL: lround_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: lround_f64:
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index f290cf0..52c49cf 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -26,17 +26,7 @@ define double @sqrt_f64(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: sqrt_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsqrt.d a0, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: sqrt_f64:
@@ -254,22 +244,12 @@ define double @sincos_f64(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: mv s0, a1
; RV32IZFINXZDINX-NEXT: mv s1, a0
; RV32IZFINXZDINX-NEXT: call sin
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s2, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s3, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s2, a0
+; RV32IZFINXZDINX-NEXT: mv s3, a1
; RV32IZFINXZDINX-NEXT: mv a0, s1
; RV32IZFINXZDINX-NEXT: mv a1, s0
; RV32IZFINXZDINX-NEXT: call cos
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, s2, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -606,25 +586,7 @@ define double @fma_f64(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fma_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fma_f64:
@@ -663,25 +625,7 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmuladd_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmuladd_f64:
@@ -769,21 +713,7 @@ define double @minnum_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: minnum_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: minnum_f64:
@@ -822,21 +752,7 @@ define double @maxnum_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: maxnum_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: maxnum_f64:
@@ -892,21 +808,7 @@ define double @copysign_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: copysign_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsgnj.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: copysign_f64:
@@ -1381,13 +1283,7 @@ define iXLen @lrint_f64(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: lrint_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: lrint_f64:
@@ -1432,13 +1328,7 @@ define iXLen @lround_f64(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: lround_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: lround_f64:
@@ -1475,13 +1365,7 @@ define i32 @lround_i32_f64(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: lround_i32_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: lround_i32_f64:
@@ -1625,16 +1509,9 @@ define i1 @isnan_d_fpclass(double %x) {
;
; RV32IZFINXZDINX-LABEL: isnan_d_fpclass:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fclass.d a0, a0
; RV32IZFINXZDINX-NEXT: andi a0, a0, 768
; RV32IZFINXZDINX-NEXT: snez a0, a0
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: isnan_d_fpclass:
diff --git a/llvm/test/CodeGen/RISCV/double-isnan.ll b/llvm/test/CodeGen/RISCV/double-isnan.ll
index 4d0b815..6a3779d 100644
--- a/llvm/test/CodeGen/RISCV/double-isnan.ll
+++ b/llvm/test/CodeGen/RISCV/double-isnan.ll
@@ -17,14 +17,8 @@ define zeroext i1 @double_is_nan(double %a) nounwind {
;
; CHECKRV32ZDINX-LABEL: double_is_nan:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a0, a0, a0
; CHECKRV32ZDINX-NEXT: xori a0, a0, 1
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: double_is_nan:
@@ -44,13 +38,7 @@ define zeroext i1 @double_not_nan(double %a) nounwind {
;
; CHECKRV32ZDINX-LABEL: double_not_nan:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a0, a0, a0
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: double_not_nan:
diff --git a/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll b/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll
index 0ca2078..5229117 100644
--- a/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll
+++ b/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll
@@ -36,35 +36,25 @@ define double @fminimum_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fminimum_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: feq.d a6, a0, a0
; RV32IZFINXZDINX-NEXT: mv a4, a2
; RV32IZFINXZDINX-NEXT: mv a5, a3
-; RV32IZFINXZDINX-NEXT: bnez a6, .LBB0_2
+; RV32IZFINXZDINX-NEXT: beqz a6, .LBB0_3
; RV32IZFINXZDINX-NEXT: # %bb.1:
+; RV32IZFINXZDINX-NEXT: feq.d a6, a2, a2
+; RV32IZFINXZDINX-NEXT: beqz a6, .LBB0_4
+; RV32IZFINXZDINX-NEXT: .LBB0_2:
+; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a4
+; RV32IZFINXZDINX-NEXT: ret
+; RV32IZFINXZDINX-NEXT: .LBB0_3:
; RV32IZFINXZDINX-NEXT: mv a4, a0
; RV32IZFINXZDINX-NEXT: mv a5, a1
-; RV32IZFINXZDINX-NEXT: .LBB0_2:
; RV32IZFINXZDINX-NEXT: feq.d a6, a2, a2
-; RV32IZFINXZDINX-NEXT: bnez a6, .LBB0_4
-; RV32IZFINXZDINX-NEXT: # %bb.3:
+; RV32IZFINXZDINX-NEXT: bnez a6, .LBB0_2
+; RV32IZFINXZDINX-NEXT: .LBB0_4:
; RV32IZFINXZDINX-NEXT: mv a0, a2
; RV32IZFINXZDINX-NEXT: mv a1, a3
-; RV32IZFINXZDINX-NEXT: .LBB0_4:
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fminimum_f64:
@@ -113,35 +103,25 @@ define double @fmaximum_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmaximum_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: feq.d a6, a0, a0
; RV32IZFINXZDINX-NEXT: mv a4, a2
; RV32IZFINXZDINX-NEXT: mv a5, a3
-; RV32IZFINXZDINX-NEXT: bnez a6, .LBB1_2
+; RV32IZFINXZDINX-NEXT: beqz a6, .LBB1_3
; RV32IZFINXZDINX-NEXT: # %bb.1:
+; RV32IZFINXZDINX-NEXT: feq.d a6, a2, a2
+; RV32IZFINXZDINX-NEXT: beqz a6, .LBB1_4
+; RV32IZFINXZDINX-NEXT: .LBB1_2:
+; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a4
+; RV32IZFINXZDINX-NEXT: ret
+; RV32IZFINXZDINX-NEXT: .LBB1_3:
; RV32IZFINXZDINX-NEXT: mv a4, a0
; RV32IZFINXZDINX-NEXT: mv a5, a1
-; RV32IZFINXZDINX-NEXT: .LBB1_2:
; RV32IZFINXZDINX-NEXT: feq.d a6, a2, a2
-; RV32IZFINXZDINX-NEXT: bnez a6, .LBB1_4
-; RV32IZFINXZDINX-NEXT: # %bb.3:
+; RV32IZFINXZDINX-NEXT: bnez a6, .LBB1_2
+; RV32IZFINXZDINX-NEXT: .LBB1_4:
; RV32IZFINXZDINX-NEXT: mv a0, a2
; RV32IZFINXZDINX-NEXT: mv a1, a3
-; RV32IZFINXZDINX-NEXT: .LBB1_4:
; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmaximum_f64:
@@ -174,21 +154,7 @@ define double @fminimum_nnan_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fminimum_nnan_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fminimum_nnan_f64:
@@ -221,35 +187,25 @@ define double @fmaximum_nnan_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmaximum_nnan_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: feq.d a6, a0, a0
; RV32IZFINXZDINX-NEXT: mv a4, a2
; RV32IZFINXZDINX-NEXT: mv a5, a3
-; RV32IZFINXZDINX-NEXT: bnez a6, .LBB3_2
+; RV32IZFINXZDINX-NEXT: beqz a6, .LBB3_3
; RV32IZFINXZDINX-NEXT: # %bb.1:
+; RV32IZFINXZDINX-NEXT: feq.d a6, a2, a2
+; RV32IZFINXZDINX-NEXT: beqz a6, .LBB3_4
+; RV32IZFINXZDINX-NEXT: .LBB3_2:
+; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a4
+; RV32IZFINXZDINX-NEXT: ret
+; RV32IZFINXZDINX-NEXT: .LBB3_3:
; RV32IZFINXZDINX-NEXT: mv a4, a0
; RV32IZFINXZDINX-NEXT: mv a5, a1
-; RV32IZFINXZDINX-NEXT: .LBB3_2:
; RV32IZFINXZDINX-NEXT: feq.d a6, a2, a2
-; RV32IZFINXZDINX-NEXT: bnez a6, .LBB3_4
-; RV32IZFINXZDINX-NEXT: # %bb.3:
+; RV32IZFINXZDINX-NEXT: bnez a6, .LBB3_2
+; RV32IZFINXZDINX-NEXT: .LBB3_4:
; RV32IZFINXZDINX-NEXT: mv a0, a2
; RV32IZFINXZDINX-NEXT: mv a1, a3
-; RV32IZFINXZDINX-NEXT: .LBB3_4:
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a4
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmaximum_nnan_f64:
@@ -289,30 +245,14 @@ define double @fminimum_nnan_op_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fminimum_nnan_op_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: feq.d a0, a2, a2
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: bnez a0, .LBB4_2
+; RV32IZFINXZDINX-NEXT: feq.d a4, a2, a2
+; RV32IZFINXZDINX-NEXT: bnez a4, .LBB4_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a0, a2
-; RV32IZFINXZDINX-NEXT: mv a1, a3
-; RV32IZFINXZDINX-NEXT: j .LBB4_3
+; RV32IZFINXZDINX-NEXT: fmin.d a0, a2, a2
+; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB4_2:
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a0
-; RV32IZFINXZDINX-NEXT: .LBB4_3:
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fminimum_nnan_op_f64:
@@ -341,23 +281,9 @@ define double @fmaximum_nnan_op_f64(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmaximum_nnan_op_f64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: fsub.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: fmax.d a0, a4, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmaximum_nnan_op_f64:
diff --git a/llvm/test/CodeGen/RISCV/double-mem.ll b/llvm/test/CodeGen/RISCV/double-mem.ll
index 6c6f70d..38cb52b 100644
--- a/llvm/test/CodeGen/RISCV/double-mem.ll
+++ b/llvm/test/CodeGen/RISCV/double-mem.ll
@@ -18,17 +18,11 @@ define dso_local double @fld(ptr %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fld:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: lw a2, 0(a0)
; RV32IZFINXZDINX-NEXT: lw a3, 4(a0)
; RV32IZFINXZDINX-NEXT: lw a1, 28(a0)
; RV32IZFINXZDINX-NEXT: lw a0, 24(a0)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fld:
@@ -56,21 +50,15 @@ define dso_local void @fsd(ptr %a, double %b, double %c) nounwind {
;
; RV32IZFINXZDINX-LABEL: fsd:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a3, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a4, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a4
+; RV32IZFINXZDINX-NEXT: mv a5, a4
+; RV32IZFINXZDINX-NEXT: mv a7, a2
+; RV32IZFINXZDINX-NEXT: mv a4, a3
+; RV32IZFINXZDINX-NEXT: mv a6, a1
+; RV32IZFINXZDINX-NEXT: fadd.d a2, a6, a4
; RV32IZFINXZDINX-NEXT: sw a2, 0(a0)
; RV32IZFINXZDINX-NEXT: sw a3, 4(a0)
; RV32IZFINXZDINX-NEXT: sw a2, 64(a0)
; RV32IZFINXZDINX-NEXT: sw a3, 68(a0)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsd:
@@ -105,15 +93,6 @@ define dso_local double @fld_fsd_global(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fld_fsd_global:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: lui a2, %hi(G)
; RV32IZFINXZDINX-NEXT: lw a4, %lo(G)(a2)
@@ -125,11 +104,6 @@ define dso_local double @fld_fsd_global(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: lw a5, 76(a3)
; RV32IZFINXZDINX-NEXT: sw a0, 72(a3)
; RV32IZFINXZDINX-NEXT: sw a1, 76(a3)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fld_fsd_global:
@@ -174,22 +148,12 @@ define dso_local double @fld_fsd_constant(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fld_fsd_constant:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lui a2, 912092
; RV32IZFINXZDINX-NEXT: lw a4, -273(a2)
; RV32IZFINXZDINX-NEXT: lw a5, -269(a2)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a4
; RV32IZFINXZDINX-NEXT: sw a0, -273(a2)
; RV32IZFINXZDINX-NEXT: sw a1, -269(a2)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fld_fsd_constant:
@@ -246,19 +210,13 @@ define dso_local double @fld_stack(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: mv s0, a0
; RV32IZFINXZDINX-NEXT: addi a0, sp, 8
; RV32IZFINXZDINX-NEXT: call notdead
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, s0
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -313,23 +271,15 @@ define dso_local void @fsd_stack(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fsd_stack:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 16(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 20(sp)
-; RV32IZFINXZDINX-NEXT: addi a0, sp, 16
+; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
+; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv a0, sp
; RV32IZFINXZDINX-NEXT: call notdead
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsd_stack:
@@ -360,14 +310,10 @@ define dso_local void @fsd_trunc(ptr %a, double %b) nounwind noinline optnone {
;
; RV32IZFINXZDINX-LABEL: fsd_trunc:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a1, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv a3, a2
+; RV32IZFINXZDINX-NEXT: mv a2, a1
; RV32IZFINXZDINX-NEXT: fcvt.s.d a1, a2
; RV32IZFINXZDINX-NEXT: sw a1, 0(a0)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsd_trunc:
diff --git a/llvm/test/CodeGen/RISCV/double-previous-failure.ll b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
index 8b8f538..c169b10 100644
--- a/llvm/test/CodeGen/RISCV/double-previous-failure.ll
+++ b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
@@ -50,10 +50,6 @@ define i32 @main() nounwind {
; RV32IZFINXZDINX-NEXT: lui a1, 262144
; RV32IZFINXZDINX-NEXT: li a0, 0
; RV32IZFINXZDINX-NEXT: call test
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI1_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI1_0)(a2)
diff --git a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
index 7cdf18e..f1c56b3 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
@@ -20,18 +20,11 @@ define signext i32 @test_floor_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rdn
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_si32:
@@ -50,48 +43,42 @@ define signext i32 @test_floor_si32(double %x) {
define i64 @test_floor_si64(double %x) nounwind {
; RV32IFD-LABEL: test_floor_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call floor
; RV32IFD-NEXT: lui a0, %hi(.LCPI1_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI1_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI1_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI1_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB1_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB1_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB1_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB1_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI1_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI1_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB1_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB1_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_floor_si64:
@@ -105,51 +92,45 @@ define i64 @test_floor_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_floor_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call floor
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI1_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI1_0)(a2)
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI1_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI1_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI1_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB1_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB1_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB1_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB1_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI1_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI1_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI1_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB1_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB1_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_si64:
@@ -177,18 +158,11 @@ define signext i32 @test_floor_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rdn
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_ui32:
@@ -241,38 +215,30 @@ define i64 @test_floor_ui64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_floor_ui64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call floor
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
+; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI3_0)
+; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI3_0+4)(a4)
+; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI3_0)(a4)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI3_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI3_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI3_0)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s2, a0
-; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg a2, a2
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: and a1, s2, a1
-; RV32IZFINXZDINX-NEXT: or a1, a2, a1
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0
+; RV32IZFINXZDINX-NEXT: neg a3, a3
+; RV32IZFINXZDINX-NEXT: or a0, a3, a0
+; RV32IZFINXZDINX-NEXT: and a1, a2, a1
+; RV32IZFINXZDINX-NEXT: or a1, a3, a1
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_ui64:
@@ -300,18 +266,11 @@ define signext i32 @test_ceil_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rup
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_si32:
@@ -330,48 +289,42 @@ define signext i32 @test_ceil_si32(double %x) {
define i64 @test_ceil_si64(double %x) nounwind {
; RV32IFD-LABEL: test_ceil_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call ceil
; RV32IFD-NEXT: lui a0, %hi(.LCPI5_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI5_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI5_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI5_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB5_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB5_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB5_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB5_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI5_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI5_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB5_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB5_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_ceil_si64:
@@ -385,51 +338,45 @@ define i64 @test_ceil_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_ceil_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call ceil
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI5_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI5_0)(a2)
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI5_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI5_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI5_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB5_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB5_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB5_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB5_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI5_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI5_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI5_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB5_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB5_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_si64:
@@ -457,18 +404,11 @@ define signext i32 @test_ceil_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rup
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_ui32:
@@ -521,38 +461,30 @@ define i64 @test_ceil_ui64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_ceil_ui64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call ceil
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
+; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI7_0)
+; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI7_0+4)(a4)
+; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI7_0)(a4)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI7_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI7_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI7_0)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s2, a0
-; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg a2, a2
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: and a1, s2, a1
-; RV32IZFINXZDINX-NEXT: or a1, a2, a1
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0
+; RV32IZFINXZDINX-NEXT: neg a3, a3
+; RV32IZFINXZDINX-NEXT: or a0, a3, a0
+; RV32IZFINXZDINX-NEXT: and a1, a2, a1
+; RV32IZFINXZDINX-NEXT: or a1, a3, a1
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_ui64:
@@ -580,18 +512,11 @@ define signext i32 @test_trunc_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rtz
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_si32:
@@ -610,48 +535,42 @@ define signext i32 @test_trunc_si32(double %x) {
define i64 @test_trunc_si64(double %x) nounwind {
; RV32IFD-LABEL: test_trunc_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call trunc
; RV32IFD-NEXT: lui a0, %hi(.LCPI9_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI9_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI9_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI9_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB9_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB9_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB9_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB9_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI9_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI9_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB9_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB9_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_trunc_si64:
@@ -665,51 +584,45 @@ define i64 @test_trunc_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_trunc_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call trunc
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI9_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI9_0)(a2)
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI9_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI9_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI9_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB9_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB9_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB9_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB9_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI9_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI9_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI9_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB9_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB9_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_si64:
@@ -737,18 +650,11 @@ define signext i32 @test_trunc_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rtz
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_ui32:
@@ -801,38 +707,30 @@ define i64 @test_trunc_ui64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_trunc_ui64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call trunc
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
+; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI11_0)
+; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI11_0+4)(a4)
+; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI11_0)(a4)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI11_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI11_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI11_0)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s2, a0
-; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg a2, a2
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: and a1, s2, a1
-; RV32IZFINXZDINX-NEXT: or a1, a2, a1
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0
+; RV32IZFINXZDINX-NEXT: neg a3, a3
+; RV32IZFINXZDINX-NEXT: or a0, a3, a0
+; RV32IZFINXZDINX-NEXT: and a1, a2, a1
+; RV32IZFINXZDINX-NEXT: or a1, a3, a1
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_ui64:
@@ -860,18 +758,11 @@ define signext i32 @test_round_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rmm
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_si32:
@@ -890,48 +781,42 @@ define signext i32 @test_round_si32(double %x) {
define i64 @test_round_si64(double %x) nounwind {
; RV32IFD-LABEL: test_round_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call round
; RV32IFD-NEXT: lui a0, %hi(.LCPI13_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI13_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI13_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB13_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB13_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB13_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB13_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI13_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI13_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB13_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB13_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_round_si64:
@@ -945,51 +830,45 @@ define i64 @test_round_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_round_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call round
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI13_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI13_0)(a2)
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI13_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI13_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI13_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB13_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB13_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB13_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB13_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI13_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI13_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI13_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB13_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB13_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_si64:
@@ -1017,18 +896,11 @@ define signext i32 @test_round_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rmm
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_ui32:
@@ -1081,38 +953,30 @@ define i64 @test_round_ui64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_round_ui64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call round
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
+; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI15_0)
+; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI15_0+4)(a4)
+; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI15_0)(a4)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI15_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI15_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI15_0)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s2, a0
-; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg a2, a2
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: and a1, s2, a1
-; RV32IZFINXZDINX-NEXT: or a1, a2, a1
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0
+; RV32IZFINXZDINX-NEXT: neg a3, a3
+; RV32IZFINXZDINX-NEXT: or a0, a3, a0
+; RV32IZFINXZDINX-NEXT: and a1, a2, a1
+; RV32IZFINXZDINX-NEXT: or a1, a3, a1
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_ui64:
@@ -1140,18 +1004,11 @@ define signext i32 @test_roundeven_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rne
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si32:
@@ -1170,48 +1027,42 @@ define signext i32 @test_roundeven_si32(double %x) {
define i64 @test_roundeven_si64(double %x) nounwind {
; RV32IFD-LABEL: test_roundeven_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call roundeven
; RV32IFD-NEXT: lui a0, %hi(.LCPI17_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI17_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI17_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB17_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB17_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB17_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB17_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI17_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI17_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB17_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB17_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_roundeven_si64:
@@ -1225,51 +1076,45 @@ define i64 @test_roundeven_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call roundeven
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI17_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI17_0)(a2)
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI17_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI17_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI17_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB17_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB17_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB17_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB17_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI17_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI17_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI17_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB17_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB17_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si64:
@@ -1297,18 +1142,11 @@ define signext i32 @test_roundeven_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rne
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui32:
@@ -1361,38 +1199,30 @@ define i64 @test_roundeven_ui64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call roundeven
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
+; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI19_0)
+; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI19_0+4)(a4)
+; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI19_0)(a4)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI19_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI19_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI19_0)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s2, a0
-; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg a2, a2
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: and a1, s2, a1
-; RV32IZFINXZDINX-NEXT: or a1, a2, a1
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0
+; RV32IZFINXZDINX-NEXT: neg a3, a3
+; RV32IZFINXZDINX-NEXT: or a0, a3, a0
+; RV32IZFINXZDINX-NEXT: and a1, a2, a1
+; RV32IZFINXZDINX-NEXT: or a1, a3, a1
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui64:
@@ -1420,18 +1250,11 @@ define signext i32 @test_rint_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_rint_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_rint_si32:
@@ -1450,48 +1273,42 @@ define signext i32 @test_rint_si32(double %x) {
define i64 @test_rint_si64(double %x) nounwind {
; RV32IFD-LABEL: test_rint_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call rint
; RV32IFD-NEXT: lui a0, %hi(.LCPI21_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI21_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI21_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI21_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB21_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB21_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB21_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB21_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI21_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI21_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB21_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB21_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_rint_si64:
@@ -1505,51 +1322,45 @@ define i64 @test_rint_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_rint_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call rint
-; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI21_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI21_0)(a2)
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI21_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI21_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI21_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB21_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB21_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB21_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB21_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI21_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI21_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI21_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB21_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB21_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_rint_si64:
@@ -1577,18 +1388,11 @@ define signext i32 @test_rint_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_rint_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: seqz a0, a0
; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_rint_ui32:
@@ -1641,38 +1445,30 @@ define i64 @test_rint_ui64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_rint_ui64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call rint
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv s0, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
+; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI23_0)
+; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI23_0+4)(a4)
+; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI23_0)(a4)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI23_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI23_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI23_0)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s2, a0
-; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg a2, a2
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: and a1, s2, a1
-; RV32IZFINXZDINX-NEXT: or a1, a2, a1
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0
+; RV32IZFINXZDINX-NEXT: neg a3, a3
+; RV32IZFINXZDINX-NEXT: or a0, a3, a0
+; RV32IZFINXZDINX-NEXT: and a1, a2, a1
+; RV32IZFINXZDINX-NEXT: or a1, a3, a1
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_rint_ui64:
diff --git a/llvm/test/CodeGen/RISCV/double-round-conv.ll b/llvm/test/CodeGen/RISCV/double-round-conv.ll
index 094a410..d84d80a 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv.ll
@@ -21,14 +21,7 @@ define signext i8 @test_floor_si8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_si8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rdn
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_si8:
@@ -53,14 +46,7 @@ define signext i16 @test_floor_si16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_si16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rdn
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_si16:
@@ -80,14 +66,7 @@ define signext i32 @test_floor_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rdn
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_si32:
@@ -151,14 +130,7 @@ define zeroext i8 @test_floor_ui8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_ui8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rdn
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_ui8:
@@ -183,14 +155,7 @@ define zeroext i16 @test_floor_ui16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_ui16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rdn
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_ui16:
@@ -210,14 +175,7 @@ define signext i32 @test_floor_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_floor_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rdn
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_ui32:
@@ -281,14 +239,7 @@ define signext i8 @test_ceil_si8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_si8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rup
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_si8:
@@ -313,14 +264,7 @@ define signext i16 @test_ceil_si16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_si16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rup
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_si16:
@@ -340,14 +284,7 @@ define signext i32 @test_ceil_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rup
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_si32:
@@ -411,14 +348,7 @@ define zeroext i8 @test_ceil_ui8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_ui8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rup
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_ui8:
@@ -443,14 +373,7 @@ define zeroext i16 @test_ceil_ui16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_ui16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rup
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_ui16:
@@ -470,14 +393,7 @@ define signext i32 @test_ceil_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_ceil_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rup
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_ui32:
@@ -541,14 +457,7 @@ define signext i8 @test_trunc_si8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_si8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_si8:
@@ -573,14 +482,7 @@ define signext i16 @test_trunc_si16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_si16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_si16:
@@ -600,14 +502,7 @@ define signext i32 @test_trunc_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_si32:
@@ -671,14 +566,7 @@ define zeroext i8 @test_trunc_ui8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_ui8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_ui8:
@@ -703,14 +591,7 @@ define zeroext i16 @test_trunc_ui16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_ui16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_ui16:
@@ -730,14 +611,7 @@ define signext i32 @test_trunc_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_trunc_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_ui32:
@@ -801,14 +675,7 @@ define signext i8 @test_round_si8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_si8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_si8:
@@ -833,14 +700,7 @@ define signext i16 @test_round_si16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_si16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_si16:
@@ -860,14 +720,7 @@ define signext i32 @test_round_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_si32:
@@ -931,14 +784,7 @@ define zeroext i8 @test_round_ui8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_ui8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_ui8:
@@ -963,14 +809,7 @@ define zeroext i16 @test_round_ui16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_ui16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_ui16:
@@ -990,14 +829,7 @@ define signext i32 @test_round_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_round_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rmm
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_ui32:
@@ -1061,14 +893,7 @@ define signext i8 @test_roundeven_si8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rne
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si8:
@@ -1093,14 +918,7 @@ define signext i16 @test_roundeven_si16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rne
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si16:
@@ -1120,14 +938,7 @@ define signext i32 @test_roundeven_si32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rne
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si32:
@@ -1191,14 +1002,7 @@ define zeroext i8 @test_roundeven_ui8(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui8:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rne
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui8:
@@ -1223,14 +1027,7 @@ define zeroext i16 @test_roundeven_ui16(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui16:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rne
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui16:
@@ -1250,14 +1047,7 @@ define signext i32 @test_roundeven_ui32(double %x) {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui32:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rne
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui32:
diff --git a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
index 766da36..654a460 100644
--- a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
@@ -41,26 +41,12 @@ define double @select_fcmp_oeq(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_oeq:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: bnez a4, .LBB1_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB1_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_oeq:
@@ -88,26 +74,12 @@ define double @select_fcmp_ogt(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_ogt:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32ZDINX-NEXT: flt.d a4, a2, a0
; CHECKRV32ZDINX-NEXT: bnez a4, .LBB2_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB2_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_ogt:
@@ -135,26 +107,12 @@ define double @select_fcmp_oge(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_oge:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32ZDINX-NEXT: fle.d a4, a2, a0
; CHECKRV32ZDINX-NEXT: bnez a4, .LBB3_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB3_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_oge:
@@ -182,26 +140,12 @@ define double @select_fcmp_olt(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_olt:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: flt.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: bnez a4, .LBB4_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB4_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_olt:
@@ -229,26 +173,12 @@ define double @select_fcmp_ole(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_ole:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: fle.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: bnez a4, .LBB5_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB5_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_ole:
@@ -278,15 +208,6 @@ define double @select_fcmp_one(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_one:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: flt.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: flt.d a5, a2, a0
; CHECKRV32ZDINX-NEXT: or a4, a5, a4
@@ -295,11 +216,6 @@ define double @select_fcmp_one(double %a, double %b) nounwind {
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB6_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_one:
@@ -331,15 +247,6 @@ define double @select_fcmp_ord(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_ord:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a4, a2, a2
; CHECKRV32ZDINX-NEXT: feq.d a5, a0, a0
; CHECKRV32ZDINX-NEXT: and a4, a5, a4
@@ -348,11 +255,6 @@ define double @select_fcmp_ord(double %a, double %b) nounwind {
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB7_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_ord:
@@ -384,15 +286,6 @@ define double @select_fcmp_ueq(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_ueq:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: flt.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: flt.d a5, a2, a0
; CHECKRV32ZDINX-NEXT: or a4, a5, a4
@@ -401,11 +294,6 @@ define double @select_fcmp_ueq(double %a, double %b) nounwind {
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB8_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_ueq:
@@ -435,26 +323,12 @@ define double @select_fcmp_ugt(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_ugt:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: fle.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: beqz a4, .LBB9_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB9_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_ugt:
@@ -482,26 +356,12 @@ define double @select_fcmp_uge(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_uge:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: flt.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: beqz a4, .LBB10_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB10_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_uge:
@@ -529,26 +389,12 @@ define double @select_fcmp_ult(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_ult:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32ZDINX-NEXT: fle.d a4, a2, a0
; CHECKRV32ZDINX-NEXT: beqz a4, .LBB11_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB11_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_ult:
@@ -576,26 +422,12 @@ define double @select_fcmp_ule(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_ule:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32ZDINX-NEXT: flt.d a4, a2, a0
; CHECKRV32ZDINX-NEXT: beqz a4, .LBB12_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB12_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_ule:
@@ -623,26 +455,12 @@ define double @select_fcmp_une(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_une:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a4, a0, a2
; CHECKRV32ZDINX-NEXT: beqz a4, .LBB13_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB13_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_une:
@@ -672,15 +490,6 @@ define double @select_fcmp_uno(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_uno:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a4, a2, a2
; CHECKRV32ZDINX-NEXT: feq.d a5, a0, a0
; CHECKRV32ZDINX-NEXT: and a4, a5, a4
@@ -689,11 +498,6 @@ define double @select_fcmp_uno(double %a, double %b) nounwind {
; CHECKRV32ZDINX-NEXT: mv a0, a2
; CHECKRV32ZDINX-NEXT: mv a1, a3
; CHECKRV32ZDINX-NEXT: .LBB14_2:
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_uno:
@@ -741,22 +545,12 @@ define i32 @i32_select_fcmp_oeq(double %a, double %b, i32 %c, i32 %d) nounwind {
;
; CHECKRV32ZDINX-LABEL: i32_select_fcmp_oeq:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a1, a0, a2
; CHECKRV32ZDINX-NEXT: mv a0, a4
; CHECKRV32ZDINX-NEXT: bnez a1, .LBB16_2
; CHECKRV32ZDINX-NEXT: # %bb.1:
; CHECKRV32ZDINX-NEXT: mv a0, a5
; CHECKRV32ZDINX-NEXT: .LBB16_2:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: i32_select_fcmp_oeq:
@@ -783,20 +577,9 @@ define i32 @select_fcmp_oeq_1_2(double %a, double %b) {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_oeq_1_2:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: feq.d a0, a0, a2
; CHECKRV32ZDINX-NEXT: li a1, 2
; CHECKRV32ZDINX-NEXT: sub a0, a1, a0
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_oeq_1_2:
@@ -819,18 +602,8 @@ define signext i32 @select_fcmp_uge_negone_zero(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_uge_negone_zero:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: fle.d a0, a0, a2
; CHECKRV32ZDINX-NEXT: addi a0, a0, -1
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_uge_negone_zero:
@@ -852,18 +625,8 @@ define signext i32 @select_fcmp_uge_1_2(double %a, double %b) nounwind {
;
; CHECKRV32ZDINX-LABEL: select_fcmp_uge_1_2:
; CHECKRV32ZDINX: # %bb.0:
-; CHECKRV32ZDINX-NEXT: addi sp, sp, -16
-; CHECKRV32ZDINX-NEXT: sw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a2, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a3, 12(sp)
-; CHECKRV32ZDINX-NEXT: sw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: sw a1, 12(sp)
-; CHECKRV32ZDINX-NEXT: lw a0, 8(sp)
-; CHECKRV32ZDINX-NEXT: lw a1, 12(sp)
; CHECKRV32ZDINX-NEXT: fle.d a0, a0, a2
; CHECKRV32ZDINX-NEXT: addi a0, a0, 1
-; CHECKRV32ZDINX-NEXT: addi sp, sp, 16
; CHECKRV32ZDINX-NEXT: ret
;
; CHECKRV64ZDINX-LABEL: select_fcmp_uge_1_2:
diff --git a/llvm/test/CodeGen/RISCV/double-select-icmp.ll b/llvm/test/CodeGen/RISCV/double-select-icmp.ll
index d864ff5..929ffc5 100644
--- a/llvm/test/CodeGen/RISCV/double-select-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-select-icmp.ll
@@ -20,24 +20,13 @@ define double @select_icmp_eq(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_eq:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: bne a0, a1, .LBB0_2
+; RV32ZDINX-NEXT: beq a0, a1, .LBB0_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB0_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_eq:
@@ -64,24 +53,13 @@ define double @select_icmp_ne(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_ne:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: beq a0, a1, .LBB1_2
+; RV32ZDINX-NEXT: bne a0, a1, .LBB1_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB1_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_ne:
@@ -108,24 +86,13 @@ define double @select_icmp_ugt(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_ugt:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: bgeu a1, a0, .LBB2_2
+; RV32ZDINX-NEXT: bltu a1, a0, .LBB2_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB2_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_ugt:
@@ -152,24 +119,13 @@ define double @select_icmp_uge(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_uge:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: bltu a0, a1, .LBB3_2
+; RV32ZDINX-NEXT: bgeu a0, a1, .LBB3_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB3_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_uge:
@@ -196,24 +152,13 @@ define double @select_icmp_ult(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_ult:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: bgeu a0, a1, .LBB4_2
+; RV32ZDINX-NEXT: bltu a0, a1, .LBB4_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB4_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_ult:
@@ -240,24 +185,13 @@ define double @select_icmp_ule(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_ule:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: bltu a1, a0, .LBB5_2
+; RV32ZDINX-NEXT: bgeu a1, a0, .LBB5_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB5_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_ule:
@@ -284,24 +218,13 @@ define double @select_icmp_sgt(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_sgt:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: bge a1, a0, .LBB6_2
+; RV32ZDINX-NEXT: blt a1, a0, .LBB6_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB6_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_sgt:
@@ -328,24 +251,13 @@ define double @select_icmp_sge(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_sge:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: blt a0, a1, .LBB7_2
+; RV32ZDINX-NEXT: bge a0, a1, .LBB7_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB7_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_sge:
@@ -372,24 +284,13 @@ define double @select_icmp_slt(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_slt:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: bge a0, a1, .LBB8_2
+; RV32ZDINX-NEXT: blt a0, a1, .LBB8_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB8_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_slt:
@@ -416,24 +317,13 @@ define double @select_icmp_sle(i32 signext %a, i32 signext %b, double %c, double
;
; RV32ZDINX-LABEL: select_icmp_sle:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
-; RV32ZDINX-NEXT: sw a2, 8(sp)
-; RV32ZDINX-NEXT: sw a3, 12(sp)
-; RV32ZDINX-NEXT: blt a1, a0, .LBB9_2
+; RV32ZDINX-NEXT: bge a1, a0, .LBB9_2
; RV32ZDINX-NEXT: # %bb.1:
-; RV32ZDINX-NEXT: lw a4, 8(sp)
-; RV32ZDINX-NEXT: lw a5, 12(sp)
+; RV32ZDINX-NEXT: mv a2, a4
+; RV32ZDINX-NEXT: mv a3, a5
; RV32ZDINX-NEXT: .LBB9_2:
-; RV32ZDINX-NEXT: sw a4, 8(sp)
-; RV32ZDINX-NEXT: sw a5, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
+; RV32ZDINX-NEXT: mv a0, a2
+; RV32ZDINX-NEXT: mv a1, a3
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_sle:
@@ -458,15 +348,8 @@ define double @select_icmp_slt_one(i32 signext %a) {
;
; RV32ZDINX-LABEL: select_icmp_slt_one:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32ZDINX-NEXT: slti a0, a0, 1
; RV32ZDINX-NEXT: fcvt.d.w a0, a0
-; RV32ZDINX-NEXT: sw a0, 8(sp)
-; RV32ZDINX-NEXT: sw a1, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_slt_one:
@@ -488,15 +371,8 @@ define double @select_icmp_sgt_zero(i32 signext %a) {
;
; RV32ZDINX-LABEL: select_icmp_sgt_zero:
; RV32ZDINX: # %bb.0:
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32ZDINX-NEXT: slti a0, a0, 1
; RV32ZDINX-NEXT: fcvt.d.w a0, a0
-; RV32ZDINX-NEXT: sw a0, 8(sp)
-; RV32ZDINX-NEXT: sw a1, 12(sp)
-; RV32ZDINX-NEXT: lw a0, 8(sp)
-; RV32ZDINX-NEXT: lw a1, 12(sp)
-; RV32ZDINX-NEXT: addi sp, sp, 16
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: select_icmp_sgt_zero:
diff --git a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
index aa88a36..4ae912a 100644
--- a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
+++ b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
@@ -62,40 +62,28 @@ define double @func(double %d, i32 %n) nounwind {
;
; RV32IZFINXZDINX-LABEL: func:
; RV32IZFINXZDINX: # %bb.0: # %entry
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: mv s1, a1
+; RV32IZFINXZDINX-NEXT: mv s0, a0
; RV32IZFINXZDINX-NEXT: beqz a2, .LBB0_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else
; RV32IZFINXZDINX-NEXT: addi a2, a2, -1
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv a0, s0
+; RV32IZFINXZDINX-NEXT: mv a1, s1
; RV32IZFINXZDINX-NEXT: call func
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, s0
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: j .LBB0_3
; RV32IZFINXZDINX-NEXT: .LBB0_2: # %return
-; RV32IZFINXZDINX-NEXT: sw s0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
+; RV32IZFINXZDINX-NEXT: mv a0, s0
+; RV32IZFINXZDINX-NEXT: mv a1, s1
; RV32IZFINXZDINX-NEXT: .LBB0_3: # %return
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: func:
diff --git a/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll b/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
index fb0b34c..a44d31d 100644
--- a/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
+++ b/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
@@ -160,17 +160,13 @@ define double @caller_double(double %x) nounwind {
;
; ZDINX32-LABEL: caller_double:
; ZDINX32: # %bb.0: # %entry
-; ZDINX32-NEXT: addi sp, sp, -32
-; ZDINX32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; ZDINX32-NEXT: sw a0, 16(sp)
-; ZDINX32-NEXT: sw a1, 20(sp)
-; ZDINX32-NEXT: lw a0, 16(sp)
-; ZDINX32-NEXT: lw a1, 20(sp)
+; ZDINX32-NEXT: addi sp, sp, -16
+; ZDINX32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; ZDINX32-NEXT: sw a0, 0(sp)
; ZDINX32-NEXT: sw a1, 4(sp)
; ZDINX32-NEXT: call d
-; ZDINX32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; ZDINX32-NEXT: addi sp, sp, 32
+; ZDINX32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; ZDINX32-NEXT: addi sp, sp, 16
; ZDINX32-NEXT: ret
;
; ZDINX64-LABEL: caller_double:
@@ -200,14 +196,8 @@ define internal fastcc double @d(double %x) nounwind {
;
; ZDINX32-LABEL: d:
; ZDINX32: # %bb.0: # %entry
-; ZDINX32-NEXT: addi sp, sp, -16
-; ZDINX32-NEXT: lw a0, 16(sp)
-; ZDINX32-NEXT: lw a1, 20(sp)
-; ZDINX32-NEXT: sw a0, 8(sp)
-; ZDINX32-NEXT: sw a1, 12(sp)
-; ZDINX32-NEXT: lw a0, 8(sp)
-; ZDINX32-NEXT: lw a1, 12(sp)
-; ZDINX32-NEXT: addi sp, sp, 16
+; ZDINX32-NEXT: lw a0, 0(sp)
+; ZDINX32-NEXT: lw a1, 4(sp)
; ZDINX32-NEXT: ret
;
; ZDINX64-LABEL: d:
@@ -1360,14 +1350,8 @@ define fastcc double @callee_double_32(<32 x double> %A) nounwind {
;
; ZDINX32-LABEL: callee_double_32:
; ZDINX32: # %bb.0:
-; ZDINX32-NEXT: addi sp, sp, -16
-; ZDINX32-NEXT: lw a0, 16(sp)
-; ZDINX32-NEXT: lw a1, 20(sp)
-; ZDINX32-NEXT: sw a0, 8(sp)
-; ZDINX32-NEXT: sw a1, 12(sp)
-; ZDINX32-NEXT: lw a0, 8(sp)
-; ZDINX32-NEXT: lw a1, 12(sp)
-; ZDINX32-NEXT: addi sp, sp, 16
+; ZDINX32-NEXT: lw a0, 0(sp)
+; ZDINX32-NEXT: lw a1, 4(sp)
; ZDINX32-NEXT: ret
;
; ZDINX64-LABEL: callee_double_32:
diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index 9fb78d4..2c7315f 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -629,23 +629,23 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI12_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI12_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB12_2
; RV32IF-NEXT: # %bb.1: # %start
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB12_2: # %start
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB12_4
+; RV32IF-NEXT: beqz a3, .LBB12_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB12_4: # %start
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -668,37 +668,35 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: neg s2, s1
; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
-; RV32IZFINX-NEXT: lui a2, %hi(.LCPI12_0)
-; RV32IZFINX-NEXT: lw a2, %lo(.LCPI12_0)(a2)
-; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
-; RV32IZFINX-NEXT: or a0, a2, a0
-; RV32IZFINX-NEXT: feq.s a2, s0, s0
-; RV32IZFINX-NEXT: neg a2, a2
-; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
+; RV32IZFINX-NEXT: lui a2, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB12_2
; RV32IZFINX-NEXT: # %bb.1: # %start
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a2, a1
; RV32IZFINX-NEXT: .LBB12_2: # %start
-; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB12_4
+; RV32IZFINX-NEXT: lui a1, %hi(.LCPI12_0)
+; RV32IZFINX-NEXT: lw a1, %lo(.LCPI12_0)(a1)
+; RV32IZFINX-NEXT: flt.s a3, a1, s0
+; RV32IZFINX-NEXT: beqz a3, .LBB12_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a2, a4, -1
; RV32IZFINX-NEXT: .LBB12_4: # %start
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: feq.s a1, s0, s0
+; RV32IZFINX-NEXT: neg a4, a1
+; RV32IZFINX-NEXT: and a1, a4, a2
+; RV32IZFINX-NEXT: neg a2, s1
+; RV32IZFINX-NEXT: and a0, a2, a0
+; RV32IZFINX-NEXT: neg a2, a3
+; RV32IZFINX-NEXT: or a0, a2, a0
+; RV32IZFINX-NEXT: and a0, a4, a0
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
-; RV32IZFINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
index c72e69c..4f747c2 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
@@ -59,23 +59,23 @@ define i64 @test_floor_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI1_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB1_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB1_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB1_6
+; RV32IF-NEXT: beqz a3, .LBB1_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB1_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -117,23 +117,23 @@ define i64 @test_floor_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI1_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB1_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB1_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB1_6
+; RV32IZFINX-NEXT: beqz a3, .LBB1_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB1_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -321,23 +321,23 @@ define i64 @test_ceil_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI5_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB5_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB5_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB5_6
+; RV32IF-NEXT: beqz a3, .LBB5_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB5_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -379,23 +379,23 @@ define i64 @test_ceil_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI5_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB5_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB5_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB5_6
+; RV32IZFINX-NEXT: beqz a3, .LBB5_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB5_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -583,23 +583,23 @@ define i64 @test_trunc_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI9_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB9_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB9_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB9_6
+; RV32IF-NEXT: beqz a3, .LBB9_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB9_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -641,23 +641,23 @@ define i64 @test_trunc_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI9_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB9_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB9_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB9_6
+; RV32IZFINX-NEXT: beqz a3, .LBB9_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB9_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -845,23 +845,23 @@ define i64 @test_round_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI13_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB13_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB13_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB13_6
+; RV32IF-NEXT: beqz a3, .LBB13_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB13_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -903,23 +903,23 @@ define i64 @test_round_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI13_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB13_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB13_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB13_6
+; RV32IZFINX-NEXT: beqz a3, .LBB13_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB13_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1107,23 +1107,23 @@ define i64 @test_roundeven_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI17_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB17_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB17_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB17_6
+; RV32IF-NEXT: beqz a3, .LBB17_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB17_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1165,23 +1165,23 @@ define i64 @test_roundeven_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI17_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB17_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB17_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB17_6
+; RV32IZFINX-NEXT: beqz a3, .LBB17_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB17_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1369,23 +1369,23 @@ define i64 @test_rint_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI21_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB21_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB21_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB21_6
+; RV32IF-NEXT: beqz a3, .LBB21_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB21_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1427,23 +1427,23 @@ define i64 @test_rint_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI21_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB21_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB21_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB21_6
+; RV32IZFINX-NEXT: beqz a3, .LBB21_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB21_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/half-convert-strict.ll b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
index f03a020..677aa92 100644
--- a/llvm/test/CodeGen/RISCV/half-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
@@ -1745,13 +1745,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
;
; RV32IZDINXZHINX-LABEL: fcvt_h_d:
; RV32IZDINXZHINX: # %bb.0:
-; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
-; RV32IZDINXZHINX-NEXT: sw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: sw a1, 12(sp)
-; RV32IZDINXZHINX-NEXT: lw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: lw a1, 12(sp)
; RV32IZDINXZHINX-NEXT: fcvt.h.d a0, a0
-; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
; RV32IZDINXZHINX-NEXT: ret
;
; RV64IZDINXZHINX-LABEL: fcvt_h_d:
@@ -1807,13 +1801,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
;
; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_d:
; CHECK32-IZDINXZHINXMIN: # %bb.0:
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp)
; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.d a0, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
;
; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_d:
@@ -1878,13 +1866,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
;
; RV32IZDINXZHINX-LABEL: fcvt_d_h:
; RV32IZDINXZHINX: # %bb.0:
-; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: fcvt.d.h a0, a0
-; RV32IZDINXZHINX-NEXT: sw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: sw a1, 12(sp)
-; RV32IZDINXZHINX-NEXT: lw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: lw a1, 12(sp)
-; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
; RV32IZDINXZHINX-NEXT: ret
;
; RV64IZDINXZHINX-LABEL: fcvt_d_h:
@@ -1944,13 +1926,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
;
; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_d_h:
; CHECK32-IZDINXZHINXMIN: # %bb.0:
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.d.h a0, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
;
; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_d_h:
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index 28ac6e2..16c0962 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -2460,47 +2460,42 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
;
; RV32ID-ILP32-LABEL: fcvt_l_h_sat:
; RV32ID-ILP32: # %bb.0: # %start
-; RV32ID-ILP32-NEXT: addi sp, sp, -32
-; RV32ID-ILP32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32ID-ILP32-NEXT: addi sp, sp, -16
+; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ID-ILP32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: call __extendhfsf2
-; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI10_0)
-; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI10_0)(a1)
; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0
-; RV32ID-ILP32-NEXT: fsw fa4, 8(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: flt.s s0, fa5, fa4
-; RV32ID-ILP32-NEXT: neg s1, s0
; RV32ID-ILP32-NEXT: lui a1, 913408
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a1
-; RV32ID-ILP32-NEXT: fle.s s2, fa5, fa4
-; RV32ID-ILP32-NEXT: neg s3, s2
+; RV32ID-ILP32-NEXT: fsw fa4, 4(sp) # 4-byte Folded Spill
+; RV32ID-ILP32-NEXT: fle.s s0, fa5, fa4
; RV32ID-ILP32-NEXT: call __fixsfdi
-; RV32ID-ILP32-NEXT: and a0, s3, a0
-; RV32ID-ILP32-NEXT: or a0, s1, a0
-; RV32ID-ILP32-NEXT: flw fa5, 8(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: feq.s a2, fa5, fa5
-; RV32ID-ILP32-NEXT: neg a2, a2
; RV32ID-ILP32-NEXT: lui a4, 524288
-; RV32ID-ILP32-NEXT: lui a3, 524288
-; RV32ID-ILP32-NEXT: beqz s2, .LBB10_2
+; RV32ID-ILP32-NEXT: lui a2, 524288
+; RV32ID-ILP32-NEXT: beqz s0, .LBB10_2
; RV32ID-ILP32-NEXT: # %bb.1: # %start
-; RV32ID-ILP32-NEXT: mv a3, a1
+; RV32ID-ILP32-NEXT: mv a2, a1
; RV32ID-ILP32-NEXT: .LBB10_2: # %start
-; RV32ID-ILP32-NEXT: and a0, a2, a0
-; RV32ID-ILP32-NEXT: beqz s0, .LBB10_4
+; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI10_0)
+; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI10_0)(a1)
+; RV32ID-ILP32-NEXT: flw fa4, 4(sp) # 4-byte Folded Reload
+; RV32ID-ILP32-NEXT: flt.s a3, fa5, fa4
+; RV32ID-ILP32-NEXT: fmv.s fa5, fa4
+; RV32ID-ILP32-NEXT: beqz a3, .LBB10_4
; RV32ID-ILP32-NEXT: # %bb.3:
-; RV32ID-ILP32-NEXT: addi a3, a4, -1
+; RV32ID-ILP32-NEXT: addi a2, a4, -1
; RV32ID-ILP32-NEXT: .LBB10_4: # %start
-; RV32ID-ILP32-NEXT: and a1, a2, a3
-; RV32ID-ILP32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: addi sp, sp, 32
+; RV32ID-ILP32-NEXT: feq.s a1, fa5, fa5
+; RV32ID-ILP32-NEXT: neg a4, a1
+; RV32ID-ILP32-NEXT: and a1, a4, a2
+; RV32ID-ILP32-NEXT: neg a2, a3
+; RV32ID-ILP32-NEXT: neg a3, s0
+; RV32ID-ILP32-NEXT: and a0, a3, a0
+; RV32ID-ILP32-NEXT: or a0, a2, a0
+; RV32ID-ILP32-NEXT: and a0, a4, a0
+; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32ID-ILP32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32ID-ILP32-NEXT: addi sp, sp, 16
; RV32ID-ILP32-NEXT: ret
;
; RV64ID-LP64-LABEL: fcvt_l_h_sat:
@@ -5275,21 +5270,10 @@ define half @fcvt_h_d(double %a) nounwind {
; RV64IZHINX-NEXT: addi sp, sp, 16
; RV64IZHINX-NEXT: ret
;
-; RV32IZDINXZHINX-LABEL: fcvt_h_d:
-; RV32IZDINXZHINX: # %bb.0:
-; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
-; RV32IZDINXZHINX-NEXT: sw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: sw a1, 12(sp)
-; RV32IZDINXZHINX-NEXT: lw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: lw a1, 12(sp)
-; RV32IZDINXZHINX-NEXT: fcvt.h.d a0, a0
-; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
-; RV32IZDINXZHINX-NEXT: ret
-;
-; RV64IZDINXZHINX-LABEL: fcvt_h_d:
-; RV64IZDINXZHINX: # %bb.0:
-; RV64IZDINXZHINX-NEXT: fcvt.h.d a0, a0
-; RV64IZDINXZHINX-NEXT: ret
+; CHECKIZDINXZHINX-LABEL: fcvt_h_d:
+; CHECKIZDINXZHINX: # %bb.0:
+; CHECKIZDINXZHINX-NEXT: fcvt.h.d a0, a0
+; CHECKIZDINXZHINX-NEXT: ret
;
; RV32I-LABEL: fcvt_h_d:
; RV32I: # %bb.0:
@@ -5405,13 +5389,7 @@ define half @fcvt_h_d(double %a) nounwind {
;
; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_d:
; CHECK32-IZDINXZHINXMIN: # %bb.0:
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp)
; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.d a0, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
;
; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_d:
@@ -5473,21 +5451,10 @@ define double @fcvt_d_h(half %a) nounwind {
; RV64IZHINX-NEXT: addi sp, sp, 16
; RV64IZHINX-NEXT: ret
;
-; RV32IZDINXZHINX-LABEL: fcvt_d_h:
-; RV32IZDINXZHINX: # %bb.0:
-; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
-; RV32IZDINXZHINX-NEXT: fcvt.d.h a0, a0
-; RV32IZDINXZHINX-NEXT: sw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: sw a1, 12(sp)
-; RV32IZDINXZHINX-NEXT: lw a0, 8(sp)
-; RV32IZDINXZHINX-NEXT: lw a1, 12(sp)
-; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
-; RV32IZDINXZHINX-NEXT: ret
-;
-; RV64IZDINXZHINX-LABEL: fcvt_d_h:
-; RV64IZDINXZHINX: # %bb.0:
-; RV64IZDINXZHINX-NEXT: fcvt.d.h a0, a0
-; RV64IZDINXZHINX-NEXT: ret
+; CHECKIZDINXZHINX-LABEL: fcvt_d_h:
+; CHECKIZDINXZHINX: # %bb.0:
+; CHECKIZDINXZHINX-NEXT: fcvt.d.h a0, a0
+; CHECKIZDINXZHINX-NEXT: ret
;
; RV32I-LABEL: fcvt_d_h:
; RV32I: # %bb.0:
@@ -5607,13 +5574,7 @@ define double @fcvt_d_h(half %a) nounwind {
;
; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_d_h:
; CHECK32-IZDINXZHINXMIN: # %bb.0:
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.d.h a0, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp)
-; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
;
; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_d_h:
diff --git a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
index dd1115b..9c95210 100644
--- a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
@@ -120,16 +120,16 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI1_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI1_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB1_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB1_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -137,11 +137,11 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB1_6
+; RV32IZFH-NEXT: beqz a3, .LBB1_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB1_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_floor_si64:
@@ -179,16 +179,16 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI1_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI1_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB1_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB1_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -196,11 +196,11 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB1_6
+; RV32IZHINX-NEXT: beqz a3, .LBB1_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB1_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_floor_si64:
@@ -251,16 +251,16 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI1_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB1_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB1_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -268,11 +268,11 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB1_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB1_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB1_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_floor_si64:
@@ -324,16 +324,16 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI1_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB1_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB1_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -341,11 +341,11 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB1_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB1_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB1_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_floor_si64:
@@ -836,16 +836,16 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI5_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI5_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB5_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB5_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -853,11 +853,11 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB5_6
+; RV32IZFH-NEXT: beqz a3, .LBB5_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB5_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_ceil_si64:
@@ -895,16 +895,16 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI5_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI5_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB5_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB5_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -912,11 +912,11 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB5_6
+; RV32IZHINX-NEXT: beqz a3, .LBB5_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB5_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_ceil_si64:
@@ -967,16 +967,16 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI5_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB5_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB5_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -984,11 +984,11 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB5_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB5_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB5_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_ceil_si64:
@@ -1040,16 +1040,16 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI5_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB5_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB5_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1057,11 +1057,11 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB5_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB5_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB5_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_ceil_si64:
@@ -1552,16 +1552,16 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI9_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI9_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB9_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB9_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1569,11 +1569,11 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB9_6
+; RV32IZFH-NEXT: beqz a3, .LBB9_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB9_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_trunc_si64:
@@ -1611,16 +1611,16 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI9_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI9_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB9_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB9_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1628,11 +1628,11 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB9_6
+; RV32IZHINX-NEXT: beqz a3, .LBB9_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB9_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_trunc_si64:
@@ -1683,16 +1683,16 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI9_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB9_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB9_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1700,11 +1700,11 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB9_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB9_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB9_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_trunc_si64:
@@ -1756,16 +1756,16 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI9_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB9_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB9_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1773,11 +1773,11 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB9_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB9_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB9_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_trunc_si64:
@@ -2268,16 +2268,16 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI13_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI13_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB13_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB13_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2285,11 +2285,11 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB13_6
+; RV32IZFH-NEXT: beqz a3, .LBB13_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB13_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_round_si64:
@@ -2327,16 +2327,16 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI13_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI13_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB13_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB13_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2344,11 +2344,11 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB13_6
+; RV32IZHINX-NEXT: beqz a3, .LBB13_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB13_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_round_si64:
@@ -2399,16 +2399,16 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI13_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB13_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB13_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2416,11 +2416,11 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB13_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB13_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB13_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_round_si64:
@@ -2472,16 +2472,16 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI13_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB13_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB13_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2489,11 +2489,11 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB13_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB13_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB13_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_round_si64:
@@ -2984,16 +2984,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI17_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI17_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB17_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB17_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3001,11 +3001,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB17_6
+; RV32IZFH-NEXT: beqz a3, .LBB17_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB17_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_roundeven_si64:
@@ -3043,16 +3043,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI17_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI17_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB17_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB17_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3060,11 +3060,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB17_6
+; RV32IZHINX-NEXT: beqz a3, .LBB17_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB17_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_roundeven_si64:
@@ -3115,16 +3115,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI17_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB17_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB17_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3132,11 +3132,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB17_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB17_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB17_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_roundeven_si64:
@@ -3188,16 +3188,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI17_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB17_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB17_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3205,11 +3205,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB17_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB17_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB17_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_roundeven_si64:
@@ -3700,16 +3700,16 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI21_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI21_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB21_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB21_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3717,11 +3717,11 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB21_6
+; RV32IZFH-NEXT: beqz a3, .LBB21_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB21_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_rint_si64:
@@ -3759,16 +3759,16 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI21_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI21_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB21_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB21_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3776,11 +3776,11 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB21_6
+; RV32IZHINX-NEXT: beqz a3, .LBB21_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB21_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_rint_si64:
@@ -3831,16 +3831,16 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI21_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB21_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB21_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3848,11 +3848,11 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB21_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB21_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB21_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_rint_si64:
@@ -3904,16 +3904,16 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI21_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB21_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB21_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3921,11 +3921,11 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB21_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB21_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB21_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_rint_si64:
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll b/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
index 71769a8..c480ba8 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
@@ -75,24 +75,10 @@ define double @constraint_f_double_abi_name(double %a) nounwind {
define double @constraint_gpr(double %x) {
; RV32F-LABEL: constraint_gpr:
; RV32F: # %bb.0:
-; RV32F-NEXT: addi sp, sp, -32
-; RV32F-NEXT: .cfi_def_cfa_offset 32
-; RV32F-NEXT: sw a0, 8(sp)
-; RV32F-NEXT: sw a1, 12(sp)
-; RV32F-NEXT: fld fa5, 8(sp)
-; RV32F-NEXT: fsd fa5, 24(sp)
-; RV32F-NEXT: lw a0, 24(sp)
-; RV32F-NEXT: lw a1, 28(sp)
+; RV32F-NEXT: .cfi_def_cfa_offset 0
; RV32F-NEXT: #APP
; RV32F-NEXT: mv a0, a0
; RV32F-NEXT: #NO_APP
-; RV32F-NEXT: sw a1, 20(sp)
-; RV32F-NEXT: sw a0, 16(sp)
-; RV32F-NEXT: fld fa5, 16(sp)
-; RV32F-NEXT: fsd fa5, 8(sp)
-; RV32F-NEXT: lw a0, 8(sp)
-; RV32F-NEXT: lw a1, 12(sp)
-; RV32F-NEXT: addi sp, sp, 32
; RV32F-NEXT: ret
;
; RV64F-LABEL: constraint_gpr:
diff --git a/llvm/test/CodeGen/RISCV/live-sp.mir b/llvm/test/CodeGen/RISCV/live-sp.mir
index 8dd307f..fa6297a 100644
--- a/llvm/test/CodeGen/RISCV/live-sp.mir
+++ b/llvm/test/CodeGen/RISCV/live-sp.mir
@@ -44,7 +44,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 4
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/RISCV/machine-combiner.ll b/llvm/test/CodeGen/RISCV/machine-combiner.ll
index cfdefec..ebf232c 100644
--- a/llvm/test/CodeGen/RISCV/machine-combiner.ll
+++ b/llvm/test/CodeGen/RISCV/machine-combiner.ll
@@ -740,9 +740,9 @@ define i8 @test_reassoc_minu_i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3) {
; CHECK-LABEL: test_reassoc_minu_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a3, a3, 255
-; CHECK-NEXT: andi a2, a2, 255
; CHECK-NEXT: andi a1, a1, 255
; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: andi a2, a2, 255
; CHECK-NEXT: minu a0, a0, a1
; CHECK-NEXT: minu a1, a2, a3
; CHECK-NEXT: minu a0, a0, a1
@@ -757,9 +757,9 @@ define i16 @test_reassoc_minu_i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3) {
; CHECK-LABEL: test_reassoc_minu_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: zext.h a3, a3
-; CHECK-NEXT: zext.h a2, a2
; CHECK-NEXT: zext.h a1, a1
; CHECK-NEXT: zext.h a0, a0
+; CHECK-NEXT: zext.h a2, a2
; CHECK-NEXT: minu a0, a0, a1
; CHECK-NEXT: minu a1, a2, a3
; CHECK-NEXT: minu a0, a0, a1
@@ -774,9 +774,9 @@ define i32 @test_reassoc_minu_i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: test_reassoc_minu_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.w a3, a3
-; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: sext.w a1, a1
; CHECK-NEXT: sext.w a0, a0
+; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: minu a0, a0, a1
; CHECK-NEXT: minu a1, a2, a3
; CHECK-NEXT: minu a0, a0, a1
@@ -804,9 +804,9 @@ define i8 @test_reassoc_min_i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3) {
; CHECK-LABEL: test_reassoc_min_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.b a3, a3
-; CHECK-NEXT: sext.b a2, a2
; CHECK-NEXT: sext.b a1, a1
; CHECK-NEXT: sext.b a0, a0
+; CHECK-NEXT: sext.b a2, a2
; CHECK-NEXT: min a0, a0, a1
; CHECK-NEXT: min a1, a2, a3
; CHECK-NEXT: min a0, a0, a1
@@ -821,9 +821,9 @@ define i16 @test_reassoc_min_i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3) {
; CHECK-LABEL: test_reassoc_min_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.h a3, a3
-; CHECK-NEXT: sext.h a2, a2
; CHECK-NEXT: sext.h a1, a1
; CHECK-NEXT: sext.h a0, a0
+; CHECK-NEXT: sext.h a2, a2
; CHECK-NEXT: min a0, a0, a1
; CHECK-NEXT: min a1, a2, a3
; CHECK-NEXT: min a0, a0, a1
@@ -838,9 +838,9 @@ define i32 @test_reassoc_min_i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: test_reassoc_min_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.w a3, a3
-; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: sext.w a1, a1
; CHECK-NEXT: sext.w a0, a0
+; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: min a0, a0, a1
; CHECK-NEXT: min a1, a2, a3
; CHECK-NEXT: min a0, a0, a1
@@ -868,9 +868,9 @@ define i8 @test_reassoc_maxu_i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3) {
; CHECK-LABEL: test_reassoc_maxu_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a3, a3, 255
-; CHECK-NEXT: andi a2, a2, 255
; CHECK-NEXT: andi a1, a1, 255
; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: andi a2, a2, 255
; CHECK-NEXT: maxu a0, a0, a1
; CHECK-NEXT: maxu a1, a2, a3
; CHECK-NEXT: maxu a0, a0, a1
@@ -885,9 +885,9 @@ define i16 @test_reassoc_maxu_i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3) {
; CHECK-LABEL: test_reassoc_maxu_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: zext.h a3, a3
-; CHECK-NEXT: zext.h a2, a2
; CHECK-NEXT: zext.h a1, a1
; CHECK-NEXT: zext.h a0, a0
+; CHECK-NEXT: zext.h a2, a2
; CHECK-NEXT: maxu a0, a0, a1
; CHECK-NEXT: maxu a1, a2, a3
; CHECK-NEXT: maxu a0, a0, a1
@@ -902,9 +902,9 @@ define i32 @test_reassoc_maxu_i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: test_reassoc_maxu_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.w a3, a3
-; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: sext.w a1, a1
; CHECK-NEXT: sext.w a0, a0
+; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: maxu a0, a0, a1
; CHECK-NEXT: maxu a1, a2, a3
; CHECK-NEXT: maxu a0, a0, a1
@@ -932,9 +932,9 @@ define i8 @test_reassoc_max_i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3) {
; CHECK-LABEL: test_reassoc_max_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.b a3, a3
-; CHECK-NEXT: sext.b a2, a2
; CHECK-NEXT: sext.b a1, a1
; CHECK-NEXT: sext.b a0, a0
+; CHECK-NEXT: sext.b a2, a2
; CHECK-NEXT: max a0, a0, a1
; CHECK-NEXT: max a1, a2, a3
; CHECK-NEXT: max a0, a0, a1
@@ -949,9 +949,9 @@ define i16 @test_reassoc_max_i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3) {
; CHECK-LABEL: test_reassoc_max_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.h a3, a3
-; CHECK-NEXT: sext.h a2, a2
; CHECK-NEXT: sext.h a1, a1
; CHECK-NEXT: sext.h a0, a0
+; CHECK-NEXT: sext.h a2, a2
; CHECK-NEXT: max a0, a0, a1
; CHECK-NEXT: max a1, a2, a3
; CHECK-NEXT: max a0, a0, a1
@@ -966,9 +966,9 @@ define i32 @test_reassoc_max_i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: test_reassoc_max_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.w a3, a3
-; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: sext.w a1, a1
; CHECK-NEXT: sext.w a0, a0
+; CHECK-NEXT: sext.w a2, a2
; CHECK-NEXT: max a0, a0, a1
; CHECK-NEXT: max a1, a2, a3
; CHECK-NEXT: max a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/make-compressible-zbc.mir b/llvm/test/CodeGen/RISCV/make-compressible-zbc.mir
new file mode 100644
index 0000000..89a6ca7a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/make-compressible-zbc.mir
@@ -0,0 +1,585 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -o - %s -mtriple=riscv32 -mattr=+zcb -simplify-mir \
+# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=CHECK %s
+# RUN: llc -o - %s -mtriple=riscv64 -mattr=+zcb -simplify-mir \
+# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=CHECK %s
+
+--- |
+ define void @store_common_value_i8(ptr %a, ptr %b, ptr %c) #0 {
+ entry:
+ store i8 0, ptr %a, align 1
+ store i8 0, ptr %b, align 1
+ store i8 0, ptr %c, align 1
+ ret void
+ }
+
+ define void @store_common_value_i16(ptr %a, ptr %b, ptr %c) #0 {
+ entry:
+ store i16 0, ptr %a, align 2
+ store i16 0, ptr %b, align 2
+ store i16 0, ptr %c, align 2
+ ret void
+ }
+
+ define void @store_common_ptr_i8(ptr %p) #0 {
+ entry:
+ store volatile i8 1, ptr %p, align 1
+ store volatile i8 3, ptr %p, align 1
+ store volatile i8 5, ptr %p, align 1
+ ret void
+ }
+
+ define void @store_common_ptr_i16(ptr %p) #0 {
+ entry:
+ store volatile i16 1, ptr %p, align 2
+ store volatile i16 3, ptr %p, align 2
+ store volatile i16 5, ptr %p, align 2
+ ret void
+ }
+
+ define void @load_common_ptr_i8(ptr %p) #0 {
+ entry:
+ %0 = load volatile i8, ptr %p, align 1
+ %a = sext i8 %0 to i32
+ %1 = load volatile i8, ptr %p, align 1
+ %2 = load volatile i8, ptr %p, align 1
+ ret void
+ }
+
+ define void @load_common_ptr_s16(ptr %p) #0 {
+ entry:
+ %0 = load volatile i16, ptr %p, align 2
+ %1 = load volatile i16, ptr %p, align 2
+ %2 = load volatile i16, ptr %p, align 2
+ ret void
+ }
+
+ define void @load_common_ptr_u16(ptr %p) #0 {
+ entry:
+ %0 = load volatile i16, ptr %p, align 2
+ %1 = load volatile i16, ptr %p, align 2
+ %2 = load volatile i16, ptr %p, align 2
+ ret void
+ }
+
+ define void @store_large_offset_i8(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i8, ptr %p, i8 100
+ store volatile i8 1, ptr %0, align 1
+ %1 = getelementptr inbounds i8, ptr %p, i8 101
+ store volatile i8 3, ptr %1, align 1
+ %2 = getelementptr inbounds i8, ptr %p, i8 102
+ store volatile i8 5, ptr %2, align 1
+ %3 = getelementptr inbounds i8, ptr %p, i8 103
+ store volatile i8 7, ptr %3, align 1
+ ret void
+ }
+
+ define void @store_large_offset_i16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ store volatile i16 1, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 100
+ store volatile i16 3, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 101
+ store volatile i16 3, ptr %1, align 2
+ %3 = getelementptr inbounds i16, ptr %p, i16 101
+ store volatile i16 7, ptr %3, align 2
+ ret void
+ }
+
+ define void @load_large_offset_i8(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i8, ptr %p, i8 100
+ %a = load volatile i8, ptr %0
+ %1 = getelementptr inbounds i8, ptr %p, i8 101
+ %b = load volatile i8, ptr %1
+ %2 = getelementptr inbounds i8, ptr %p, i8 102
+ %c = load volatile i8, ptr %2
+ %3 = getelementptr inbounds i8, ptr %p, i8 103
+ %d = load volatile i8, ptr %3
+ ret void
+ }
+
+ define void @load_large_offset_s16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ %a = load volatile i16, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 100
+ %b = load volatile i16, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 101
+ %c = load volatile i16, ptr %2, align 2
+ %3 = getelementptr inbounds i16, ptr %p, i16 101
+ %d = load volatile i16, ptr %3, align 2
+ ret void
+ }
+
+ define void @load_large_offset_u16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ %a = load volatile i16, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 100
+ %b = load volatile i16, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 101
+ %c = load volatile i16, ptr %2, align 2
+ %3 = getelementptr inbounds i16, ptr %p, i16 101
+ %d = load volatile i16, ptr %3, align 2
+ ret void
+ }
+ define void @store_large_offset_no_opt_i8(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i8, ptr %p, i8 100
+ store volatile i8 1, ptr %0, align 1
+ %1 = getelementptr inbounds i8, ptr %p, i8 101
+ store volatile i8 3, ptr %1, align 1
+ %2 = getelementptr inbounds i8, ptr %p, i8 104
+ store volatile i8 5, ptr %2, align 1
+ ret void
+ }
+
+ define void @store_large_offset_no_opt_i16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ %a = load volatile i16, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 100
+ %b = load volatile i16, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 101
+ %c = load volatile i16, ptr %2, align 2
+ %3 = getelementptr inbounds i16, ptr %p, i16 102
+ %d = load volatile i16, ptr %3, align 2
+ ret void
+ }
+
+ define void @load_large_offset_no_opt_i8(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i8, ptr %p, i8 100
+ %a = load volatile i8, ptr %0
+ %1 = getelementptr inbounds i8, ptr %p, i8 101
+ %b = load volatile i8, ptr %1
+ %2 = getelementptr inbounds i8, ptr %p, i8 103
+ %c = load volatile i8, ptr %2
+ ret void
+ }
+
+ define void @load_large_offset_no_opt_s16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ %a = load volatile i16, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 101
+ %c = load volatile i16, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 102
+ %d = load volatile i16, ptr %2, align 2
+ ret void
+ }
+
+ define void @load_large_offset_no_opt_u16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ %a = load volatile i16, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 101
+ %c = load volatile i16, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 102
+ %d = load volatile i16, ptr %2, align 2
+ ret void
+ }
+ attributes #0 = { minsize }
+
+...
+---
+name: store_common_value_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11, $x12
+
+ ; CHECK-LABEL: name: store_common_value_i8
+ ; CHECK: liveins: $x10, $x11, $x12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x13 = ADDI $x0, 0
+ ; CHECK-NEXT: SB $x13, killed renamable $x10, 0 :: (store (s8) into %ir.a)
+ ; CHECK-NEXT: SB $x13, killed renamable $x11, 0 :: (store (s8) into %ir.b)
+ ; CHECK-NEXT: SB $x13, killed renamable $x12, 0 :: (store (s8) into %ir.c)
+ ; CHECK-NEXT: PseudoRET
+ SB $x0, killed renamable $x10, 0 :: (store (s8) into %ir.a)
+ SB $x0, killed renamable $x11, 0 :: (store (s8) into %ir.b)
+ SB $x0, killed renamable $x12, 0 :: (store (s8) into %ir.c)
+ PseudoRET
+
+...
+---
+name: store_common_value_i16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11, $x12
+
+ ; CHECK-LABEL: name: store_common_value_i16
+ ; CHECK: liveins: $x10, $x11, $x12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x13 = ADDI $x0, 0
+ ; CHECK-NEXT: SH $x13, killed renamable $x10, 0 :: (store (s16) into %ir.a)
+ ; CHECK-NEXT: SH $x13, killed renamable $x11, 0 :: (store (s16) into %ir.b)
+ ; CHECK-NEXT: SH $x13, killed renamable $x12, 0 :: (store (s16) into %ir.c)
+ ; CHECK-NEXT: PseudoRET
+ SH $x0, killed renamable $x10, 0 :: (store (s16) into %ir.a)
+ SH $x0, killed renamable $x11, 0 :: (store (s16) into %ir.b)
+ SH $x0, killed renamable $x12, 0 :: (store (s16) into %ir.c)
+ PseudoRET
+
+...
+---
+name: store_common_ptr_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: store_common_ptr_i8
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 1
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: SB killed renamable $x10, $x11, 0 :: (volatile store (s8) into %ir.p)
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 3
+ ; CHECK-NEXT: SB killed renamable $x10, $x11, 0 :: (volatile store (s8) into %ir.p)
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 5
+ ; CHECK-NEXT: SB killed renamable $x10, killed $x11, 0 :: (volatile store (s8) into %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x10 = ADDI $x0, 1
+ SB killed renamable $x10, renamable $x16, 0 :: (volatile store (s8) into %ir.p)
+ renamable $x10 = ADDI $x0, 3
+ SB killed renamable $x10, renamable $x16, 0 :: (volatile store (s8) into %ir.p)
+ renamable $x10 = ADDI $x0, 5
+ SB killed renamable $x10, killed renamable $x16, 0 :: (volatile store (s8) into %ir.p)
+ PseudoRET
+
+...
+---
+name: store_common_ptr_i16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: store_common_ptr_i16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 1
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: SH killed renamable $x10, $x11, 0 :: (volatile store (s16) into %ir.p)
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 3
+ ; CHECK-NEXT: SH killed renamable $x10, $x11, 0 :: (volatile store (s16) into %ir.p)
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 5
+ ; CHECK-NEXT: SH killed renamable $x10, killed $x11, 0 :: (volatile store (s16) into %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x10 = ADDI $x0, 1
+ SH killed renamable $x10, renamable $x16, 0 :: (volatile store (s16) into %ir.p)
+ renamable $x10 = ADDI $x0, 3
+ SH killed renamable $x10, renamable $x16, 0 :: (volatile store (s16) into %ir.p)
+ renamable $x10 = ADDI $x0, 5
+ SH killed renamable $x10, killed renamable $x16, 0 :: (volatile store (s16) into %ir.p)
+ PseudoRET
+
+...
+---
+name: load_common_ptr_i8
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_common_ptr_i8
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: dead $x10 = LBU $x11, 0 :: (volatile load (s8) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LBU $x11, 0 :: (volatile load (s8) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LBU killed $x11, 0 :: (volatile load (s8) from %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LBU renamable $x16, 0 :: (volatile load (s8) from %ir.p)
+ dead $x10 = LBU renamable $x16, 0 :: (volatile load (s8) from %ir.p)
+ dead $x10 = LBU killed renamable $x16, 0 :: (volatile load (s8) from %ir.p)
+ PseudoRET
+
+...
+---
+name: load_common_ptr_s16
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_common_ptr_s16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: dead $x10 = LH $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LH $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LH killed $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LH renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ dead $x10 = LH renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ dead $x10 = LH killed renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ PseudoRET
+
+...
+---
+name: load_common_ptr_u16
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_common_ptr_u16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: dead $x10 = LHU $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LHU $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LHU killed $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LHU renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ dead $x10 = LHU renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ dead $x10 = LHU killed renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ PseudoRET
+
+...
+---
+name: store_large_offset_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: store_large_offset_i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
+ ; CHECK-NEXT: $x12 = ADDI $x10, 100
+ ; CHECK-NEXT: SB killed renamable $x11, $x12, 0 :: (volatile store (s8) into %ir.0)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
+ ; CHECK-NEXT: SB killed renamable $x11, $x12, 1 :: (volatile store (s8) into %ir.1)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 5
+ ; CHECK-NEXT: SB killed renamable $x11, $x12, 2 :: (volatile store (s8) into %ir.2)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 7
+ ; CHECK-NEXT: SB killed renamable $x11, killed $x12, 3 :: (volatile store (s8) into %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x11 = ADDI $x0, 1
+ SB killed renamable $x11, renamable $x10, 100 :: (volatile store (s8) into %ir.0)
+ renamable $x11 = ADDI $x0, 3
+ SB killed renamable $x11, renamable $x10, 101 :: (volatile store (s8) into %ir.1)
+ renamable $x11 = ADDI $x0, 5
+ SB killed renamable $x11, renamable $x10, 102 :: (volatile store (s8) into %ir.2)
+ renamable $x11 = ADDI $x0, 7
+ SB killed renamable $x11, killed renamable $x10, 103 :: (volatile store (s8) into %ir.3)
+ PseudoRET
+
+...
+---
+name: store_large_offset_i16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ ; CHECK-LABEL: name: store_large_offset_i16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
+ ; CHECK-NEXT: $x12 = ADDI $x10, 200
+ ; CHECK-NEXT: SH killed renamable $x11, $x12, 0 :: (volatile store (s16) into %ir.0)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
+ ; CHECK-NEXT: SH killed renamable $x11, $x12, 0 :: (volatile store (s16) into %ir.1)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 5
+ ; CHECK-NEXT: SH killed renamable $x11, $x12, 2 :: (volatile store (s16) into %ir.2)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 7
+ ; CHECK-NEXT: SH killed renamable $x11, killed $x12, 2 :: (volatile store (s16) into %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x11 = ADDI $x0, 1
+ SH killed renamable $x11, renamable $x10, 200 :: (volatile store (s16) into %ir.0)
+ renamable $x11 = ADDI $x0, 3
+ SH killed renamable $x11, renamable $x10, 200 :: (volatile store (s16) into %ir.1)
+ renamable $x11 = ADDI $x0, 5
+ SH killed renamable $x11, renamable $x10, 202 :: (volatile store (s16) into %ir.2)
+ renamable $x11 = ADDI $x0, 7
+ SH killed renamable $x11, killed renamable $x10, 202 :: (volatile store (s16) into %ir.3)
+ PseudoRET
+
+...
+---
+name: load_large_offset_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_i8
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 100
+ ; CHECK-NEXT: dead $x10 = LBU $x11, 0 :: (volatile load (s8) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LBU $x11, 1 :: (volatile load (s8) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LBU $x11, 2 :: (volatile load (s8) from %ir.2)
+ ; CHECK-NEXT: dead $x10 = LBU killed $x11, 3 :: (volatile load (s8) from %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LBU renamable $x16, 100 :: (volatile load (s8) from %ir.0)
+ dead $x10 = LBU renamable $x16, 101 :: (volatile load (s8) from %ir.1)
+ dead $x10 = LBU renamable $x16, 102 :: (volatile load (s8) from %ir.2)
+ dead $x10 = LBU killed renamable $x16, 103 :: (volatile load (s8) from %ir.3)
+ PseudoRET
+
+...
+---
+name: load_large_offset_s16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_s16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 100
+ ; CHECK-NEXT: dead $x10 = LH $x11, 0 :: (volatile load (s16) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LH $x11, 0 :: (volatile load (s16) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LH $x11, 2 :: (volatile load (s16) from %ir.2)
+ ; CHECK-NEXT: dead $x10 = LH killed $x11, 2 :: (volatile load (s16) from %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LH renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+ dead $x10 = LH renamable $x16, 100 :: (volatile load (s16) from %ir.1)
+ dead $x10 = LH renamable $x16, 102 :: (volatile load (s16) from %ir.2)
+ dead $x10 = LH killed renamable $x16, 102 :: (volatile load (s16) from %ir.3)
+ PseudoRET
+
+...
+---
+name: load_large_offset_u16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_u16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 100
+ ; CHECK-NEXT: dead $x10 = LHU $x11, 0 :: (volatile load (s16) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LHU $x11, 0 :: (volatile load (s16) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LHU $x11, 2 :: (volatile load (s16) from %ir.2)
+ ; CHECK-NEXT: dead $x10 = LHU killed $x11, 2 :: (volatile load (s16) from %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LHU renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+ dead $x10 = LHU renamable $x16, 100 :: (volatile load (s16) from %ir.1)
+ dead $x10 = LHU renamable $x16, 102 :: (volatile load (s16) from %ir.2)
+ dead $x10 = LHU killed renamable $x16, 102 :: (volatile load (s16) from %ir.3)
+ PseudoRET
+
+...
+---
+name: store_large_offset_no_opt_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: store_large_offset_no_opt_i8
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
+ ; CHECK-NEXT: SB killed renamable $x11, renamable $x16, 100 :: (volatile store (s8) into %ir.0)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
+ ; CHECK-NEXT: SB killed renamable $x11, renamable $x16, 101 :: (volatile store (s8) into %ir.1)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 5
+ ; CHECK-NEXT: SB killed renamable $x11, renamable $x16, 104 :: (volatile store (s8) into %ir.2)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x11 = ADDI $x0, 1
+ SB killed renamable $x11, renamable $x16, 100 :: (volatile store (s8) into %ir.0)
+ renamable $x11 = ADDI $x0, 3
+ SB killed renamable $x11, renamable $x16, 101 :: (volatile store (s8) into %ir.1)
+ renamable $x11 = ADDI $x0, 5
+ SB killed renamable $x11, renamable $x16, 104 :: (volatile store (s8) into %ir.2)
+ PseudoRET
+
+...
+---
+name: store_large_offset_no_opt_i16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: store_large_offset_no_opt_i16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
+ ; CHECK-NEXT: SH killed renamable $x11, renamable $x16, 200 :: (volatile store (s16) into %ir.0)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
+ ; CHECK-NEXT: SH killed renamable $x11, renamable $x16, 202 :: (volatile store (s16) into %ir.1)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 5
+ ; CHECK-NEXT: SH killed renamable $x11, renamable $x16, 204 :: (volatile store (s16) into %ir.2)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x11 = ADDI $x0, 1
+ SH killed renamable $x11, renamable $x16, 200 :: (volatile store (s16) into %ir.0)
+ renamable $x11 = ADDI $x0, 3
+ SH killed renamable $x11, renamable $x16, 202 :: (volatile store (s16) into %ir.1)
+ renamable $x11 = ADDI $x0, 5
+ SH killed renamable $x11, renamable $x16, 204 :: (volatile store (s16) into %ir.2)
+ PseudoRET
+
+...
+---
+name: load_large_offset_no_opt_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_no_opt_i8
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead $x10 = LBU renamable $x16, 100 :: (volatile load (s8) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LBU renamable $x16, 101 :: (volatile load (s8) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LBU killed renamable $x16, 104 :: (volatile load (s8) from %ir.2)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LBU renamable $x16, 100 :: (volatile load (s8) from %ir.0)
+ dead $x10 = LBU renamable $x16, 101 :: (volatile load (s8) from %ir.1)
+ dead $x10 = LBU killed renamable $x16, 104 :: (volatile load (s8) from %ir.2)
+ PseudoRET
+
+...
+---
+name: load_large_offset_no_opt_s16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_no_opt_s16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead $x10 = LH renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LH renamable $x16, 102 :: (volatile load (s16) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LH killed renamable $x16, 104 :: (volatile load (s16) from %ir.2)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LH renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+ dead $x10 = LH renamable $x16, 102 :: (volatile load (s16) from %ir.1)
+ dead $x10 = LH killed renamable $x16, 104 :: (volatile load (s16) from %ir.2)
+ PseudoRET
+
+...
+---
+name: load_large_offset_no_opt_u16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_no_opt_u16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead $x10 = LHU renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LHU renamable $x16, 102 :: (volatile load (s16) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LHU killed renamable $x16, 104 :: (volatile load (s16) from %ir.2)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LHU renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+ dead $x10 = LHU renamable $x16, 102 :: (volatile load (s16) from %ir.1)
+ dead $x10 = LHU killed renamable $x16, 104 :: (volatile load (s16) from %ir.2)
+ PseudoRET
+
+...
diff --git a/llvm/test/CodeGen/RISCV/misched-postra-direction.mir b/llvm/test/CodeGen/RISCV/misched-postra-direction.mir
index 841d0e6..2cca042 100644
--- a/llvm/test/CodeGen/RISCV/misched-postra-direction.mir
+++ b/llvm/test/CodeGen/RISCV/misched-postra-direction.mir
@@ -1,5 +1,15 @@
-# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -run-pass=postmisched -enable-post-misched -debug-only=machine-scheduler -misched-dump-schedule-trace -misched-postra-direction=topdown -o - %s 2>&1 | FileCheck --check-prefix=TOPDOWN %s
-# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -run-pass=postmisched -enable-post-misched -debug-only=machine-scheduler -misched-dump-schedule-trace -misched-postra-direction=bottomup -o - %s 2>&1 | FileCheck --check-prefix=BOTTOMUP %s
+# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -run-pass=postmisched \
+# RUN: -enable-post-misched -debug-only=machine-scheduler \
+# RUN: -misched-dump-schedule-trace -misched-postra-direction=topdown \
+# RUN: -o - %s 2>&1 | FileCheck --check-prefix=TOPDOWN %s
+# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -run-pass=postmisched \
+# RUN: -enable-post-misched -debug-only=machine-scheduler \
+# RUN: -misched-dump-schedule-trace -misched-postra-direction=bottomup \
+# RUN: -o - %s 2>&1 | FileCheck --check-prefix=BOTTOMUP %s
+# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -run-pass=postmisched \
+# RUN: -enable-post-misched -debug-only=machine-scheduler \
+# RUN: -misched-dump-schedule-trace -misched-postra-direction=bidirectional \
+# RUN: -o - %s 2>&1 | FileCheck --check-prefix=BIDIRECTIONAL %s
# REQUIRES: asserts
@@ -51,3 +61,9 @@ body: |
# BOTTOMUP-NEXT: SU(1): renamable $x13 = ADD renamable $x11, renamable $x10
# BOTTOMUP-NEXT: SU(0): renamable $x12 = MUL renamable $x11, renamable $x10
# BOTTOMUP-NEXT: SU(2): renamable $x14 = DIVW renamable $x12, renamable $x13
+
+# BIDIRECTIONAL: *** Final schedule for %bb.0 ***
+# BIDIRECTIONAL-NEXT: * Schedule table (Bidirectional): not implemented
+# BIDIRECTIONAL-NEXT: SU(1): renamable $x13 = ADD renamable $x11, renamable $x10
+# BIDIRECTIONAL-NEXT: SU(0): renamable $x12 = MUL renamable $x11, renamable $x10
+# BIDIRECTIONAL-NEXT: SU(2): renamable $x14 = DIVW renamable $x12, renamable $x13
diff --git a/llvm/test/CodeGen/RISCV/module-elf-flags.ll b/llvm/test/CodeGen/RISCV/module-elf-flags.ll
new file mode 100644
index 0000000..1b4bc9f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/module-elf-flags.ll
@@ -0,0 +1,13 @@
+; RUN: llc -mtriple=riscv32 -filetype=obj < %s | llvm-readelf -h - | FileCheck -check-prefixes=FLAGS %s
+
+; FLAGS: Flags: 0x11, RVC, TSO
+
+define i32 @addi(i32 %a) {
+ %1 = add i32 %a, 1
+ ret i32 %1
+}
+
+!llvm.module.flags = !{!0}
+
+!0 = !{i32 6, !"riscv-isa", !1}
+!1 = !{!"rv64i2p1_c2p0_ztso0p1"}
diff --git a/llvm/test/CodeGen/RISCV/pr64645.ll b/llvm/test/CodeGen/RISCV/pr64645.ll
index 44dce5a..f6d4651 100644
--- a/llvm/test/CodeGen/RISCV/pr64645.ll
+++ b/llvm/test/CodeGen/RISCV/pr64645.ll
@@ -5,34 +5,8 @@
define <2 x double> @v2f64(<2 x double> %x, <2 x double> %y) nounwind {
; CHECK-LABEL: v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a4, 8(sp)
-; CHECK-NEXT: sw a5, 12(sp)
-; CHECK-NEXT: lw a4, 8(sp)
-; CHECK-NEXT: lw a5, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: lw a0, 8(sp)
-; CHECK-NEXT: lw a1, 12(sp)
-; CHECK-NEXT: sw a6, 8(sp)
-; CHECK-NEXT: sw a7, 12(sp)
-; CHECK-NEXT: lw a6, 8(sp)
-; CHECK-NEXT: lw a7, 12(sp)
-; CHECK-NEXT: sw a2, 8(sp)
-; CHECK-NEXT: sw a3, 12(sp)
-; CHECK-NEXT: lw a2, 8(sp)
-; CHECK-NEXT: lw a3, 12(sp)
; CHECK-NEXT: fadd.d a2, a2, a6
; CHECK-NEXT: fadd.d a0, a0, a4
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: lw a0, 8(sp)
-; CHECK-NEXT: lw a1, 12(sp)
-; CHECK-NEXT: sw a2, 8(sp)
-; CHECK-NEXT: sw a3, 12(sp)
-; CHECK-NEXT: lw a2, 8(sp)
-; CHECK-NEXT: lw a3, 12(sp)
-; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%1 = fadd <2 x double> %x, %y
ret <2 x double> %1
diff --git a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
index 3731b97..b45ab13 100644
--- a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
@@ -11,8 +11,6 @@ define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: beqz a0, .LBB0_2
; RV32I-NEXT: # %bb.1: # %cond.false
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
@@ -40,12 +38,11 @@ define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: li a0, 32
@@ -64,19 +61,16 @@ declare i64 @llvm.ctlz.i64(i64, i1)
define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-LABEL: ctlz_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: lui a2, 349525
+; RV32I-NEXT: addi a4, a2, 1365
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a3, a2, 819
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi a2, a2, -241
+; RV32I-NEXT: bnez a1, .LBB1_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -87,28 +81,26 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s4, a2, 1365
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s5, a1, 819
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s6, a1, -241
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s3, a1, 257
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: or a0, s2, a0
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB1_2:
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -119,35 +111,21 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: bnez s0, .LBB1_2
-; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a0, a0, 32
-; RV32I-NEXT: j .LBB1_3
-; RV32I-NEXT: .LBB1_2:
-; RV32I-NEXT: srli a0, s1, 24
-; RV32I-NEXT: .LBB1_3:
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32XTHEADBB-LABEL: ctlz_i64:
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index 36c1070..7e6c3f9 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -11,8 +11,6 @@ define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: beqz a0, .LBB0_2
; RV32I-NEXT: # %bb.1: # %cond.false
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
@@ -40,12 +38,11 @@ define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: li a0, 32
@@ -64,19 +61,16 @@ declare i64 @llvm.ctlz.i64(i64, i1)
define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-LABEL: ctlz_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: lui a2, 349525
+; RV32I-NEXT: addi a4, a2, 1365
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a3, a2, 819
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi a2, a2, -241
+; RV32I-NEXT: bnez a1, .LBB1_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -87,28 +81,26 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s4, a2, 1365
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s5, a1, 819
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s6, a1, -241
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s3, a1, 257
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: or a0, s2, a0
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB1_2:
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -119,35 +111,21 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: bnez s0, .LBB1_2
-; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a0, a0, 32
-; RV32I-NEXT: j .LBB1_3
-; RV32I-NEXT: .LBB1_2:
-; RV32I-NEXT: srli a0, s1, 24
-; RV32I-NEXT: .LBB1_3:
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctlz_i64:
@@ -275,8 +253,6 @@ declare i32 @llvm.ctpop.i32(i32)
define i32 @ctpop_i32(i32 %a) nounwind {
; RV32I-LABEL: ctpop_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: lui a2, 349525
; RV32I-NEXT: addi a2, a2, 1365
@@ -293,12 +269,11 @@ define i32 @ctpop_i32(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctpop_i32:
@@ -390,58 +365,42 @@ declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind {
; RV32I-LABEL: ctpop_v2i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s3, a2, 1365
-; RV32I-NEXT: and a1, a1, s3
-; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s4, a1, 819
-; RV32I-NEXT: and a1, a0, s4
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: lui a3, 349525
+; RV32I-NEXT: addi a3, a3, 1365
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: sub a0, a0, a2
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a2, a2, 819
+; RV32I-NEXT: and a4, a0, a2
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s5, a1, -241
-; RV32I-NEXT: and a0, a0, s5
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s1, a1, 257
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli s2, a0, 24
-; RV32I-NEXT: srli a0, s0, 1
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: sub s0, s0, a0
-; RV32I-NEXT: and a0, s0, s4
-; RV32I-NEXT: srli s0, s0, 2
-; RV32I-NEXT: and a1, s0, s4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s5
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli a1, a0, 24
-; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: add a0, a4, a0
+; RV32I-NEXT: srli a4, a0, 4
+; RV32I-NEXT: add a0, a0, a4
+; RV32I-NEXT: lui a4, 61681
+; RV32I-NEXT: addi a4, a4, -241
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: slli a5, a0, 8
+; RV32I-NEXT: add a0, a0, a5
+; RV32I-NEXT: slli a5, a0, 16
+; RV32I-NEXT: add a0, a0, a5
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: srli a5, a1, 1
+; RV32I-NEXT: and a3, a5, a3
+; RV32I-NEXT: sub a1, a1, a3
+; RV32I-NEXT: and a3, a1, a2
+; RV32I-NEXT: srli a1, a1, 2
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: add a1, a3, a1
+; RV32I-NEXT: srli a2, a1, 4
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: and a1, a1, a4
+; RV32I-NEXT: slli a2, a1, 8
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: slli a2, a1, 16
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: srli a1, a1, 24
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctpop_v2i32:
@@ -558,59 +517,44 @@ declare i64 @llvm.ctpop.i64(i64)
define i64 @ctpop_i64(i64 %a) nounwind {
; RV32I-LABEL: ctpop_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s2, a2, 1365
-; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: sub a1, a1, a0
-; RV32I-NEXT: lui a0, 209715
-; RV32I-NEXT: addi s3, a0, 819
-; RV32I-NEXT: and a0, a1, s3
+; RV32I-NEXT: srli a2, a1, 1
+; RV32I-NEXT: lui a3, 349525
+; RV32I-NEXT: addi a3, a3, 1365
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a2, a2, 819
+; RV32I-NEXT: and a4, a1, a2
; RV32I-NEXT: srli a1, a1, 2
-; RV32I-NEXT: and a1, a1, s3
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s4, a1, -241
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s1, a1, 257
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli s5, a0, 24
-; RV32I-NEXT: srli a0, s0, 1
-; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: sub s0, s0, a0
-; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: srli s0, s0, 2
-; RV32I-NEXT: and a1, s0, s3
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: add a1, a4, a1
+; RV32I-NEXT: srli a4, a1, 4
+; RV32I-NEXT: add a1, a1, a4
+; RV32I-NEXT: lui a4, 61681
+; RV32I-NEXT: addi a4, a4, -241
+; RV32I-NEXT: and a1, a1, a4
+; RV32I-NEXT: slli a5, a1, 8
+; RV32I-NEXT: add a1, a1, a5
+; RV32I-NEXT: slli a5, a1, 16
+; RV32I-NEXT: add a1, a1, a5
+; RV32I-NEXT: srli a1, a1, 24
+; RV32I-NEXT: srli a5, a0, 1
+; RV32I-NEXT: and a3, a5, a3
+; RV32I-NEXT: sub a0, a0, a3
+; RV32I-NEXT: and a3, a0, a2
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: add a0, a3, a0
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: slli a2, a0, 8
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 16
+; RV32I-NEXT: add a0, a0, a2
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: add a0, a0, s5
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctpop_i64:
@@ -738,99 +682,82 @@ declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
; RV32I-LABEL: ctpop_v2i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -48
-; RV32I-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s7, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s8, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: lw a0, 4(a1)
-; RV32I-NEXT: lw s2, 8(a1)
-; RV32I-NEXT: lw s5, 12(a1)
-; RV32I-NEXT: lw s6, 0(a1)
-; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s3, a2, 1365
-; RV32I-NEXT: and a1, a1, s3
-; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s4, a1, 819
-; RV32I-NEXT: and a1, a0, s4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s7, a1, -241
-; RV32I-NEXT: and a0, a0, s7
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s1, a1, 257
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli s8, a0, 24
-; RV32I-NEXT: srli a0, s6, 1
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: sub a0, s6, a0
-; RV32I-NEXT: and a1, a0, s4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s7
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: add s8, a0, s8
-; RV32I-NEXT: srli a0, s5, 1
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: sub a0, s5, a0
-; RV32I-NEXT: and a1, a0, s4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s7
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli s5, a0, 24
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: sub a0, s2, a0
-; RV32I-NEXT: and a1, a0, s4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s7
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: add a0, a0, s5
-; RV32I-NEXT: sw zero, 12(s0)
-; RV32I-NEXT: sw zero, 4(s0)
-; RV32I-NEXT: sw a0, 8(s0)
-; RV32I-NEXT: sw s8, 0(s0)
-; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 32(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s7, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s8, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 48
+; RV32I-NEXT: lw a3, 4(a1)
+; RV32I-NEXT: lw a2, 8(a1)
+; RV32I-NEXT: lw a4, 12(a1)
+; RV32I-NEXT: lw a1, 0(a1)
+; RV32I-NEXT: srli a5, a3, 1
+; RV32I-NEXT: lui a6, 349525
+; RV32I-NEXT: addi a6, a6, 1365
+; RV32I-NEXT: and a5, a5, a6
+; RV32I-NEXT: sub a3, a3, a5
+; RV32I-NEXT: lui a5, 209715
+; RV32I-NEXT: addi a5, a5, 819
+; RV32I-NEXT: and a7, a3, a5
+; RV32I-NEXT: srli a3, a3, 2
+; RV32I-NEXT: and a3, a3, a5
+; RV32I-NEXT: add a3, a7, a3
+; RV32I-NEXT: srli a7, a3, 4
+; RV32I-NEXT: add a3, a3, a7
+; RV32I-NEXT: lui a7, 61681
+; RV32I-NEXT: addi a7, a7, -241
+; RV32I-NEXT: and a3, a3, a7
+; RV32I-NEXT: slli t0, a3, 8
+; RV32I-NEXT: add a3, a3, t0
+; RV32I-NEXT: slli t0, a3, 16
+; RV32I-NEXT: add a3, a3, t0
+; RV32I-NEXT: srli a3, a3, 24
+; RV32I-NEXT: srli t0, a1, 1
+; RV32I-NEXT: and t0, t0, a6
+; RV32I-NEXT: sub a1, a1, t0
+; RV32I-NEXT: and t0, a1, a5
+; RV32I-NEXT: srli a1, a1, 2
+; RV32I-NEXT: and a1, a1, a5
+; RV32I-NEXT: add a1, t0, a1
+; RV32I-NEXT: srli t0, a1, 4
+; RV32I-NEXT: add a1, a1, t0
+; RV32I-NEXT: and a1, a1, a7
+; RV32I-NEXT: slli t0, a1, 8
+; RV32I-NEXT: add a1, a1, t0
+; RV32I-NEXT: slli t0, a1, 16
+; RV32I-NEXT: add a1, a1, t0
+; RV32I-NEXT: srli a1, a1, 24
+; RV32I-NEXT: add a1, a1, a3
+; RV32I-NEXT: srli a3, a4, 1
+; RV32I-NEXT: and a3, a3, a6
+; RV32I-NEXT: sub a4, a4, a3
+; RV32I-NEXT: and a3, a4, a5
+; RV32I-NEXT: srli a4, a4, 2
+; RV32I-NEXT: and a4, a4, a5
+; RV32I-NEXT: add a3, a3, a4
+; RV32I-NEXT: srli a4, a3, 4
+; RV32I-NEXT: add a3, a3, a4
+; RV32I-NEXT: and a3, a3, a7
+; RV32I-NEXT: slli a4, a3, 8
+; RV32I-NEXT: add a3, a3, a4
+; RV32I-NEXT: slli a4, a3, 16
+; RV32I-NEXT: add a3, a3, a4
+; RV32I-NEXT: srli a3, a3, 24
+; RV32I-NEXT: srli a4, a2, 1
+; RV32I-NEXT: and a4, a4, a6
+; RV32I-NEXT: sub a2, a2, a4
+; RV32I-NEXT: and a4, a2, a5
+; RV32I-NEXT: srli a2, a2, 2
+; RV32I-NEXT: and a2, a2, a5
+; RV32I-NEXT: add a2, a4, a2
+; RV32I-NEXT: srli a4, a2, 4
+; RV32I-NEXT: add a2, a2, a4
+; RV32I-NEXT: and a2, a2, a7
+; RV32I-NEXT: slli a4, a2, 8
+; RV32I-NEXT: add a2, a2, a4
+; RV32I-NEXT: slli a4, a2, 16
+; RV32I-NEXT: add a2, a2, a4
+; RV32I-NEXT: srli a2, a2, 24
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: sw zero, 12(a0)
+; RV32I-NEXT: sw zero, 4(a0)
+; RV32I-NEXT: sw a2, 8(a0)
+; RV32I-NEXT: sw a1, 0(a0)
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctpop_v2i64:
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
index 73bfc64..acd63f2 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
@@ -317,8 +317,6 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB5_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -354,14 +352,13 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: li a0, 64
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
index 7feef4d..b0e447b 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
@@ -307,8 +307,6 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB5_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -344,14 +342,13 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: li a0, 64
@@ -623,8 +620,6 @@ declare i64 @llvm.ctpop.i64(i64)
define i64 @ctpop_i64(i64 %a) nounwind {
; RV64I-LABEL: ctpop_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: addiw a2, a2, 1365
@@ -647,14 +642,13 @@ define i64 @ctpop_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_i64:
diff --git a/llvm/test/CodeGen/RISCV/rv64-typepromotion.ll b/llvm/test/CodeGen/RISCV/rv64-typepromotion.ll
new file mode 100644
index 0000000..23eae33
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-typepromotion.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -mtriple=riscv64 -passes=typepromotion -S %s | FileCheck %s
+
+; Test that this does not crash
+define i16 @test(i8 %a, i32 %b) {
+; CHECK-LABEL: define i16 @test(
+; CHECK-SAME: i8 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[A]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[B]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[TMP1]] to i64
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[TMP2]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = and i32 [[TMP0]], 255
+; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
+; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], [[TMP2]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP6]] to i16
+; CHECK-NEXT: ret i16 [[TMP7]]
+;
+entry:
+ %0 = zext i8 %a to i32
+ %1 = trunc i32 %b to i16
+ %2 = icmp eq i16 %1, 0
+ %3 = trunc i32 %0 to i8
+ %4 = zext i8 %3 to i16
+ %5 = xor i16 %4, %1
+ ret i16 %5
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
index 1f62ea9f..6cdab88 100644
--- a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
@@ -11,8 +11,6 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB0_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -38,14 +36,13 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: li a0, 32
@@ -66,8 +63,6 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB1_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -93,14 +88,13 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: j .LBB1_3
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: li a0, 32
@@ -125,50 +119,45 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
; RV64I-LABEL: log2_ceil_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: addiw a0, a0, -1
-; RV64I-NEXT: li s0, 32
-; RV64I-NEXT: li a1, 32
-; RV64I-NEXT: beqz a0, .LBB2_2
+; RV64I-NEXT: addiw a1, a0, -1
+; RV64I-NEXT: li a0, 32
+; RV64I-NEXT: li a2, 32
+; RV64I-NEXT: beqz a1, .LBB2_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
+; RV64I-NEXT: srliw a2, a1, 1
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 2
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 4
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 8
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 16
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: not a1, a1
+; RV64I-NEXT: srli a2, a1, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a1, a0, 24
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: lui a2, 61681
+; RV64I-NEXT: addi a2, a2, -241
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 24
; RV64I-NEXT: .LBB2_2: # %cond.end
-; RV64I-NEXT: sub a0, s0, a1
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
;
; RV64XTHEADBB-LABEL: log2_ceil_i32:
@@ -189,48 +178,42 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
; RV64I-LABEL: findLastSet_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: srliw a0, a0, 1
-; RV64I-NEXT: or a0, s0, a0
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
+; RV64I-NEXT: srliw a1, a0, 1
+; RV64I-NEXT: or a1, a0, a1
+; RV64I-NEXT: srliw a2, a1, 2
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 4
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 8
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 16
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: not a1, a1
+; RV64I-NEXT: srli a2, a1, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: xori a0, a0, 31
-; RV64I-NEXT: snez a1, s0
-; RV64I-NEXT: addi a1, a1, -1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: lui a2, 61681
+; RV64I-NEXT: addi a2, a2, -241
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 24
+; RV64I-NEXT: xori a1, a1, 31
+; RV64I-NEXT: snez a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBB-LABEL: findLastSet_i32:
@@ -256,10 +239,6 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srliw a0, a0, 1
; RV64I-NEXT: beqz a0, .LBB4_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: .cfi_def_cfa_offset 16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -285,14 +264,13 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: li a0, 32
@@ -317,8 +295,6 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB5_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -354,14 +330,13 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: li a0, 64
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index f810f51..c81c6ae 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -209,6 +209,24 @@ define i64 @sh1adduw_2(i64 %0, i64 %1) {
ret i64 %5
}
+define i64 @sh1adduw_3(i64 %0, i64 %1) {
+; RV64I-LABEL: sh1adduw_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 31
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh1adduw_3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh1add.uw a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 1
+ %4 = and i64 %3, 8589934590
+ %5 = or disjoint i64 %4, %1
+ ret i64 %5
+}
+
define signext i32 @sh2adduw(i32 signext %0, ptr %1) {
; RV64I-LABEL: sh2adduw:
; RV64I: # %bb.0:
@@ -247,6 +265,24 @@ define i64 @sh2adduw_2(i64 %0, i64 %1) {
ret i64 %5
}
+define i64 @sh2adduw_3(i64 %0, i64 %1) {
+; RV64I-LABEL: sh2adduw_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 30
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh2adduw_3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh2add.uw a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 2
+ %4 = and i64 %3, 17179869180
+ %5 = or disjoint i64 %4, %1
+ ret i64 %5
+}
+
define i64 @sh3adduw(i32 signext %0, ptr %1) {
; RV64I-LABEL: sh3adduw:
; RV64I: # %bb.0:
@@ -285,6 +321,24 @@ define i64 @sh3adduw_2(i64 %0, i64 %1) {
ret i64 %5
}
+define i64 @sh3adduw_3(i64 %0, i64 %1) {
+; RV64I-LABEL: sh3adduw_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 29
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh3adduw_3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 3
+ %4 = and i64 %3, 34359738360
+ %5 = or disjoint i64 %4, %1
+ ret i64 %5
+}
+
; Type legalization inserts a sext_inreg after the first add. That add will be
; selected as sh2add which does not sign extend. SimplifyDemandedBits is unable
; to remove the sext_inreg because it has multiple uses. The ashr will use the
@@ -335,6 +389,24 @@ define i64 @addmul6(i64 %a, i64 %b) {
ret i64 %d
}
+define i64 @disjointormul6(i64 %a, i64 %b) {
+; RV64I-LABEL: disjointormul6:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a2, 6
+; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: disjointormul6:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh1add a0, a0, a0
+; RV64ZBA-NEXT: sh1add a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %c = mul i64 %a, 6
+ %d = or disjoint i64 %c, %b
+ ret i64 %d
+}
+
define i64 @addmul10(i64 %a, i64 %b) {
; RV64I-LABEL: addmul10:
; RV64I: # %bb.0:
@@ -1099,6 +1171,23 @@ define i64 @add4104(i64 %a) {
ret i64 %c
}
+define i64 @add4104_2(i64 %a) {
+; RV64I-LABEL: add4104_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a1, 1
+; RV64I-NEXT: addiw a1, a1, 8
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: add4104_2:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: li a1, 1026
+; RV64ZBA-NEXT: sh2add a0, a1, a0
+; RV64ZBA-NEXT: ret
+ %c = or disjoint i64 %a, 4104
+ ret i64 %c
+}
+
define i64 @add8208(i64 %a) {
; RV64I-LABEL: add8208:
; RV64I: # %bb.0:
@@ -1282,6 +1371,96 @@ define zeroext i32 @sext_ashr_zext_i8(i8 %a) nounwind {
ret i32 %1
}
+define i64 @sh6_sh3_add1(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add1:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add1:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: sh3add a1, a1, a2
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %shl1, %shl
+ %add2 = add nsw i64 %add, %x
+ ret i64 %add2
+}
+
+define i64 @sh6_sh3_add2(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add2:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add2:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: slli a1, a1, 6
+; RV64ZBA-NEXT: add a0, a1, a0
+; RV64ZBA-NEXT: sh3add a0, a2, a0
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %shl1, %x
+ %add2 = add nsw i64 %add, %shl
+ ret i64 %add2
+}
+
+define i64 @sh6_sh3_add3(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add3:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add3:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: sh3add a1, a1, a2
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %shl1, %shl
+ %add2 = add nsw i64 %x, %add
+ ret i64 %add2
+}
+
+define i64 @sh6_sh3_add4(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add4:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add4:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: slli a1, a1, 6
+; RV64ZBA-NEXT: sh3add a0, a2, a0
+; RV64ZBA-NEXT: add a0, a0, a1
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %x, %shl
+ %add2 = add nsw i64 %add, %shl1
+ ret i64 %add2
+}
+
; Make sure we use sext.h+slli+srli for Zba+Zbb.
; FIXME: The RV64I and Zba only cases can be done with only 3 shifts.
define zeroext i32 @sext_ashr_zext_i16(i16 %a) nounwind {
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index 2269d8d..4d5ef5d 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -11,8 +11,6 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB0_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -38,14 +36,13 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: li a0, 32
@@ -64,8 +61,6 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB1_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -91,14 +86,13 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: j .LBB1_3
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: li a0, 32
@@ -121,50 +115,45 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
; RV64I-LABEL: log2_ceil_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: addiw a0, a0, -1
-; RV64I-NEXT: li s0, 32
-; RV64I-NEXT: li a1, 32
-; RV64I-NEXT: beqz a0, .LBB2_2
+; RV64I-NEXT: addiw a1, a0, -1
+; RV64I-NEXT: li a0, 32
+; RV64I-NEXT: li a2, 32
+; RV64I-NEXT: beqz a1, .LBB2_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
+; RV64I-NEXT: srliw a2, a1, 1
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 2
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 4
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 8
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 16
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: not a1, a1
+; RV64I-NEXT: srli a2, a1, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a1, a0, 24
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: lui a2, 61681
+; RV64I-NEXT: addi a2, a2, -241
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 24
; RV64I-NEXT: .LBB2_2: # %cond.end
-; RV64I-NEXT: sub a0, s0, a1
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: log2_ceil_i32:
@@ -183,48 +172,42 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
; RV64I-LABEL: findLastSet_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: srliw a0, a0, 1
-; RV64I-NEXT: or a0, s0, a0
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
+; RV64I-NEXT: srliw a1, a0, 1
+; RV64I-NEXT: or a1, a0, a1
+; RV64I-NEXT: srliw a2, a1, 2
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 4
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 8
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 16
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: not a1, a1
+; RV64I-NEXT: srli a2, a1, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: xori a0, a0, 31
-; RV64I-NEXT: snez a1, s0
-; RV64I-NEXT: addi a1, a1, -1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: lui a2, 61681
+; RV64I-NEXT: addi a2, a2, -241
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 24
+; RV64I-NEXT: xori a1, a1, 31
+; RV64I-NEXT: snez a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: findLastSet_i32:
@@ -248,10 +231,6 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srliw a0, a0, 1
; RV64I-NEXT: beqz a0, .LBB4_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: .cfi_def_cfa_offset 16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -277,14 +256,13 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: li a0, 32
@@ -307,8 +285,6 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB5_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -344,14 +320,13 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: li a0, 64
@@ -544,8 +519,6 @@ declare i32 @llvm.ctpop.i32(i32)
define signext i32 @ctpop_i32(i32 signext %a) nounwind {
; RV64I-LABEL: ctpop_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: addiw a2, a2, 1365
@@ -560,14 +533,13 @@ define signext i32 @ctpop_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_i32:
@@ -657,8 +629,6 @@ define i1 @ctpop_i32_ne_one(i32 signext %a) nounwind {
define signext i32 @ctpop_i32_load(ptr %p) nounwind {
; RV64I-LABEL: ctpop_i32_load:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
@@ -674,14 +644,13 @@ define signext i32 @ctpop_i32_load(ptr %p) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_i32_load:
@@ -699,58 +668,42 @@ declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind {
; RV64I-LABEL: ctpop_v2i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -64
-; RV64I-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s2, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s3, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s4, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s5, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a1
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw s3, a2, 1365
-; RV64I-NEXT: and a1, a1, s3
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw s4, a1, 819
-; RV64I-NEXT: and a1, a0, s4
+; RV64I-NEXT: srli a2, a0, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a0, a0, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a4, a0, a2
; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, s4
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw s5, a1, -241
-; RV64I-NEXT: and a0, a0, s5
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw s1, a1, 257
-; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw s2, a0, 24
-; RV64I-NEXT: srli a0, s0, 1
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: sub s0, s0, a0
-; RV64I-NEXT: and a0, s0, s4
-; RV64I-NEXT: srli s0, s0, 2
-; RV64I-NEXT: and a1, s0, s4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: and a0, a0, s5
-; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a1, a0, 24
-; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s2, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s3, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s4, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s5, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 64
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a4, a0
+; RV64I-NEXT: srli a4, a0, 4
+; RV64I-NEXT: add a0, a0, a4
+; RV64I-NEXT: lui a4, 61681
+; RV64I-NEXT: addi a4, a4, -241
+; RV64I-NEXT: and a0, a0, a4
+; RV64I-NEXT: slli a5, a0, 8
+; RV64I-NEXT: add a0, a0, a5
+; RV64I-NEXT: slli a5, a0, 16
+; RV64I-NEXT: add a0, a0, a5
+; RV64I-NEXT: srliw a0, a0, 24
+; RV64I-NEXT: srli a5, a1, 1
+; RV64I-NEXT: and a3, a5, a3
+; RV64I-NEXT: sub a1, a1, a3
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: and a1, a1, a4
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 24
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_v2i32:
@@ -875,8 +828,6 @@ declare i64 @llvm.ctpop.i64(i64)
define i64 @ctpop_i64(i64 %a) nounwind {
; RV64I-LABEL: ctpop_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: addiw a2, a2, 1365
@@ -899,14 +850,13 @@ define i64 @ctpop_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_i64:
@@ -998,66 +948,52 @@ declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
; RV64I-LABEL: ctpop_v2i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -64
-; RV64I-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s2, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s3, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s4, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s5, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a1
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
-; RV64I-NEXT: slli a3, a2, 32
-; RV64I-NEXT: add s3, a2, a3
-; RV64I-NEXT: and a1, a1, s3
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add s4, a1, a2
-; RV64I-NEXT: and a1, a0, s4
+; RV64I-NEXT: srli a2, a0, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: slli a4, a3, 32
+; RV64I-NEXT: add a3, a3, a4
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a0, a0, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: slli a4, a2, 32
+; RV64I-NEXT: add a2, a2, a4
+; RV64I-NEXT: and a4, a0, a2
; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, s4
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a4, a0
+; RV64I-NEXT: srli a4, a0, 4
+; RV64I-NEXT: add a0, a0, a4
+; RV64I-NEXT: lui a4, 61681
+; RV64I-NEXT: addiw a4, a4, -241
+; RV64I-NEXT: slli a5, a4, 32
+; RV64I-NEXT: add a4, a4, a5
+; RV64I-NEXT: and a0, a0, a4
+; RV64I-NEXT: slli a5, a0, 8
+; RV64I-NEXT: add a0, a0, a5
+; RV64I-NEXT: slli a5, a0, 16
+; RV64I-NEXT: add a0, a0, a5
+; RV64I-NEXT: slli a5, a0, 32
+; RV64I-NEXT: add a0, a0, a5
+; RV64I-NEXT: srli a0, a0, 56
+; RV64I-NEXT: srli a5, a1, 1
+; RV64I-NEXT: and a3, a5, a3
+; RV64I-NEXT: sub a1, a1, a3
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: and a1, a1, a4
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add s5, a1, a2
-; RV64I-NEXT: and a0, a0, s5
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw s1, a1, 257
-; RV64I-NEXT: slli a1, s1, 32
-; RV64I-NEXT: add s1, s1, a1
-; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srli s2, a0, 56
-; RV64I-NEXT: srli a0, s0, 1
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: sub s0, s0, a0
-; RV64I-NEXT: and a0, s0, s4
-; RV64I-NEXT: srli s0, s0, 2
-; RV64I-NEXT: and a1, s0, s4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: and a0, a0, s5
-; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srli a1, a0, 56
-; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s2, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s3, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s4, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s5, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 64
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srli a1, a1, 56
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_v2i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/abd.ll b/llvm/test/CodeGen/RISCV/rvv/abd.ll
new file mode 100644
index 0000000..ddbfbd0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/abd.ll
@@ -0,0 +1,343 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+;
+; SABD
+;
+
+define <vscale x 16 x i8> @sabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sabd_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmin.vv v12, v8, v10
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i16>
+ %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i16>
+ %sub = sub <vscale x 16 x i16> %a.sext, %b.sext
+ %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true)
+ %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8>
+ ret <vscale x 16 x i8> %trunc
+}
+
+define <vscale x 16 x i8> @sabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
+; CHECK-LABEL: sabd_b_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmerge.vim v12, v10, -1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
+; CHECK-NEXT: vmin.vv v10, v12, v8
+; CHECK-NEXT: vmax.vv v8, v12, v8
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 16 x i1> %a to <vscale x 16 x i8>
+ %b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
+ %sub = sub <vscale x 16 x i8> %a.sext, %b.sext
+ %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
+ ret <vscale x 16 x i8> %abs
+}
+
+define <vscale x 8 x i16> @sabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sabd_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmin.vv v12, v8, v10
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 8 x i16> %a to <vscale x 8 x i32>
+ %b.sext = sext <vscale x 8 x i16> %b to <vscale x 8 x i32>
+ %sub = sub <vscale x 8 x i32> %a.sext, %b.sext
+ %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true)
+ %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %trunc
+}
+
+define <vscale x 8 x i16> @sabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: sabd_h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+ %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+ %sub = sub <vscale x 8 x i16> %a.sext, %b.sext
+ %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+ ret <vscale x 8 x i16> %abs
+}
+
+define <vscale x 4 x i32> @sabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sabd_s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vmin.vv v12, v8, v10
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 4 x i32> %a to <vscale x 4 x i64>
+ %b.sext = sext <vscale x 4 x i32> %b to <vscale x 4 x i64>
+ %sub = sub <vscale x 4 x i64> %a.sext, %b.sext
+ %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
+ %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %trunc
+}
+
+define <vscale x 4 x i32> @sabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: sabd_s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+ %b.sext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+ %sub = sub <vscale x 4 x i32> %a.sext, %b.sext
+ %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+ ret <vscale x 4 x i32> %abs
+}
+
+; FIXME: Crashes legalization if enabled
+;; define <vscale x 2 x i64> @sabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+;; %a.sext = sext <vscale x 2 x i64> %a to <vscale x 2 x i128>
+;; %b.sext = sext <vscale x 2 x i64> %b to <vscale x 2 x i128>
+;; %sub = sub <vscale x 2 x i128> %a.sext, %b.sext
+;; %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true)
+;; %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64>
+;; ret <vscale x 2 x i64> %trunc
+;; }
+
+define <vscale x 2 x i64> @sabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: sabd_d_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+ %b.sext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
+ %sub = sub <vscale x 2 x i64> %a.sext, %b.sext
+ %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
+ ret <vscale x 2 x i64> %abs
+}
+
+;
+; UABD
+;
+
+define <vscale x 16 x i8> @uabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: uabd_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vminu.vv v12, v8, v10
+; CHECK-NEXT: vmaxu.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 16 x i8> %a to <vscale x 16 x i16>
+ %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i16>
+ %sub = sub <vscale x 16 x i16> %a.zext, %b.zext
+ %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true)
+ %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8>
+ ret <vscale x 16 x i8> %trunc
+}
+
+define <vscale x 16 x i8> @uabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
+; CHECK-LABEL: uabd_b_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vminu.vv v10, v12, v8
+; CHECK-NEXT: vmaxu.vv v8, v12, v8
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 16 x i1> %a to <vscale x 16 x i8>
+ %b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
+ %sub = sub <vscale x 16 x i8> %a.zext, %b.zext
+ %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
+ ret <vscale x 16 x i8> %abs
+}
+
+define <vscale x 8 x i16> @uabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: uabd_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vminu.vv v12, v8, v10
+; CHECK-NEXT: vmaxu.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i16> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i16> %b to <vscale x 8 x i32>
+ %sub = sub <vscale x 8 x i32> %a.zext, %b.zext
+ %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true)
+ %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %trunc
+}
+
+define <vscale x 8 x i16> @uabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: uabd_h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+ %sub = sub <vscale x 8 x i16> %a.zext, %b.zext
+ %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+ ret <vscale x 8 x i16> %abs
+}
+
+define <vscale x 4 x i32> @uabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: uabd_s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vminu.vv v12, v8, v10
+; CHECK-NEXT: vmaxu.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
+ %b.zext = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
+ %sub = sub <vscale x 4 x i64> %a.zext, %b.zext
+ %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
+ %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %trunc
+}
+
+define <vscale x 4 x i32> @uabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: uabd_s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+ %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+ %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
+ %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+ ret <vscale x 4 x i32> %abs
+}
+
+; FIXME: Crashes legalization if enabled
+;; define <vscale x 2 x i64> @uabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+;; %a.zext = zext <vscale x 2 x i64> %a to <vscale x 2 x i128>
+;; %b.zext = zext <vscale x 2 x i64> %b to <vscale x 2 x i128>
+;; %sub = sub <vscale x 2 x i128> %a.zext, %b.zext
+;; %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true)
+;; %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64>
+;; ret <vscale x 2 x i64> %trunc
+;; }
+
+define <vscale x 2 x i64> @uabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: uabd_d_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+ %b.zext = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
+ %sub = sub <vscale x 2 x i64> %a.zext, %b.zext
+ %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
+ ret <vscale x 2 x i64> %abs
+}
+
+; Test the situation where isLegal(ISD::ABD, typeof(%a)) returns true but %a and
+; %b have differing types.
+define <vscale x 4 x i32> @uabd_non_matching_extension(<vscale x 4 x i32> %a, <vscale x 4 x i8> %b) {
+; CHECK-LABEL: uabd_non_matching_extension:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v12, v10
+; CHECK-NEXT: vminu.vv v10, v8, v12
+; CHECK-NEXT: vmaxu.vv v8, v8, v12
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
+ %b.zext = zext <vscale x 4 x i8> %b to <vscale x 4 x i64>
+ %sub = sub <vscale x 4 x i64> %a.zext, %b.zext
+ %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
+ %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %trunc
+}
+
+; Test the situation where isLegal(ISD::ABD, typeof(%a.zext)) returns true but
+; %a and %b have differing types.
+define <vscale x 4 x i32> @uabd_non_matching_promoted_ops(<vscale x 4 x i8> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: uabd_non_matching_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vminu.vv v8, v10, v9
+; CHECK-NEXT: vmaxu.vv v9, v10, v9
+; CHECK-NEXT: vsub.vv v10, v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
+ %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+ %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
+ %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+ ret <vscale x 4 x i32> %abs
+}
+
+; Test the situation where isLegal(ISD::ABD, typeof(%a)) returns true but %a and
+; %b are promoted differently.
+define <vscale x 4 x i32> @uabd_non_matching_promotion(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; CHECK-LABEL: uabd_non_matching_promotion:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v8, v9
+; CHECK-NEXT: vwsub.wv v10, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vrsub.vi v8, v10, 0
+; CHECK-NEXT: vmax.vv v8, v10, v8
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
+ %b.zext = sext <vscale x 4 x i8> %b to <vscale x 4 x i32>
+ %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
+ %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+ ret <vscale x 4 x i32> %abs
+}
+
+declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)
+
+declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)
+declare <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16>, i1)
+
+declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)
+declare <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32>, i1)
+
+declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)
+declare <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64>, i1)
+
+declare <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128>, i1)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir b/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir
index 5255728..080a89e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir
@@ -22,7 +22,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 16
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
functionContext: ''
diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
index 1d025a2..1fe91c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
@@ -18,15 +18,15 @@ define void @test(ptr %addr) {
; CHECK-NEXT: add a2, a0, a1
; CHECK-NEXT: vl1re64.v v8, (a2)
; CHECK-NEXT: slli a2, a1, 1
-; CHECK-NEXT: add a3, a0, a2
-; CHECK-NEXT: vl1re64.v v9, (a3)
+; CHECK-NEXT: vl1re64.v v9, (a0)
+; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: vl1re64.v v10, (a0)
; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs1r.v v9, (a0)
; CHECK-NEXT: add a2, a0, a2
-; CHECK-NEXT: vs1r.v v9, (a2)
-; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: vs1r.v v8, (a1)
-; CHECK-NEXT: vs1r.v v10, (a0)
+; CHECK-NEXT: vs1r.v v10, (a2)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vs1r.v v8, (a0)
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
index 64031f8..a9a680d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
@@ -16,13 +16,13 @@ define <vscale x 1 x double> @test(ptr %addr, i64 %vl) {
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; CHECK-NEXT: csrrs a2, vlenb, zero
-; CHECK-NEXT: add a3, a0, a2
-; CHECK-NEXT: vl1re64.v v8, (a3)
+; CHECK-NEXT: vl1re64.v v8, (a0)
+; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: vl1re64.v v9, (a0)
; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs1r.v v8, (a0)
; CHECK-NEXT: add a2, a0, a2
-; CHECK-NEXT: vs1r.v v8, (a2)
-; CHECK-NEXT: vs1r.v v9, (a0)
+; CHECK-NEXT: vs1r.v v9, (a2)
; CHECK-NEXT: vl1re64.v v8, (a2)
; CHECK-NEXT: vl1re64.v v9, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll b/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll
new file mode 100644
index 0000000..2d5258f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll
@@ -0,0 +1,154 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+
+; Check that we perform binary arithmetic in a narrower type where possible, via
+; combineBinOpOfZExt or otherwise.
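+; For example, in @add below the i8 operands are combined with a widening
+; vwaddu.vv at e8 (the e16 result cannot overflow), and only then zero-extended
+; to e32 with vzext.vf2, instead of extending both operands to e32 up front.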
+
+define <vscale x 8 x i32> @add(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: add:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwaddu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %add = add <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %add
+}
+
+define <vscale x 8 x i32> @sub(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: sub:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwsubu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %sub = sub <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %sub
+}
+
+define <vscale x 8 x i32> @mul(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: mul:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %mul = mul <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %mul
+}
+
+define <vscale x 8 x i32> @sdiv(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: sdiv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vdivu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %sdiv = sdiv <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %sdiv
+}
+
+define <vscale x 8 x i32> @udiv(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: udiv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vdivu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %udiv = udiv <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %udiv
+}
+
+define <vscale x 8 x i32> @srem(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: srem:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vremu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %srem = srem <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %srem
+}
+
+define <vscale x 8 x i32> @urem(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: urem:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vremu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %urem = urem <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %urem
+}
+
+define <vscale x 8 x i32> @and(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: and:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vand.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %and = and <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %and
+}
+
+define <vscale x 8 x i32> @or(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vor.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %or = or <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %or
+}
+
+define <vscale x 8 x i32> @xor(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: xor:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vxor.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %xor = xor <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %xor
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll b/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll
new file mode 100644
index 0000000..84936d8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m -mattr=+v -O2 < %s \
+; RUN: | FileCheck --check-prefix=SPILL-O2 %s
+
+define <vscale x 1 x i32> @test_vector_std(<vscale x 1 x i32> %va) nounwind {
+; SPILL-O2-LABEL: test_vector_std:
+; SPILL-O2: # %bb.0: # %entry
+; SPILL-O2-NEXT: addi sp, sp, -16
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 1
+; SPILL-O2-NEXT: sub sp, sp, a0
+; SPILL-O2-NEXT: addi a0, sp, 16
+; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: #APP
+; SPILL-O2-NEXT: #NO_APP
+; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 1
+; SPILL-O2-NEXT: add sp, sp, a0
+; SPILL-O2-NEXT: addi sp, sp, 16
+; SPILL-O2-NEXT: ret
+entry:
+ call void asm sideeffect "",
+ "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+
+ ret <vscale x 1 x i32> %va
+}
+
+define riscv_vector_cc <vscale x 1 x i32> @test_vector_callee(<vscale x 1 x i32> %va) nounwind {
+; SPILL-O2-LABEL: test_vector_callee:
+; SPILL-O2: # %bb.0: # %entry
+; SPILL-O2-NEXT: addi sp, sp, -16
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 4
+; SPILL-O2-NEXT: sub sp, sp, a0
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 4
+; SPILL-O2-NEXT: sub a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs1r.v v1, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: li a1, 13
+; SPILL-O2-NEXT: mul a0, a0, a1
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs2r.v v2, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 3
+; SPILL-O2-NEXT: add a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs4r.v v4, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: addi a0, sp, 16
+; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: #APP
+; SPILL-O2-NEXT: #NO_APP
+; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 4
+; SPILL-O2-NEXT: sub a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl1r.v v1, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: li a1, 13
+; SPILL-O2-NEXT: mul a0, a0, a1
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl2r.v v2, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 3
+; SPILL-O2-NEXT: add a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 4
+; SPILL-O2-NEXT: add sp, sp, a0
+; SPILL-O2-NEXT: addi sp, sp, 16
+; SPILL-O2-NEXT: ret
+entry:
+ call void asm sideeffect "",
+ "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+
+ ret <vscale x 1 x i32> %va
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
index 78385a8..90edb99 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -86,3 +86,166 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
%a = call <vscale x 32 x i32> @callee_scalable_vector_split_indirect(<vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> %x)
ret <vscale x 32 x i32> %a
}
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>} @caller_tuple_return() {
+; RV32-LABEL: caller_tuple_return:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: call callee_tuple_return
+; RV32-NEXT: vmv2r.v v12, v8
+; RV32-NEXT: vmv2r.v v8, v10
+; RV32-NEXT: vmv2r.v v10, v12
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: caller_tuple_return:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: call callee_tuple_return
+; RV64-NEXT: vmv2r.v v12, v8
+; RV64-NEXT: vmv2r.v v8, v10
+; RV64-NEXT: vmv2r.v v10, v12
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+ %a = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
+ %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 0
+ %c = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 1
+ %d = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} poison, <vscale x 4 x i32> %c, 0
+ %e = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %d, <vscale x 4 x i32> %b, 1
+ ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %e
+}
+
+declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
+
+define void @caller_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %x) {
+; RV32-LABEL: caller_tuple_argument:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: vmv2r.v v12, v8
+; RV32-NEXT: vmv2r.v v8, v10
+; RV32-NEXT: vmv2r.v v10, v12
+; RV32-NEXT: call callee_tuple_argument
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: caller_tuple_argument:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: vmv2r.v v12, v8
+; RV64-NEXT: vmv2r.v v8, v10
+; RV64-NEXT: vmv2r.v v10, v12
+; RV64-NEXT: call callee_tuple_argument
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+ %a = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 0
+ %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 1
+ %c = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} poison, <vscale x 4 x i32> %b, 0
+ %d = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %c, <vscale x 4 x i32> %a, 1
+ call void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %d)
+ ret void
+}
+
+declare void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>})
+
+; %0 -> v8
+; %1 -> v9
+define <vscale x 1 x i64> @case1(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1) {
+; CHECK-LABEL: case1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %a = add <vscale x 1 x i64> %0, %1
+ ret <vscale x 1 x i64> %a
+}
+
+; %0 -> v8
+; %1 -> v10-v11
+; %2 -> v9
+define <vscale x 1 x i64> @case2_1(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case2_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %a = add <vscale x 1 x i64> %0, %2
+ ret <vscale x 1 x i64> %a
+}
+define <vscale x 2 x i64> @case2_2(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case2_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v10
+; CHECK-NEXT: ret
+ %a = add <vscale x 2 x i64> %1, %1
+ ret <vscale x 2 x i64> %a
+}
+
+; %0 -> v8
+; %1 -> {v10-v11, v12-v13}
+; %2 -> v9
+define <vscale x 1 x i64> @case3_1(<vscale x 1 x i64> %0, {<vscale x 2 x i64>, <vscale x 2 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case3_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %add = add <vscale x 1 x i64> %0, %2
+ ret <vscale x 1 x i64> %add
+}
+define <vscale x 2 x i64> @case3_2(<vscale x 1 x i64> %0, {<vscale x 2 x i64>, <vscale x 2 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case3_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: ret
+ %a = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %1, 0
+ %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %1, 1
+ %add = add <vscale x 2 x i64> %a, %b
+ ret <vscale x 2 x i64> %add
+}
+
+; %0 -> v8
+; %1 -> {by-ref, by-ref}
+; %2 -> v9
+define <vscale x 8 x i64> @case4_1(<vscale x 1 x i64> %0, {<vscale x 8 x i64>, <vscale x 8 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case4_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: vl8re64.v v8, (a1)
+; CHECK-NEXT: vl8re64.v v16, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: vadd.vv v8, v16, v8
+; CHECK-NEXT: ret
+ %a = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i64> } %1, 0
+ %b = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i64> } %1, 1
+ %add = add <vscale x 8 x i64> %a, %b
+ ret <vscale x 8 x i64> %add
+}
+define <vscale x 1 x i64> @case4_2(<vscale x 1 x i64> %0, {<vscale x 8 x i64>, <vscale x 8 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case4_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %add = add <vscale x 1 x i64> %0, %2
+ ret <vscale x 1 x i64> %add
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
new file mode 100644
index 0000000..673008d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
@@ -0,0 +1,871 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -verify-machineinstrs -mtriple=riscv64 -mattr=+v,+d,+m,+zbb %s -o - | FileCheck %s --check-prefix=RV64
+; RUN: llc -verify-machineinstrs -mtriple=riscv32 -mattr=+v,+d,+m,+zbb %s -o - | FileCheck %s --check-prefix=RV32
+
+; Compress + store for i8 type
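+; llvm.masked.compressstore is lowered to vcompress.vm (pack the active
+; elements to the front), vcpop.m (count the set mask bits), and a unit-stride
+; vse store of that many elements; vectors wider than LMUL=8 are split first.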
+
+define void @test_compresstore_v1i8(ptr %p, <1 x i1> %mask, <1 x i8> %data) {
+; RV64-LABEL: test_compresstore_v1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV64-NEXT: vse8.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV32-NEXT: vse8.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v1i8(<1 x i8> %data, ptr align 1 %p, <1 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v2i8(ptr %p, <2 x i1> %mask, <2 x i8> %data) {
+; RV64-LABEL: test_compresstore_v2i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV64-NEXT: vse8.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v2i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV32-NEXT: vse8.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v2i8(<2 x i8> %data, ptr align 1 %p, <2 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v4i8(ptr %p, <4 x i1> %mask, <4 x i8> %data) {
+; RV64-LABEL: test_compresstore_v4i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; RV64-NEXT: vse8.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v4i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; RV32-NEXT: vse8.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v4i8(<4 x i8> %data, ptr align 1 %p, <4 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v8i8(ptr %p, <8 x i1> %mask, <8 x i8> %data) {
+; RV64-LABEL: test_compresstore_v8i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; RV64-NEXT: vse8.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v8i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; RV32-NEXT: vse8.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v8i8(<8 x i8> %data, ptr align 1 %p, <8 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v16i8(ptr %p, <16 x i1> %mask, <16 x i8> %data) {
+; RV64-LABEL: test_compresstore_v16i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vse8.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v16i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vse8.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v16i8(<16 x i8> %data, ptr align 1 %p, <16 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v32i8(ptr %p, <32 x i1> %mask, <32 x i8> %data) {
+; RV64-LABEL: test_compresstore_v32i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 32
+; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV64-NEXT: vcompress.vm v10, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV64-NEXT: vse8.v v10, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v32i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV32-NEXT: vcompress.vm v10, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV32-NEXT: vse8.v v10, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v32i8(<32 x i8> %data, ptr align 1 %p, <32 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v64i8(ptr %p, <64 x i1> %mask, <64 x i8> %data) {
+; RV64-LABEL: test_compresstore_v64i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 64
+; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV64-NEXT: vcompress.vm v12, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV64-NEXT: vse8.v v12, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v64i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 64
+; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT: vcompress.vm v12, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT: vse8.v v12, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v64i8(<64 x i8> %data, ptr align 1 %p, <64 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v128i8(ptr %p, <128 x i1> %mask, <128 x i8> %data) {
+; RV64-LABEL: test_compresstore_v128i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 128
+; RV64-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT: vcompress.vm v16, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT: vse8.v v16, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v128i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 128
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT: vcompress.vm v16, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT: vse8.v v16, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v128i8(<128 x i8> %data, ptr align 1 %p, <128 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data) {
+; RV64-LABEL: test_compresstore_v256i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v7, v8
+; RV64-NEXT: li a2, 128
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vle8.v v24, (a1)
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v9, v0, 1
+; RV64-NEXT: vmv.x.s a1, v9
+; RV64-NEXT: vmv.x.s a3, v0
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vcompress.vm v8, v16, v0
+; RV64-NEXT: vcpop.m a4, v0
+; RV64-NEXT: vsetvli zero, a4, e8, m8, ta, ma
+; RV64-NEXT: vse8.v v8, (a0)
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vcompress.vm v8, v24, v7
+; RV64-NEXT: vcpop.m a2, v7
+; RV64-NEXT: cpop a3, a3
+; RV64-NEXT: cpop a1, a1
+; RV64-NEXT: add a0, a0, a3
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vse8.v v8, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v256i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vmv1r.v v7, v8
+; RV32-NEXT: li a2, 128
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT: vle8.v v24, (a1)
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v0, 1
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsrl.vx v10, v9, a1
+; RV32-NEXT: vmv.x.s a3, v10
+; RV32-NEXT: vsrl.vx v10, v0, a1
+; RV32-NEXT: vmv.x.s a1, v10
+; RV32-NEXT: vmv.x.s a4, v9
+; RV32-NEXT: vmv.x.s a5, v0
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT: vcompress.vm v8, v16, v0
+; RV32-NEXT: vcpop.m a6, v0
+; RV32-NEXT: vsetvli zero, a6, e8, m8, ta, ma
+; RV32-NEXT: vse8.v v8, (a0)
+; RV32-NEXT: cpop a1, a1
+; RV32-NEXT: cpop a5, a5
+; RV32-NEXT: add a1, a5, a1
+; RV32-NEXT: cpop a3, a3
+; RV32-NEXT: cpop a4, a4
+; RV32-NEXT: add a3, a4, a3
+; RV32-NEXT: add a1, a1, a3
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT: vcompress.vm v8, v24, v7
+; RV32-NEXT: vcpop.m a1, v7
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT: vse8.v v8, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v256i8(<256 x i8> %data, ptr align 1 %p, <256 x i1> %mask)
+ ret void
+}
+
+; Compress + store for i16 type
+
+define void @test_compresstore_v1i16(ptr %p, <1 x i1> %mask, <1 x i16> %data) {
+; RV64-LABEL: test_compresstore_v1i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT: vse16.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v1i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT: vse16.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v1i16(<1 x i16> %data, ptr align 2 %p, <1 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v2i16(ptr %p, <2 x i1> %mask, <2 x i16> %data) {
+; RV64-LABEL: test_compresstore_v2i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT: vse16.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v2i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT: vse16.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v2i16(<2 x i16> %data, ptr align 2 %p, <2 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v4i16(ptr %p, <4 x i1> %mask, <4 x i16> %data) {
+; RV64-LABEL: test_compresstore_v4i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; RV64-NEXT: vse16.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v4i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; RV32-NEXT: vse16.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v4i16(<4 x i16> %data, ptr align 2 %p, <4 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v8i16(ptr %p, <8 x i1> %mask, <8 x i16> %data) {
+; RV64-LABEL: test_compresstore_v8i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; RV64-NEXT: vse16.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v8i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; RV32-NEXT: vse16.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v8i16(<8 x i16> %data, ptr align 2 %p, <8 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v16i16(ptr %p, <16 x i1> %mask, <16 x i16> %data) {
+; RV64-LABEL: test_compresstore_v16i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vcompress.vm v10, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vse16.v v10, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v16i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vcompress.vm v10, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vse16.v v10, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v16i16(<16 x i16> %data, ptr align 2 %p, <16 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v32i16(ptr %p, <32 x i1> %mask, <32 x i16> %data) {
+; RV64-LABEL: test_compresstore_v32i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 32
+; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RV64-NEXT: vcompress.vm v12, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RV64-NEXT: vse16.v v12, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v32i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RV32-NEXT: vcompress.vm v12, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RV32-NEXT: vse16.v v12, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v32i16(<32 x i16> %data, ptr align 2 %p, <32 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v64i16(ptr %p, <64 x i1> %mask, <64 x i16> %data) {
+; RV64-LABEL: test_compresstore_v64i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 64
+; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT: vcompress.vm v16, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT: vse16.v v16, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v64i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 64
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT: vcompress.vm v16, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT: vse16.v v16, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v64i16(<64 x i16> %data, ptr align 2 %p, <64 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v128i16(ptr %p, <128 x i1> %mask, <128 x i16> %data) {
+; RV64-LABEL: test_compresstore_v128i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 64
+; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT: vcompress.vm v24, v8, v0
+; RV64-NEXT: vcpop.m a2, v0
+; RV64-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; RV64-NEXT: vse16.v v24, (a0)
+; RV64-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v0, 8
+; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT: vcompress.vm v24, v16, v8
+; RV64-NEXT: vcpop.m a2, v8
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmv.x.s a1, v0
+; RV64-NEXT: cpop a1, a1
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; RV64-NEXT: vse16.v v24, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v128i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 64
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT: vcompress.vm v24, v8, v0
+; RV32-NEXT: vcpop.m a2, v0
+; RV32-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; RV32-NEXT: vse16.v v24, (a0)
+; RV32-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v0, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT: vcompress.vm v8, v16, v24
+; RV32-NEXT: vcpop.m a1, v24
+; RV32-NEXT: li a2, 32
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vsrl.vx v16, v0, a2
+; RV32-NEXT: vmv.x.s a2, v16
+; RV32-NEXT: cpop a2, a2
+; RV32-NEXT: vmv.x.s a3, v0
+; RV32-NEXT: cpop a3, a3
+; RV32-NEXT: add a2, a3, a2
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: add a0, a0, a2
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT: vse16.v v8, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v128i16(<128 x i16> %data, ptr align 2 %p, <128 x i1> %mask)
+ ret void
+}
+
+; Compress + store for i32 type
+
+define void @test_compresstore_v1i32(ptr %p, <1 x i1> %mask, <1 x i32> %data) {
+; RV64-LABEL: test_compresstore_v1i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT: vse32.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v1i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT: vse32.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v1i32(<1 x i32> %data, ptr align 4 %p, <1 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v2i32(ptr %p, <2 x i1> %mask, <2 x i32> %data) {
+; RV64-LABEL: test_compresstore_v2i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT: vse32.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v2i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT: vse32.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v2i32(<2 x i32> %data, ptr align 4 %p, <2 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v4i32(ptr %p, <4 x i1> %mask, <4 x i32> %data) {
+; RV64-LABEL: test_compresstore_v4i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; RV64-NEXT: vse32.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v4i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; RV32-NEXT: vse32.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v4i32(<4 x i32> %data, ptr align 4 %p, <4 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v8i32(ptr %p, <8 x i1> %mask, <8 x i32> %data) {
+; RV64-LABEL: test_compresstore_v8i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64-NEXT: vcompress.vm v10, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; RV64-NEXT: vse32.v v10, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v8i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vcompress.vm v10, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; RV32-NEXT: vse32.v v10, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v8i32(<8 x i32> %data, ptr align 4 %p, <8 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v16i32(ptr %p, <16 x i1> %mask, <16 x i32> %data) {
+; RV64-LABEL: test_compresstore_v16i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT: vcompress.vm v12, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vse32.v v12, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v16i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vcompress.vm v12, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV32-NEXT: vse32.v v12, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v16i32(<16 x i32> %data, ptr align 4 %p, <16 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v32i32(ptr %p, <32 x i1> %mask, <32 x i32> %data) {
+; RV64-LABEL: test_compresstore_v32i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 32
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vcompress.vm v16, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vse32.v v16, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v32i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vcompress.vm v16, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vse32.v v16, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v32i32(<32 x i32> %data, ptr align 4 %p, <32 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v64i32(ptr %p, <64 x i1> %mask, <64 x i32> %data) {
+; RV64-LABEL: test_compresstore_v64i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 32
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vcompress.vm v24, v8, v0
+; RV64-NEXT: vcpop.m a2, v0
+; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV64-NEXT: vse32.v v24, (a0)
+; RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v0, 4
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vcompress.vm v24, v16, v8
+; RV64-NEXT: vcpop.m a1, v8
+; RV64-NEXT: vmv.x.s a2, v0
+; RV64-NEXT: cpopw a2, a2
+; RV64-NEXT: slli a2, a2, 2
+; RV64-NEXT: add a0, a0, a2
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vse32.v v24, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v64i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vcompress.vm v24, v8, v0
+; RV32-NEXT: vcpop.m a2, v0
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vse32.v v24, (a0)
+; RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v0, 4
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vcompress.vm v24, v16, v8
+; RV32-NEXT: vcpop.m a1, v8
+; RV32-NEXT: vmv.x.s a2, v0
+; RV32-NEXT: cpop a2, a2
+; RV32-NEXT: slli a2, a2, 2
+; RV32-NEXT: add a0, a0, a2
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vse32.v v24, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v64i32(<64 x i32> %data, ptr align 4 %p, <64 x i1> %mask)
+ ret void
+}
+
+; Compress + store for i64 type
+
+define void @test_compresstore_v1i64(ptr %p, <1 x i1> %mask, <1 x i64> %data) {
+; RV64-LABEL: test_compresstore_v1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vse64.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vse64.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v1i64(<1 x i64> %data, ptr align 8 %p, <1 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v2i64(ptr %p, <2 x i1> %mask, <2 x i64> %data) {
+; RV64-LABEL: test_compresstore_v2i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vse64.v v9, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v2i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vse64.v v9, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v2i64(<2 x i64> %data, ptr align 8 %p, <2 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v4i64(ptr %p, <4 x i1> %mask, <4 x i64> %data) {
+; RV64-LABEL: test_compresstore_v4i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vcompress.vm v10, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vse64.v v10, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v4i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vcompress.vm v10, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV32-NEXT: vse64.v v10, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v4i64(<4 x i64> %data, ptr align 8 %p, <4 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v8i64(ptr %p, <8 x i1> %mask, <8 x i64> %data) {
+; RV64-LABEL: test_compresstore_v8i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT: vcompress.vm v12, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vse64.v v12, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v8i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT: vcompress.vm v12, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV32-NEXT: vse64.v v12, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v8i64(<8 x i64> %data, ptr align 8 %p, <8 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v16i64(ptr %p, <16 x i1> %mask, <16 x i64> %data) {
+; RV64-LABEL: test_compresstore_v16i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vcompress.vm v16, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vse64.v v16, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v16i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vcompress.vm v16, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vse64.v v16, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v16i64(<16 x i64> %data, ptr align 8 %p, <16 x i1> %mask)
+ ret void
+}
+
+define void @test_compresstore_v32i64(ptr %p, <32 x i1> %mask, <32 x i64> %data) {
+; RV64-LABEL: test_compresstore_v32i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vcompress.vm v24, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vse64.v v24, (a0)
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vcompress.vm v8, v16, v24
+; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RV64-NEXT: vmv.x.s a1, v0
+; RV64-NEXT: zext.h a1, a1
+; RV64-NEXT: cpopw a1, a1
+; RV64-NEXT: slli a1, a1, 3
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: vcpop.m a1, v24
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vse64.v v8, (a0)
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_compresstore_v32i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vcompress.vm v24, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vse64.v v24, (a0)
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vcompress.vm v8, v16, v24
+; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RV32-NEXT: vmv.x.s a1, v0
+; RV32-NEXT: zext.h a1, a1
+; RV32-NEXT: cpop a1, a1
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: vcpop.m a1, v24
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: ret
+entry:
+ tail call void @llvm.masked.compressstore.v32i64(<32 x i64> %data, ptr align 8 %p, <32 x i1> %mask)
+ ret void
+}
+
+declare void @llvm.masked.compressstore.v1i8(<1 x i8>, ptr, <1 x i1>)
+declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>)
+declare void @llvm.masked.compressstore.v4i8(<4 x i8>, ptr, <4 x i1>)
+declare void @llvm.masked.compressstore.v8i8(<8 x i8>, ptr, <8 x i1>)
+declare void @llvm.masked.compressstore.v16i8(<16 x i8>, ptr, <16 x i1>)
+declare void @llvm.masked.compressstore.v32i8(<32 x i8>, ptr, <32 x i1>)
+declare void @llvm.masked.compressstore.v64i8(<64 x i8>, ptr, <64 x i1>)
+declare void @llvm.masked.compressstore.v128i8(<128 x i8>, ptr, <128 x i1>)
+declare void @llvm.masked.compressstore.v256i8(<256 x i8>, ptr, <256 x i1>)
+
+declare void @llvm.masked.compressstore.v1i16(<1 x i16>, ptr, <1 x i1>)
+declare void @llvm.masked.compressstore.v2i16(<2 x i16>, ptr, <2 x i1>)
+declare void @llvm.masked.compressstore.v4i16(<4 x i16>, ptr, <4 x i1>)
+declare void @llvm.masked.compressstore.v8i16(<8 x i16>, ptr, <8 x i1>)
+declare void @llvm.masked.compressstore.v16i16(<16 x i16>, ptr, <16 x i1>)
+declare void @llvm.masked.compressstore.v32i16(<32 x i16>, ptr, <32 x i1>)
+declare void @llvm.masked.compressstore.v64i16(<64 x i16>, ptr, <64 x i1>)
+declare void @llvm.masked.compressstore.v128i16(<128 x i16>, ptr, <128 x i1>)
+
+declare void @llvm.masked.compressstore.v1i32(<1 x i32>, ptr, <1 x i1>)
+declare void @llvm.masked.compressstore.v2i32(<2 x i32>, ptr, <2 x i1>)
+declare void @llvm.masked.compressstore.v4i32(<4 x i32>, ptr, <4 x i1>)
+declare void @llvm.masked.compressstore.v8i32(<8 x i32>, ptr, <8 x i1>)
+declare void @llvm.masked.compressstore.v16i32(<16 x i32>, ptr, <16 x i1>)
+declare void @llvm.masked.compressstore.v32i32(<32 x i32>, ptr, <32 x i1>)
+declare void @llvm.masked.compressstore.v64i32(<64 x i32>, ptr, <64 x i1>)
+
+declare void @llvm.masked.compressstore.v1i64(<1 x i64>, ptr, <1 x i1>)
+declare void @llvm.masked.compressstore.v2i64(<2 x i64>, ptr, <2 x i1>)
+declare void @llvm.masked.compressstore.v4i64(<4 x i64>, ptr, <4 x i1>)
+declare void @llvm.masked.compressstore.v8i64(<8 x i64>, ptr, <8 x i1>)
+declare void @llvm.masked.compressstore.v16i64(<16 x i64>, ptr, <16 x i1>)
+declare void @llvm.masked.compressstore.v32i64(<32 x i64>, ptr, <32 x i1>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
new file mode 100644
index 0000000..bd1209a17b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
@@ -0,0 +1,727 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+;
+; SABD
+;
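+; The absolute difference is computed directly in the narrow element type as
+; max(a, b) - min(a, b) (vmin.vv/vmax.vv/vsub.vv below) rather than by widening,
+; subtracting, and taking the absolute value of the wide result.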
+
+define <8 x i8> @sabd_8b_as_16b(<8 x i8> %a, <8 x i8> %b) {
+;
+; CHECK-LABEL: sabd_8b_as_16b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <8 x i8> %a to <8 x i16>
+ %b.sext = sext <8 x i8> %b to <8 x i16>
+ %sub = sub <8 x i16> %a.sext, %b.sext
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ %trunc = trunc <8 x i16> %abs to <8 x i8>
+ ret <8 x i8> %trunc
+}
+
+define <8 x i8> @sabd_8b_as_32b(<8 x i8> %a, <8 x i8> %b) {
+;
+; CHECK-LABEL: sabd_8b_as_32b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <8 x i8> %a to <8 x i32>
+ %b.sext = sext <8 x i8> %b to <8 x i32>
+ %sub = sub <8 x i32> %a.sext, %b.sext
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+ %trunc = trunc <8 x i32> %abs to <8 x i8>
+ ret <8 x i8> %trunc
+}
+
+define <16 x i8> @sabd_16b(<16 x i8> %a, <16 x i8> %b) {
+;
+; CHECK-LABEL: sabd_16b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <16 x i8> %a to <16 x i16>
+ %b.sext = sext <16 x i8> %b to <16 x i16>
+ %sub = sub <16 x i16> %a.sext, %b.sext
+ %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+ %trunc = trunc <16 x i16> %abs to <16 x i8>
+ ret <16 x i8> %trunc
+}
+
+define <4 x i16> @sabd_4h(<4 x i16> %a, <4 x i16> %b) {
+;
+; CHECK-LABEL: sabd_4h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <4 x i16> %a to <4 x i32>
+ %b.sext = sext <4 x i16> %b to <4 x i32>
+ %sub = sub <4 x i32> %a.sext, %b.sext
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ %trunc = trunc <4 x i32> %abs to <4 x i16>
+ ret <4 x i16> %trunc
+}
+
+define <4 x i16> @sabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
+;
+; CHECK-LABEL: sabd_4h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.sext = sext <4 x i8> %a to <4 x i16>
+ %b.sext = sext <4 x i8> %b to <4 x i16>
+ %sub = sub <4 x i16> %a.sext, %b.sext
+ %abs = call <4 x i16> @llvm.abs.v4i16(<4 x i16> %sub, i1 true)
+ ret <4 x i16> %abs
+}
+
+define <8 x i16> @sabd_8h(<8 x i16> %a, <8 x i16> %b) {
+;
+; CHECK-LABEL: sabd_8h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <8 x i16> %a to <8 x i32>
+ %b.sext = sext <8 x i16> %b to <8 x i32>
+ %sub = sub <8 x i32> %a.sext, %b.sext
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+ %trunc = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %trunc
+}
+
+define <8 x i16> @sabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
+;
+; CHECK-LABEL: sabd_8h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.sext = sext <8 x i8> %a to <8 x i16>
+ %b.sext = sext <8 x i8> %b to <8 x i16>
+ %sub = sub <8 x i16> %a.sext, %b.sext
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ ret <8 x i16> %abs
+}
+
+define <2 x i32> @sabd_2s(<2 x i32> %a, <2 x i32> %b) {
+;
+; CHECK-LABEL: sabd_2s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <2 x i32> %a to <2 x i64>
+ %b.sext = sext <2 x i32> %b to <2 x i64>
+ %sub = sub <2 x i64> %a.sext, %b.sext
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ %trunc = trunc <2 x i64> %abs to <2 x i32>
+ ret <2 x i32> %trunc
+}
+
+define <2 x i32> @sabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
+;
+; CHECK-LABEL: sabd_2s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.sext = sext <2 x i16> %a to <2 x i32>
+ %b.sext = sext <2 x i16> %b to <2 x i32>
+ %sub = sub <2 x i32> %a.sext, %b.sext
+ %abs = call <2 x i32> @llvm.abs.v2i32(<2 x i32> %sub, i1 true)
+ ret <2 x i32> %abs
+}
+
+define <4 x i32> @sabd_4s(<4 x i32> %a, <4 x i32> %b) {
+;
+; CHECK-LABEL: sabd_4s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <4 x i32> %a to <4 x i64>
+ %b.sext = sext <4 x i32> %b to <4 x i64>
+ %sub = sub <4 x i64> %a.sext, %b.sext
+ %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
+ %trunc = trunc <4 x i64> %abs to <4 x i32>
+ ret <4 x i32> %trunc
+}
+
+define <4 x i32> @sabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
+;
+; CHECK-LABEL: sabd_4s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.sext = sext <4 x i16> %a to <4 x i32>
+ %b.sext = sext <4 x i16> %b to <4 x i32>
+ %sub = sub <4 x i32> %a.sext, %b.sext
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ ret <4 x i32> %abs
+}
+
+define <2 x i64> @sabd_2d(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: sabd_2d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <2 x i64> %a to <2 x i128>
+ %b.sext = sext <2 x i64> %b to <2 x i128>
+ %sub = sub <2 x i128> %a.sext, %b.sext
+ %abs = call <2 x i128> @llvm.abs.v2i128(<2 x i128> %sub, i1 true)
+ %trunc = trunc <2 x i128> %abs to <2 x i64>
+ ret <2 x i64> %trunc
+}
+
+define <2 x i64> @sabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
+;
+; CHECK-LABEL: sabd_2d_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.sext = sext <2 x i32> %a to <2 x i64>
+ %b.sext = sext <2 x i32> %b to <2 x i64>
+ %sub = sub <2 x i64> %a.sext, %b.sext
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ ret <2 x i64> %abs
+}
+
+;
+; UABD
+;
+
+define <8 x i8> @uabd_8b(<8 x i8> %a, <8 x i8> %b) {
+;
+; CHECK-LABEL: uabd_8b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <8 x i8> %a to <8 x i16>
+ %b.zext = zext <8 x i8> %b to <8 x i16>
+ %sub = sub <8 x i16> %a.zext, %b.zext
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ %trunc = trunc <8 x i16> %abs to <8 x i8>
+ ret <8 x i8> %trunc
+}
+
+define <16 x i8> @uabd_16b(<16 x i8> %a, <16 x i8> %b) {
+;
+; CHECK-LABEL: uabd_16b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <16 x i8> %a to <16 x i16>
+ %b.zext = zext <16 x i8> %b to <16 x i16>
+ %sub = sub <16 x i16> %a.zext, %b.zext
+ %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+ %trunc = trunc <16 x i16> %abs to <16 x i8>
+ ret <16 x i8> %trunc
+}
+
+define <4 x i16> @uabd_4h(<4 x i16> %a, <4 x i16> %b) {
+;
+; CHECK-LABEL: uabd_4h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <4 x i16> %a to <4 x i32>
+ %b.zext = zext <4 x i16> %b to <4 x i32>
+ %sub = sub <4 x i32> %a.zext, %b.zext
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ %trunc = trunc <4 x i32> %abs to <4 x i16>
+ ret <4 x i16> %trunc
+}
+
+define <4 x i16> @uabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
+;
+; CHECK-LABEL: uabd_4h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.zext = zext <4 x i8> %a to <4 x i16>
+ %b.zext = zext <4 x i8> %b to <4 x i16>
+ %sub = sub <4 x i16> %a.zext, %b.zext
+ %abs = call <4 x i16> @llvm.abs.v4i16(<4 x i16> %sub, i1 true)
+ ret <4 x i16> %abs
+}
+
+define <8 x i16> @uabd_8h(<8 x i16> %a, <8 x i16> %b) {
+;
+; CHECK-LABEL: uabd_8h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <8 x i16> %a to <8 x i32>
+ %b.zext = zext <8 x i16> %b to <8 x i32>
+ %sub = sub <8 x i32> %a.zext, %b.zext
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+ %trunc = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %trunc
+}
+
+define <8 x i16> @uabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
+;
+; CHECK-LABEL: uabd_8h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.zext = zext <8 x i8> %a to <8 x i16>
+ %b.zext = zext <8 x i8> %b to <8 x i16>
+ %sub = sub <8 x i16> %a.zext, %b.zext
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ ret <8 x i16> %abs
+}
+
+define <2 x i32> @uabd_2s(<2 x i32> %a, <2 x i32> %b) {
+;
+; CHECK-LABEL: uabd_2s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <2 x i32> %a to <2 x i64>
+ %b.zext = zext <2 x i32> %b to <2 x i64>
+ %sub = sub <2 x i64> %a.zext, %b.zext
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ %trunc = trunc <2 x i64> %abs to <2 x i32>
+ ret <2 x i32> %trunc
+}
+
+define <2 x i32> @uabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
+;
+; CHECK-LABEL: uabd_2s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.zext = zext <2 x i16> %a to <2 x i32>
+ %b.zext = zext <2 x i16> %b to <2 x i32>
+ %sub = sub <2 x i32> %a.zext, %b.zext
+ %abs = call <2 x i32> @llvm.abs.v2i32(<2 x i32> %sub, i1 true)
+ ret <2 x i32> %abs
+}
+
+define <4 x i32> @uabd_4s(<4 x i32> %a, <4 x i32> %b) {
+;
+; CHECK-LABEL: uabd_4s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <4 x i32> %a to <4 x i64>
+ %b.zext = zext <4 x i32> %b to <4 x i64>
+ %sub = sub <4 x i64> %a.zext, %b.zext
+ %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
+ %trunc = trunc <4 x i64> %abs to <4 x i32>
+ ret <4 x i32> %trunc
+}
+
+define <4 x i32> @uabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
+;
+; CHECK-LABEL: uabd_4s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.zext = zext <4 x i16> %a to <4 x i32>
+ %b.zext = zext <4 x i16> %b to <4 x i32>
+ %sub = sub <4 x i32> %a.zext, %b.zext
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ ret <4 x i32> %abs
+}
+
+define <2 x i64> @uabd_2d(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: uabd_2d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <2 x i64> %a to <2 x i128>
+ %b.zext = zext <2 x i64> %b to <2 x i128>
+ %sub = sub <2 x i128> %a.zext, %b.zext
+ %abs = call <2 x i128> @llvm.abs.v2i128(<2 x i128> %sub, i1 true)
+ %trunc = trunc <2 x i128> %abs to <2 x i64>
+ ret <2 x i64> %trunc
+}
+
+define <2 x i64> @uabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
+;
+; CHECK-LABEL: uabd_2d_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.zext = zext <2 x i32> %a to <2 x i64>
+ %b.zext = zext <2 x i32> %b to <2 x i64>
+ %sub = sub <2 x i64> %a.zext, %b.zext
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ ret <2 x i64> %abs
+}
+
+define <16 x i8> @uabd_v16i8_nuw(<16 x i8> %a, <16 x i8> %b) {
+;
+; CHECK-LABEL: uabd_v16i8_nuw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: vrsub.vi v9, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %sub = sub nuw <16 x i8> %a, %b
+ %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
+ ret <16 x i8> %abs
+}
+
+define <8 x i16> @uabd_v8i16_nuw(<8 x i16> %a, <8 x i16> %b) {
+;
+; CHECK-LABEL: uabd_v8i16_nuw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: vrsub.vi v9, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %sub = sub nuw <8 x i16> %a, %b
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ ret <8 x i16> %abs
+}
+
+define <4 x i32> @uabd_v4i32_nuw(<4 x i32> %a, <4 x i32> %b) {
+;
+; CHECK-LABEL: uabd_v4i32_nuw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: vrsub.vi v9, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %sub = sub nuw <4 x i32> %a, %b
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ ret <4 x i32> %abs
+}
+
+define <2 x i64> @uabd_v2i64_nuw(<2 x i64> %a, <2 x i64> %b) {
+;
+; CHECK-LABEL: uabd_v2i64_nuw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: vrsub.vi v9, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %sub = sub nuw <2 x i64> %a, %b
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ ret <2 x i64> %abs
+}
+
+define <16 x i8> @sabd_v16i8_nsw(<16 x i8> %a, <16 x i8> %b) {
+;
+; CHECK-LABEL: sabd_v16i8_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %sub = sub nsw <16 x i8> %a, %b
+ %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
+ ret <16 x i8> %abs
+}
+
+define <8 x i16> @sabd_v8i16_nsw(<8 x i16> %a, <8 x i16> %b) {
+;
+; CHECK-LABEL: sabd_v8i16_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %sub = sub nsw <8 x i16> %a, %b
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ ret <8 x i16> %abs
+}
+
+define <4 x i32> @sabd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) {
+;
+; CHECK-LABEL: sabd_v4i32_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %sub = sub nsw <4 x i32> %a, %b
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ ret <4 x i32> %abs
+}
+
+define <2 x i64> @sabd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) {
+;
+; CHECK-LABEL: sabd_v2i64_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %sub = sub nsw <2 x i64> %a, %b
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ ret <2 x i64> %abs
+}
+
+define <16 x i8> @smaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
+;
+; CHECK-LABEL: smaxmin_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <16 x i8> @llvm.smax.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %b = tail call <16 x i8> @llvm.smin.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %sub = sub <16 x i8> %a, %b
+ ret <16 x i8> %sub
+}
+
+define <8 x i16> @smaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
+;
+; CHECK-LABEL: smaxmin_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <8 x i16> @llvm.smax.v8i16(<8 x i16> %0, <8 x i16> %1)
+ %b = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> %0, <8 x i16> %1)
+ %sub = sub <8 x i16> %a, %b
+ ret <8 x i16> %sub
+}
+
+define <4 x i32> @smaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
+;
+; CHECK-LABEL: smaxmin_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %0, <4 x i32> %1)
+ %b = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %0, <4 x i32> %1)
+ %sub = sub <4 x i32> %a, %b
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
+;
+; CHECK-LABEL: smaxmin_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %0, <2 x i64> %1)
+ %b = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %0, <2 x i64> %1)
+ %sub = sub <2 x i64> %a, %b
+ ret <2 x i64> %sub
+}
+
+define <16 x i8> @umaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
+;
+; CHECK-LABEL: umaxmin_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %sub = sub <16 x i8> %a, %b
+ ret <16 x i8> %sub
+}
+
+define <8 x i16> @umaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
+;
+; CHECK-LABEL: umaxmin_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <8 x i16> @llvm.umax.v8i16(<8 x i16> %0, <8 x i16> %1)
+ %b = tail call <8 x i16> @llvm.umin.v8i16(<8 x i16> %0, <8 x i16> %1)
+ %sub = sub <8 x i16> %a, %b
+ ret <8 x i16> %sub
+}
+
+define <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
+;
+; CHECK-LABEL: umaxmin_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <4 x i32> @llvm.umax.v4i32(<4 x i32> %0, <4 x i32> %1)
+ %b = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %0, <4 x i32> %1)
+ %sub = sub <4 x i32> %a, %b
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
+;
+; CHECK-LABEL: umaxmin_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <2 x i64> @llvm.umax.v2i64(<2 x i64> %0, <2 x i64> %1)
+ %b = tail call <2 x i64> @llvm.umin.v2i64(<2 x i64> %0, <2 x i64> %1)
+ %sub = sub <2 x i64> %a, %b
+ ret <2 x i64> %sub
+}
+
+define <16 x i8> @umaxmin_v16i8_com1(<16 x i8> %0, <16 x i8> %1) {
+;
+; CHECK-LABEL: umaxmin_v16i8_com1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %1, <16 x i8> %0)
+ %sub = sub <16 x i8> %a, %b
+ ret <16 x i8> %sub
+}
+
+declare <8 x i8> @llvm.abs.v8i8(<8 x i8>, i1)
+declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1)
+
+declare <4 x i16> @llvm.abs.v4i16(<4 x i16>, i1)
+declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1)
+declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
+
+declare <2 x i32> @llvm.abs.v2i32(<2 x i32>, i1)
+declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)
+declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
+
+declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)
+declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
+
+declare <2 x i128> @llvm.abs.v2i128(<2 x i128>, i1)
+
+declare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
+declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
+declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.umax.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.umax.v2i64(<2 x i64>, <2 x i64>)
+declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
index b7afee7..5252eb7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
@@ -416,8 +416,8 @@ define double @bitcast_v1i64_f64(<1 x i64> %a) {
; RV32ELEN32: # %bb.0:
; RV32ELEN32-NEXT: addi sp, sp, -16
; RV32ELEN32-NEXT: .cfi_def_cfa_offset 16
-; RV32ELEN32-NEXT: sw a1, 12(sp)
; RV32ELEN32-NEXT: sw a0, 8(sp)
+; RV32ELEN32-NEXT: sw a1, 12(sp)
; RV32ELEN32-NEXT: fld fa0, 8(sp)
; RV32ELEN32-NEXT: addi sp, sp, 16
; RV32ELEN32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
index 52c5292..36fbdd8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
@@ -6,24 +6,20 @@ declare void @llvm.masked.compressstore.v1f16(<1 x half>, ptr, <1 x i1>)
define void @compressstore_v1f16(ptr %base, <1 x half> %v, <1 x i1> %mask) {
; RV32-LABEL: compressstore_v1f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT: vfirst.m a1, v0
-; RV32-NEXT: bnez a1, .LBB0_2
-; RV32-NEXT: # %bb.1: # %cond.store
; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; RV32-NEXT: vse16.v v8, (a0)
-; RV32-NEXT: .LBB0_2: # %else
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT: vse16.v v9, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v1f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT: vfirst.m a1, v0
-; RV64-NEXT: bnez a1, .LBB0_2
-; RV64-NEXT: # %bb.1: # %cond.store
; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; RV64-NEXT: vse16.v v8, (a0)
-; RV64-NEXT: .LBB0_2: # %else
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT: vse16.v v9, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v1f16(<1 x half> %v, ptr align 2 %base, <1 x i1> %mask)
ret void
@@ -33,48 +29,20 @@ declare void @llvm.masked.compressstore.v2f16(<2 x half>, ptr, <2 x i1>)
define void @compressstore_v2f16(ptr %base, <2 x half> %v, <2 x i1> %mask) {
; RV32-LABEL: compressstore_v2f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB1_3
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: bnez a1, .LBB1_4
-; RV32-NEXT: .LBB1_2: # %else2
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB1_3: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; RV32-NEXT: vse16.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: beqz a1, .LBB1_2
-; RV32-NEXT: .LBB1_4: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vse16.v v8, (a0)
+; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT: vse16.v v9, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v2f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB1_3
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: bnez a1, .LBB1_4
-; RV64-NEXT: .LBB1_2: # %else2
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB1_3: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; RV64-NEXT: vse16.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: beqz a1, .LBB1_2
-; RV64-NEXT: .LBB1_4: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vse16.v v8, (a0)
+; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT: vse16.v v9, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v2f16(<2 x half> %v, ptr align 2 %base, <2 x i1> %mask)
ret void
@@ -84,88 +52,20 @@ declare void @llvm.masked.compressstore.v4f16(<4 x half>, ptr, <4 x i1>)
define void @compressstore_v4f16(ptr %base, <4 x half> %v, <4 x i1> %mask) {
; RV32-LABEL: compressstore_v4f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB2_5
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB2_6
-; RV32-NEXT: .LBB2_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB2_7
-; RV32-NEXT: .LBB2_3: # %else5
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: bnez a1, .LBB2_8
-; RV32-NEXT: .LBB2_4: # %else8
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB2_5: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT: vse16.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB2_2
-; RV32-NEXT: .LBB2_6: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 1
-; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB2_3
-; RV32-NEXT: .LBB2_7: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 2
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: beqz a1, .LBB2_4
-; RV32-NEXT: .LBB2_8: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vse16.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v4f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB2_5
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB2_6
-; RV64-NEXT: .LBB2_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB2_7
-; RV64-NEXT: .LBB2_3: # %else5
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: bnez a1, .LBB2_8
-; RV64-NEXT: .LBB2_4: # %else8
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB2_5: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64-NEXT: vse16.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB2_2
-; RV64-NEXT: .LBB2_6: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 1
-; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB2_3
-; RV64-NEXT: .LBB2_7: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 2
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: beqz a1, .LBB2_4
-; RV64-NEXT: .LBB2_8: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vse16.v v8, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v4f16(<4 x half> %v, ptr align 2 %base, <4 x i1> %mask)
ret void
@@ -175,168 +75,20 @@ declare void @llvm.masked.compressstore.v8f16(<8 x half>, ptr, <8 x i1>)
define void @compressstore_v8f16(ptr %base, <8 x half> %v, <8 x i1> %mask) {
; RV32-LABEL: compressstore_v8f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB3_9
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB3_10
-; RV32-NEXT: .LBB3_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB3_11
-; RV32-NEXT: .LBB3_3: # %else5
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: bnez a2, .LBB3_12
-; RV32-NEXT: .LBB3_4: # %else8
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: bnez a2, .LBB3_13
-; RV32-NEXT: .LBB3_5: # %else11
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: bnez a2, .LBB3_14
-; RV32-NEXT: .LBB3_6: # %else14
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: bnez a2, .LBB3_15
-; RV32-NEXT: .LBB3_7: # %else17
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: bnez a1, .LBB3_16
-; RV32-NEXT: .LBB3_8: # %else20
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB3_9: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vse16.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB3_2
-; RV32-NEXT: .LBB3_10: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 1
-; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB3_3
-; RV32-NEXT: .LBB3_11: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 2
-; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: beqz a2, .LBB3_4
-; RV32-NEXT: .LBB3_12: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 3
-; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: beqz a2, .LBB3_5
-; RV32-NEXT: .LBB3_13: # %cond.store10
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 4
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: beqz a2, .LBB3_6
-; RV32-NEXT: .LBB3_14: # %cond.store13
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 5
-; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: beqz a2, .LBB3_7
-; RV32-NEXT: .LBB3_15: # %cond.store16
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 6
-; RV32-NEXT: vse16.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: beqz a1, .LBB3_8
-; RV32-NEXT: .LBB3_16: # %cond.store19
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vse16.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v8f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB3_9
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB3_10
-; RV64-NEXT: .LBB3_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB3_11
-; RV64-NEXT: .LBB3_3: # %else5
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: bnez a2, .LBB3_12
-; RV64-NEXT: .LBB3_4: # %else8
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: bnez a2, .LBB3_13
-; RV64-NEXT: .LBB3_5: # %else11
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: bnez a2, .LBB3_14
-; RV64-NEXT: .LBB3_6: # %else14
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: bnez a2, .LBB3_15
-; RV64-NEXT: .LBB3_7: # %else17
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: bnez a1, .LBB3_16
-; RV64-NEXT: .LBB3_8: # %else20
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB3_9: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vse16.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB3_2
-; RV64-NEXT: .LBB3_10: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 1
-; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB3_3
-; RV64-NEXT: .LBB3_11: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 2
-; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: beqz a2, .LBB3_4
-; RV64-NEXT: .LBB3_12: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 3
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: beqz a2, .LBB3_5
-; RV64-NEXT: .LBB3_13: # %cond.store10
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 4
-; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: beqz a2, .LBB3_6
-; RV64-NEXT: .LBB3_14: # %cond.store13
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 5
-; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: beqz a2, .LBB3_7
-; RV64-NEXT: .LBB3_15: # %cond.store16
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 6
-; RV64-NEXT: vse16.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: beqz a1, .LBB3_8
-; RV64-NEXT: .LBB3_16: # %cond.store19
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vse16.v v8, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v8f16(<8 x half> %v, ptr align 2 %base, <8 x i1> %mask)
ret void
@@ -346,24 +98,20 @@ declare void @llvm.masked.compressstore.v1f32(<1 x float>, ptr, <1 x i1>)
define void @compressstore_v1f32(ptr %base, <1 x float> %v, <1 x i1> %mask) {
; RV32-LABEL: compressstore_v1f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT: vfirst.m a1, v0
-; RV32-NEXT: bnez a1, .LBB4_2
-; RV32-NEXT: # %bb.1: # %cond.store
; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV32-NEXT: vse32.v v8, (a0)
-; RV32-NEXT: .LBB4_2: # %else
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT: vse32.v v9, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v1f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT: vfirst.m a1, v0
-; RV64-NEXT: bnez a1, .LBB4_2
-; RV64-NEXT: # %bb.1: # %cond.store
; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT: vse32.v v8, (a0)
-; RV64-NEXT: .LBB4_2: # %else
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT: vse32.v v9, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v1f32(<1 x float> %v, ptr align 4 %base, <1 x i1> %mask)
ret void
@@ -373,48 +121,20 @@ declare void @llvm.masked.compressstore.v2f32(<2 x float>, ptr, <2 x i1>)
define void @compressstore_v2f32(ptr %base, <2 x float> %v, <2 x i1> %mask) {
; RV32-LABEL: compressstore_v2f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB5_3
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: bnez a1, .LBB5_4
-; RV32-NEXT: .LBB5_2: # %else2
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB5_3: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV32-NEXT: vse32.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: beqz a1, .LBB5_2
-; RV32-NEXT: .LBB5_4: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vse32.v v8, (a0)
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT: vse32.v v9, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v2f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB5_3
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: bnez a1, .LBB5_4
-; RV64-NEXT: .LBB5_2: # %else2
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB5_3: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT: vse32.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: beqz a1, .LBB5_2
-; RV64-NEXT: .LBB5_4: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vse32.v v8, (a0)
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT: vse32.v v9, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v2f32(<2 x float> %v, ptr align 4 %base, <2 x i1> %mask)
ret void
@@ -424,88 +144,20 @@ declare void @llvm.masked.compressstore.v4f32(<4 x float>, ptr, <4 x i1>)
define void @compressstore_v4f32(ptr %base, <4 x float> %v, <4 x i1> %mask) {
; RV32-LABEL: compressstore_v4f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB6_5
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB6_6
-; RV32-NEXT: .LBB6_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB6_7
-; RV32-NEXT: .LBB6_3: # %else5
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: bnez a1, .LBB6_8
-; RV32-NEXT: .LBB6_4: # %else8
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB6_5: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vse32.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB6_2
-; RV32-NEXT: .LBB6_6: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 1
-; RV32-NEXT: vse32.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB6_3
-; RV32-NEXT: .LBB6_7: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v9, v8, 2
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; RV32-NEXT: vse32.v v9, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: beqz a1, .LBB6_4
-; RV32-NEXT: .LBB6_8: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vse32.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v4f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB6_5
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB6_6
-; RV64-NEXT: .LBB6_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB6_7
-; RV64-NEXT: .LBB6_3: # %else5
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: bnez a1, .LBB6_8
-; RV64-NEXT: .LBB6_4: # %else8
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB6_5: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vse32.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB6_2
-; RV64-NEXT: .LBB6_6: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 1
-; RV64-NEXT: vse32.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB6_3
-; RV64-NEXT: .LBB6_7: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v9, v8, 2
+; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; RV64-NEXT: vse32.v v9, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: beqz a1, .LBB6_4
-; RV64-NEXT: .LBB6_8: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vse32.v v8, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v4f32(<4 x float> %v, ptr align 4 %base, <4 x i1> %mask)
ret void
@@ -515,176 +167,20 @@ declare void @llvm.masked.compressstore.v8f32(<8 x float>, ptr, <8 x i1>)
define void @compressstore_v8f32(ptr %base, <8 x float> %v, <8 x i1> %mask) {
; RV32-LABEL: compressstore_v8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB7_9
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB7_10
-; RV32-NEXT: .LBB7_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB7_11
-; RV32-NEXT: .LBB7_3: # %else5
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: bnez a2, .LBB7_12
-; RV32-NEXT: .LBB7_4: # %else8
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: bnez a2, .LBB7_13
-; RV32-NEXT: .LBB7_5: # %else11
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: bnez a2, .LBB7_14
-; RV32-NEXT: .LBB7_6: # %else14
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: bnez a2, .LBB7_15
-; RV32-NEXT: .LBB7_7: # %else17
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: bnez a1, .LBB7_16
-; RV32-NEXT: .LBB7_8: # %else20
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB7_9: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vse32.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB7_2
-; RV32-NEXT: .LBB7_10: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 1
-; RV32-NEXT: vse32.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB7_3
-; RV32-NEXT: .LBB7_11: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 2
-; RV32-NEXT: vse32.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: beqz a2, .LBB7_4
-; RV32-NEXT: .LBB7_12: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 3
-; RV32-NEXT: vse32.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: beqz a2, .LBB7_5
-; RV32-NEXT: .LBB7_13: # %cond.store10
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 4
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vse32.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: beqz a2, .LBB7_6
-; RV32-NEXT: .LBB7_14: # %cond.store13
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 5
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vse32.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: beqz a2, .LBB7_7
-; RV32-NEXT: .LBB7_15: # %cond.store16
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 6
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vcompress.vm v10, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vse32.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: beqz a1, .LBB7_8
-; RV32-NEXT: .LBB7_16: # %cond.store19
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT: vse32.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB7_9
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB7_10
-; RV64-NEXT: .LBB7_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB7_11
-; RV64-NEXT: .LBB7_3: # %else5
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: bnez a2, .LBB7_12
-; RV64-NEXT: .LBB7_4: # %else8
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: bnez a2, .LBB7_13
-; RV64-NEXT: .LBB7_5: # %else11
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: bnez a2, .LBB7_14
-; RV64-NEXT: .LBB7_6: # %else14
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: bnez a2, .LBB7_15
-; RV64-NEXT: .LBB7_7: # %else17
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: bnez a1, .LBB7_16
-; RV64-NEXT: .LBB7_8: # %else20
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB7_9: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vse32.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB7_2
-; RV64-NEXT: .LBB7_10: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 1
-; RV64-NEXT: vse32.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB7_3
-; RV64-NEXT: .LBB7_11: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 2
-; RV64-NEXT: vse32.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: beqz a2, .LBB7_4
-; RV64-NEXT: .LBB7_12: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 3
-; RV64-NEXT: vse32.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: beqz a2, .LBB7_5
-; RV64-NEXT: .LBB7_13: # %cond.store10
-; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 4
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vse32.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: beqz a2, .LBB7_6
-; RV64-NEXT: .LBB7_14: # %cond.store13
-; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 5
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vse32.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: beqz a2, .LBB7_7
-; RV64-NEXT: .LBB7_15: # %cond.store16
-; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 6
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64-NEXT: vcompress.vm v10, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vse32.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: beqz a1, .LBB7_8
-; RV64-NEXT: .LBB7_16: # %cond.store19
-; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT: vse32.v v8, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v8f32(<8 x float> %v, ptr align 4 %base, <8 x i1> %mask)
ret void
@@ -694,24 +190,20 @@ declare void @llvm.masked.compressstore.v1f64(<1 x double>, ptr, <1 x i1>)
define void @compressstore_v1f64(ptr %base, <1 x double> %v, <1 x i1> %mask) {
; RV32-LABEL: compressstore_v1f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT: vfirst.m a1, v0
-; RV32-NEXT: bnez a1, .LBB8_2
-; RV32-NEXT: # %bb.1: # %cond.store
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v8, (a0)
-; RV32-NEXT: .LBB8_2: # %else
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vse64.v v9, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v1f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT: vfirst.m a1, v0
-; RV64-NEXT: bnez a1, .LBB8_2
-; RV64-NEXT: # %bb.1: # %cond.store
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: .LBB8_2: # %else
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vse64.v v9, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v1f64(<1 x double> %v, ptr align 8 %base, <1 x i1> %mask)
ret void
@@ -721,48 +213,20 @@ declare void @llvm.masked.compressstore.v2f64(<2 x double>, ptr, <2 x i1>)
define void @compressstore_v2f64(ptr %base, <2 x double> %v, <2 x i1> %mask) {
; RV32-LABEL: compressstore_v2f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB9_3
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: bnez a1, .LBB9_4
-; RV32-NEXT: .LBB9_2: # %else2
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB9_3: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: beqz a1, .LBB9_2
-; RV32-NEXT: .LBB9_4: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vcompress.vm v9, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vse64.v v9, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v2f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB9_3
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: bnez a1, .LBB9_4
-; RV64-NEXT: .LBB9_2: # %else2
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB9_3: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: beqz a1, .LBB9_2
-; RV64-NEXT: .LBB9_4: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vse64.v v8, (a0)
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vcompress.vm v9, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vse64.v v9, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v2f64(<2 x double> %v, ptr align 8 %base, <2 x i1> %mask)
ret void
@@ -772,92 +236,20 @@ declare void @llvm.masked.compressstore.v4f64(<4 x double>, ptr, <4 x i1>)
define void @compressstore_v4f64(ptr %base, <4 x double> %v, <4 x i1> %mask) {
; RV32-LABEL: compressstore_v4f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB10_5
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB10_6
-; RV32-NEXT: .LBB10_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB10_7
-; RV32-NEXT: .LBB10_3: # %else5
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: bnez a1, .LBB10_8
-; RV32-NEXT: .LBB10_4: # %else8
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB10_5: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB10_2
-; RV32-NEXT: .LBB10_6: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 1
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vcompress.vm v10, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV32-NEXT: vse64.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB10_3
-; RV32-NEXT: .LBB10_7: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 2
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v10, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: beqz a1, .LBB10_4
-; RV32-NEXT: .LBB10_8: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v4f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB10_5
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB10_6
-; RV64-NEXT: .LBB10_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB10_7
-; RV64-NEXT: .LBB10_3: # %else5
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: bnez a1, .LBB10_8
-; RV64-NEXT: .LBB10_4: # %else8
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB10_5: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB10_2
-; RV64-NEXT: .LBB10_6: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 1
-; RV64-NEXT: vse64.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB10_3
-; RV64-NEXT: .LBB10_7: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 2
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vcompress.vm v10, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vse64.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: beqz a1, .LBB10_4
-; RV64-NEXT: .LBB10_8: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
; RV64-NEXT: ret
call void @llvm.masked.compressstore.v4f64(<4 x double> %v, ptr align 8 %base, <4 x i1> %mask)
ret void
@@ -867,213 +259,21 @@ declare void @llvm.masked.compressstore.v8f64(<8 x double>, ptr, <8 x i1>)
define void @compressstore_v8f64(ptr %base, <8 x double> %v, <8 x i1> %mask) {
; RV32-LABEL: compressstore_v8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB11_11
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB11_12
-; RV32-NEXT: .LBB11_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB11_13
-; RV32-NEXT: .LBB11_3: # %else5
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: beqz a2, .LBB11_5
-; RV32-NEXT: .LBB11_4: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 3
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v12, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: .LBB11_5: # %else8
-; RV32-NEXT: addi sp, sp, -320
-; RV32-NEXT: .cfi_def_cfa_offset 320
-; RV32-NEXT: sw ra, 316(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 312(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: addi s0, sp, 320
-; RV32-NEXT: .cfi_def_cfa s0, 0
-; RV32-NEXT: andi sp, sp, -64
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: bnez a2, .LBB11_14
-; RV32-NEXT: # %bb.6: # %else11
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: bnez a2, .LBB11_15
-; RV32-NEXT: .LBB11_7: # %else14
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: bnez a2, .LBB11_16
-; RV32-NEXT: .LBB11_8: # %else17
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: beqz a1, .LBB11_10
-; RV32-NEXT: .LBB11_9: # %cond.store19
-; RV32-NEXT: mv a1, sp
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vse64.v v8, (a1)
-; RV32-NEXT: fld fa5, 56(sp)
-; RV32-NEXT: fsd fa5, 0(a0)
-; RV32-NEXT: .LBB11_10: # %else20
-; RV32-NEXT: addi sp, s0, -320
-; RV32-NEXT: lw ra, 316(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 312(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 320
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB11_11: # %cond.store
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB11_2
-; RV32-NEXT: .LBB11_12: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 1
+; RV32-NEXT: vcompress.vm v12, v8, v0
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vse64.v v12, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB11_3
-; RV32-NEXT: .LBB11_13: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 2
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vse64.v v12, (a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: bnez a2, .LBB11_4
-; RV32-NEXT: j .LBB11_5
-; RV32-NEXT: .LBB11_14: # %cond.store10
-; RV32-NEXT: addi a2, sp, 192
-; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vse64.v v8, (a2)
-; RV32-NEXT: fld fa5, 224(sp)
-; RV32-NEXT: fsd fa5, 0(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: beqz a2, .LBB11_7
-; RV32-NEXT: .LBB11_15: # %cond.store13
-; RV32-NEXT: addi a2, sp, 128
-; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vse64.v v8, (a2)
-; RV32-NEXT: fld fa5, 168(sp)
-; RV32-NEXT: fsd fa5, 0(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: beqz a2, .LBB11_8
-; RV32-NEXT: .LBB11_16: # %cond.store16
-; RV32-NEXT: addi a2, sp, 64
-; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vse64.v v8, (a2)
-; RV32-NEXT: fld fa5, 112(sp)
-; RV32-NEXT: fsd fa5, 0(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: bnez a1, .LBB11_9
-; RV32-NEXT: j .LBB11_10
+; RV32-NEXT: ret
;
; RV64-LABEL: compressstore_v8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB11_11
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB11_12
-; RV64-NEXT: .LBB11_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB11_13
-; RV64-NEXT: .LBB11_3: # %else5
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: beqz a2, .LBB11_5
-; RV64-NEXT: .LBB11_4: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v12, v8, 3
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v12, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: .LBB11_5: # %else8
-; RV64-NEXT: addi sp, sp, -320
-; RV64-NEXT: .cfi_def_cfa_offset 320
-; RV64-NEXT: sd ra, 312(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 304(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: addi s0, sp, 320
-; RV64-NEXT: .cfi_def_cfa s0, 0
-; RV64-NEXT: andi sp, sp, -64
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: bnez a2, .LBB11_14
-; RV64-NEXT: # %bb.6: # %else11
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: bnez a2, .LBB11_15
-; RV64-NEXT: .LBB11_7: # %else14
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: bnez a2, .LBB11_16
-; RV64-NEXT: .LBB11_8: # %else17
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: beqz a1, .LBB11_10
-; RV64-NEXT: .LBB11_9: # %cond.store19
-; RV64-NEXT: mv a1, sp
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a1)
-; RV64-NEXT: fld fa5, 56(sp)
-; RV64-NEXT: fsd fa5, 0(a0)
-; RV64-NEXT: .LBB11_10: # %else20
-; RV64-NEXT: addi sp, s0, -320
-; RV64-NEXT: ld ra, 312(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 304(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 320
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB11_11: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB11_2
-; RV64-NEXT: .LBB11_12: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v12, v8, 1
+; RV64-NEXT: vcompress.vm v12, v8, v0
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vse64.v v12, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB11_3
-; RV64-NEXT: .LBB11_13: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v12, v8, 2
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v12, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: bnez a2, .LBB11_4
-; RV64-NEXT: j .LBB11_5
-; RV64-NEXT: .LBB11_14: # %cond.store10
-; RV64-NEXT: addi a2, sp, 192
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a2)
-; RV64-NEXT: fld fa5, 224(sp)
-; RV64-NEXT: fsd fa5, 0(a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: beqz a2, .LBB11_7
-; RV64-NEXT: .LBB11_15: # %cond.store13
-; RV64-NEXT: addi a2, sp, 128
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a2)
-; RV64-NEXT: fld fa5, 168(sp)
-; RV64-NEXT: fsd fa5, 0(a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: beqz a2, .LBB11_8
-; RV64-NEXT: .LBB11_16: # %cond.store16
-; RV64-NEXT: addi a2, sp, 64
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a2)
-; RV64-NEXT: fld fa5, 112(sp)
-; RV64-NEXT: fsd fa5, 0(a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: bnez a1, .LBB11_9
-; RV64-NEXT: j .LBB11_10
+; RV64-NEXT: ret
call void @llvm.masked.compressstore.v8f64(<8 x double> %v, ptr align 8 %base, <8 x i1> %mask)
ret void
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
index eb0096d..a388ba9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
@@ -6,13 +6,11 @@ declare void @llvm.masked.compressstore.v1i8(<1 x i8>, ptr, <1 x i1>)
define void @compressstore_v1i8(ptr %base, <1 x i8> %v, <1 x i1> %mask) {
; CHECK-LABEL: compressstore_v1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vfirst.m a1, v0
-; CHECK-NEXT: bnez a1, .LBB0_2
-; CHECK-NEXT: # %bb.1: # %cond.store
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vse8.v v8, (a0)
-; CHECK-NEXT: .LBB0_2: # %else
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vse8.v v9, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v1i8(<1 x i8> %v, ptr %base, <1 x i1> %mask)
ret void
@@ -22,25 +20,11 @@ declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>)
define void @compressstore_v2i8(ptr %base, <2 x i8> %v, <2 x i1> %mask) {
; CHECK-LABEL: compressstore_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB1_3
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: bnez a1, .LBB1_4
-; CHECK-NEXT: .LBB1_2: # %else2
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB1_3: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vse8.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: beqz a1, .LBB1_2
-; CHECK-NEXT: .LBB1_4: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vse8.v v9, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v2i8(<2 x i8> %v, ptr %base, <2 x i1> %mask)
ret void
@@ -50,45 +34,11 @@ declare void @llvm.masked.compressstore.v4i8(<4 x i8>, ptr, <4 x i1>)
define void @compressstore_v4i8(ptr %base, <4 x i8> %v, <4 x i1> %mask) {
; CHECK-LABEL: compressstore_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB2_5
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB2_6
-; CHECK-NEXT: .LBB2_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB2_7
-; CHECK-NEXT: .LBB2_3: # %else5
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: bnez a1, .LBB2_8
-; CHECK-NEXT: .LBB2_4: # %else8
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB2_5: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; CHECK-NEXT: vse8.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB2_2
-; CHECK-NEXT: .LBB2_6: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB2_3
-; CHECK-NEXT: .LBB2_7: # %cond.store4
-; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 2
-; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: beqz a1, .LBB2_4
-; CHECK-NEXT: .LBB2_8: # %cond.store7
-; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v4i8(<4 x i8> %v, ptr %base, <4 x i1> %mask)
ret void
@@ -98,85 +48,11 @@ declare void @llvm.masked.compressstore.v8i8(<8 x i8>, ptr, <8 x i1>)
define void @compressstore_v8i8(ptr %base, <8 x i8> %v, <8 x i1> %mask) {
; CHECK-LABEL: compressstore_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB3_9
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB3_10
-; CHECK-NEXT: .LBB3_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB3_11
-; CHECK-NEXT: .LBB3_3: # %else5
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: bnez a2, .LBB3_12
-; CHECK-NEXT: .LBB3_4: # %else8
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: bnez a2, .LBB3_13
-; CHECK-NEXT: .LBB3_5: # %else11
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: bnez a2, .LBB3_14
-; CHECK-NEXT: .LBB3_6: # %else14
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: bnez a2, .LBB3_15
-; CHECK-NEXT: .LBB3_7: # %else17
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: bnez a1, .LBB3_16
-; CHECK-NEXT: .LBB3_8: # %else20
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB3_9: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vse8.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB3_2
-; CHECK-NEXT: .LBB3_10: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB3_3
-; CHECK-NEXT: .LBB3_11: # %cond.store4
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 2
-; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: beqz a2, .LBB3_4
-; CHECK-NEXT: .LBB3_12: # %cond.store7
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 3
-; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: beqz a2, .LBB3_5
-; CHECK-NEXT: .LBB3_13: # %cond.store10
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 4
-; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: beqz a2, .LBB3_6
-; CHECK-NEXT: .LBB3_14: # %cond.store13
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 5
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: beqz a2, .LBB3_7
-; CHECK-NEXT: .LBB3_15: # %cond.store16
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 6
-; CHECK-NEXT: vse8.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: beqz a1, .LBB3_8
-; CHECK-NEXT: .LBB3_16: # %cond.store19
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 7
-; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v8i8(<8 x i8> %v, ptr %base, <8 x i1> %mask)
ret void
@@ -186,13 +62,11 @@ declare void @llvm.masked.compressstore.v1i16(<1 x i16>, ptr, <1 x i1>)
define void @compressstore_v1i16(ptr %base, <1 x i16> %v, <1 x i1> %mask) {
; CHECK-LABEL: compressstore_v1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vfirst.m a1, v0
-; CHECK-NEXT: bnez a1, .LBB4_2
-; CHECK-NEXT: # %bb.1: # %cond.store
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vse16.v v8, (a0)
-; CHECK-NEXT: .LBB4_2: # %else
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vse16.v v9, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v1i16(<1 x i16> %v, ptr align 2 %base, <1 x i1> %mask)
ret void
@@ -202,25 +76,11 @@ declare void @llvm.masked.compressstore.v2i16(<2 x i16>, ptr, <2 x i1>)
define void @compressstore_v2i16(ptr %base, <2 x i16> %v, <2 x i1> %mask) {
; CHECK-LABEL: compressstore_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB5_3
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: bnez a1, .LBB5_4
-; CHECK-NEXT: .LBB5_2: # %else2
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB5_3: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vse16.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: beqz a1, .LBB5_2
-; CHECK-NEXT: .LBB5_4: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vse16.v v9, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v2i16(<2 x i16> %v, ptr align 2 %base, <2 x i1> %mask)
ret void
@@ -230,45 +90,11 @@ declare void @llvm.masked.compressstore.v4i16(<4 x i16>, ptr, <4 x i1>)
define void @compressstore_v4i16(ptr %base, <4 x i16> %v, <4 x i1> %mask) {
; CHECK-LABEL: compressstore_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB6_5
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB6_6
-; CHECK-NEXT: .LBB6_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB6_7
-; CHECK-NEXT: .LBB6_3: # %else5
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: bnez a1, .LBB6_8
-; CHECK-NEXT: .LBB6_4: # %else8
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB6_5: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; CHECK-NEXT: vse16.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB6_2
-; CHECK-NEXT: .LBB6_6: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB6_3
-; CHECK-NEXT: .LBB6_7: # %cond.store4
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 2
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: beqz a1, .LBB6_4
-; CHECK-NEXT: .LBB6_8: # %cond.store7
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v4i16(<4 x i16> %v, ptr align 2 %base, <4 x i1> %mask)
ret void
@@ -278,85 +104,11 @@ declare void @llvm.masked.compressstore.v8i16(<8 x i16>, ptr, <8 x i1>)
define void @compressstore_v8i16(ptr %base, <8 x i16> %v, <8 x i1> %mask) {
; CHECK-LABEL: compressstore_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB7_9
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB7_10
-; CHECK-NEXT: .LBB7_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB7_11
-; CHECK-NEXT: .LBB7_3: # %else5
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: bnez a2, .LBB7_12
-; CHECK-NEXT: .LBB7_4: # %else8
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: bnez a2, .LBB7_13
-; CHECK-NEXT: .LBB7_5: # %else11
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: bnez a2, .LBB7_14
-; CHECK-NEXT: .LBB7_6: # %else14
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: bnez a2, .LBB7_15
-; CHECK-NEXT: .LBB7_7: # %else17
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: bnez a1, .LBB7_16
-; CHECK-NEXT: .LBB7_8: # %else20
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB7_9: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vse16.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB7_2
-; CHECK-NEXT: .LBB7_10: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB7_3
-; CHECK-NEXT: .LBB7_11: # %cond.store4
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 2
-; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: beqz a2, .LBB7_4
-; CHECK-NEXT: .LBB7_12: # %cond.store7
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 3
-; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: beqz a2, .LBB7_5
-; CHECK-NEXT: .LBB7_13: # %cond.store10
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 4
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: beqz a2, .LBB7_6
-; CHECK-NEXT: .LBB7_14: # %cond.store13
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 5
-; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: beqz a2, .LBB7_7
-; CHECK-NEXT: .LBB7_15: # %cond.store16
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 6
-; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: beqz a1, .LBB7_8
-; CHECK-NEXT: .LBB7_16: # %cond.store19
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 7
-; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v8i16(<8 x i16> %v, ptr align 2 %base, <8 x i1> %mask)
ret void
@@ -366,13 +118,11 @@ declare void @llvm.masked.compressstore.v1i32(<1 x i32>, ptr, <1 x i1>)
define void @compressstore_v1i32(ptr %base, <1 x i32> %v, <1 x i1> %mask) {
; CHECK-LABEL: compressstore_v1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vfirst.m a1, v0
-; CHECK-NEXT: bnez a1, .LBB8_2
-; CHECK-NEXT: # %bb.1: # %cond.store
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0)
-; CHECK-NEXT: .LBB8_2: # %else
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vse32.v v9, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v1i32(<1 x i32> %v, ptr align 4 %base, <1 x i1> %mask)
ret void
@@ -382,25 +132,11 @@ declare void @llvm.masked.compressstore.v2i32(<2 x i32>, ptr, <2 x i1>)
define void @compressstore_v2i32(ptr %base, <2 x i32> %v, <2 x i1> %mask) {
; CHECK-LABEL: compressstore_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB9_3
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: bnez a1, .LBB9_4
-; CHECK-NEXT: .LBB9_2: # %else2
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB9_3: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: beqz a1, .LBB9_2
-; CHECK-NEXT: .LBB9_4: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vse32.v v9, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v2i32(<2 x i32> %v, ptr align 4 %base, <2 x i1> %mask)
ret void
@@ -410,45 +146,11 @@ declare void @llvm.masked.compressstore.v4i32(<4 x i32>, ptr, <4 x i1>)
define void @compressstore_v4i32(ptr %base, <4 x i32> %v, <4 x i1> %mask) {
; CHECK-LABEL: compressstore_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB10_5
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB10_6
-; CHECK-NEXT: .LBB10_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB10_7
-; CHECK-NEXT: .LBB10_3: # %else5
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: bnez a1, .LBB10_8
-; CHECK-NEXT: .LBB10_4: # %else8
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB10_5: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB10_2
-; CHECK-NEXT: .LBB10_6: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vse32.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB10_3
-; CHECK-NEXT: .LBB10_7: # %cond.store4
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 2
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: beqz a1, .LBB10_4
-; CHECK-NEXT: .LBB10_8: # %cond.store7
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v4i32(<4 x i32> %v, ptr align 4 %base, <4 x i1> %mask)
ret void
@@ -458,89 +160,11 @@ declare void @llvm.masked.compressstore.v8i32(<8 x i32>, ptr, <8 x i1>)
define void @compressstore_v8i32(ptr %base, <8 x i32> %v, <8 x i1> %mask) {
; CHECK-LABEL: compressstore_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB11_9
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB11_10
-; CHECK-NEXT: .LBB11_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB11_11
-; CHECK-NEXT: .LBB11_3: # %else5
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: bnez a2, .LBB11_12
-; CHECK-NEXT: .LBB11_4: # %else8
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: bnez a2, .LBB11_13
-; CHECK-NEXT: .LBB11_5: # %else11
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: bnez a2, .LBB11_14
-; CHECK-NEXT: .LBB11_6: # %else14
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: bnez a2, .LBB11_15
-; CHECK-NEXT: .LBB11_7: # %else17
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: bnez a1, .LBB11_16
-; CHECK-NEXT: .LBB11_8: # %else20
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB11_9: # %cond.store
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB11_2
-; CHECK-NEXT: .LBB11_10: # %cond.store1
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
-; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB11_3
-; CHECK-NEXT: .LBB11_11: # %cond.store4
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 2
-; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: beqz a2, .LBB11_4
-; CHECK-NEXT: .LBB11_12: # %cond.store7
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 3
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vcompress.vm v10, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: beqz a2, .LBB11_5
-; CHECK-NEXT: .LBB11_13: # %cond.store10
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 4
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: beqz a2, .LBB11_6
-; CHECK-NEXT: .LBB11_14: # %cond.store13
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 5
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: beqz a2, .LBB11_7
-; CHECK-NEXT: .LBB11_15: # %cond.store16
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 6
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: beqz a1, .LBB11_8
-; CHECK-NEXT: .LBB11_16: # %cond.store19
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 7
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v8i32(<8 x i32> %v, ptr align 4 %base, <8 x i1> %mask)
ret void
@@ -548,439 +172,59 @@ define void @compressstore_v8i32(ptr %base, <8 x i32> %v, <8 x i1> %mask) {
declare void @llvm.masked.compressstore.v1i64(<1 x i64>, ptr, <1 x i1>)
define void @compressstore_v1i64(ptr %base, <1 x i64> %v, <1 x i1> %mask) {
-; RV32-LABEL: compressstore_v1i64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT: vfirst.m a1, v0
-; RV32-NEXT: bnez a1, .LBB12_2
-; RV32-NEXT: # %bb.1: # %cond.store
-; RV32-NEXT: li a1, 32
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vsrl.vx v9, v8, a1
-; RV32-NEXT: vmv.x.s a1, v9
-; RV32-NEXT: vmv.x.s a2, v8
-; RV32-NEXT: sw a2, 0(a0)
-; RV32-NEXT: sw a1, 4(a0)
-; RV32-NEXT: .LBB12_2: # %else
-; RV32-NEXT: ret
-;
-; RV64-LABEL: compressstore_v1i64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT: vfirst.m a1, v0
-; RV64-NEXT: bnez a1, .LBB12_2
-; RV64-NEXT: # %bb.1: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: .LBB12_2: # %else
-; RV64-NEXT: ret
+; CHECK-LABEL: compressstore_v1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vse64.v v9, (a0)
+; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v1i64(<1 x i64> %v, ptr align 8 %base, <1 x i1> %mask)
ret void
}
declare void @llvm.masked.compressstore.v2i64(<2 x i64>, ptr, <2 x i1>)
define void @compressstore_v2i64(ptr %base, <2 x i64> %v, <2 x i1> %mask) {
-; RV32-LABEL: compressstore_v2i64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB13_3
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: bnez a1, .LBB13_4
-; RV32-NEXT: .LBB13_2: # %else2
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB13_3: # %cond.store
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vsrl.vx v9, v8, a2
-; RV32-NEXT: vmv.x.s a2, v9
-; RV32-NEXT: vmv.x.s a3, v8
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: beqz a1, .LBB13_2
-; RV32-NEXT: .LBB13_4: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: li a1, 32
-; RV32-NEXT: vsrl.vx v9, v8, a1
-; RV32-NEXT: vmv.x.s a1, v9
-; RV32-NEXT: vmv.x.s a2, v8
-; RV32-NEXT: sw a2, 0(a0)
-; RV32-NEXT: sw a1, 4(a0)
-; RV32-NEXT: ret
-;
-; RV64-LABEL: compressstore_v2i64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB13_3
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: bnez a1, .LBB13_4
-; RV64-NEXT: .LBB13_2: # %else2
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB13_3: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: beqz a1, .LBB13_2
-; RV64-NEXT: .LBB13_4: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: ret
+; CHECK-LABEL: compressstore_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vcompress.vm v9, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vse64.v v9, (a0)
+; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v2i64(<2 x i64> %v, ptr align 8 %base, <2 x i1> %mask)
ret void
}
declare void @llvm.masked.compressstore.v4i64(<4 x i64>, ptr, <4 x i1>)
define void @compressstore_v4i64(ptr %base, <4 x i64> %v, <4 x i1> %mask) {
-; RV32-LABEL: compressstore_v4i64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB14_5
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB14_6
-; RV32-NEXT: .LBB14_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB14_7
-; RV32-NEXT: .LBB14_3: # %else5
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: bnez a1, .LBB14_8
-; RV32-NEXT: .LBB14_4: # %else8
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB14_5: # %cond.store
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vsrl.vx v10, v8, a2
-; RV32-NEXT: vmv.x.s a2, v10
-; RV32-NEXT: vmv.x.s a3, v8
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB14_2
-; RV32-NEXT: .LBB14_6: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 1
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v12, v10, a2
-; RV32-NEXT: vmv.x.s a2, v12
-; RV32-NEXT: vmv.x.s a3, v10
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB14_3
-; RV32-NEXT: .LBB14_7: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 2
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v12, v10, a2
-; RV32-NEXT: vmv.x.s a2, v12
-; RV32-NEXT: vmv.x.s a3, v10
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: beqz a1, .LBB14_4
-; RV32-NEXT: .LBB14_8: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: li a1, 32
-; RV32-NEXT: vsrl.vx v10, v8, a1
-; RV32-NEXT: vmv.x.s a1, v10
-; RV32-NEXT: vmv.x.s a2, v8
-; RV32-NEXT: sw a2, 0(a0)
-; RV32-NEXT: sw a1, 4(a0)
-; RV32-NEXT: ret
-;
-; RV64-LABEL: compressstore_v4i64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB14_5
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB14_6
-; RV64-NEXT: .LBB14_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB14_7
-; RV64-NEXT: .LBB14_3: # %else5
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: bnez a1, .LBB14_8
-; RV64-NEXT: .LBB14_4: # %else8
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB14_5: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB14_2
-; RV64-NEXT: .LBB14_6: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 1
-; RV64-NEXT: vse64.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB14_3
-; RV64-NEXT: .LBB14_7: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 2
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v10, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: beqz a1, .LBB14_4
-; RV64-NEXT: .LBB14_8: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: ret
+; CHECK-LABEL: compressstore_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vcompress.vm v10, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vse64.v v10, (a0)
+; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v4i64(<4 x i64> %v, ptr align 8 %base, <4 x i1> %mask)
ret void
}
declare void @llvm.masked.compressstore.v8i64(<8 x i64>, ptr, <8 x i1>)
define void @compressstore_v8i64(ptr %base, <8 x i64> %v, <8 x i1> %mask) {
-; RV32-LABEL: compressstore_v8i64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB15_9
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB15_10
-; RV32-NEXT: .LBB15_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB15_11
-; RV32-NEXT: .LBB15_3: # %else5
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: bnez a2, .LBB15_12
-; RV32-NEXT: .LBB15_4: # %else8
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: bnez a2, .LBB15_13
-; RV32-NEXT: .LBB15_5: # %else11
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: bnez a2, .LBB15_14
-; RV32-NEXT: .LBB15_6: # %else14
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: bnez a2, .LBB15_15
-; RV32-NEXT: .LBB15_7: # %else17
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: bnez a1, .LBB15_16
-; RV32-NEXT: .LBB15_8: # %else20
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB15_9: # %cond.store
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vsrl.vx v12, v8, a2
-; RV32-NEXT: vmv.x.s a2, v12
-; RV32-NEXT: vmv.x.s a3, v8
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB15_2
-; RV32-NEXT: .LBB15_10: # %cond.store1
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 1
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v12, a2
-; RV32-NEXT: vmv.x.s a2, v16
-; RV32-NEXT: vmv.x.s a3, v12
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB15_3
-; RV32-NEXT: .LBB15_11: # %cond.store4
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 2
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v12, a2
-; RV32-NEXT: vmv.x.s a2, v16
-; RV32-NEXT: vmv.x.s a3, v12
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: beqz a2, .LBB15_4
-; RV32-NEXT: .LBB15_12: # %cond.store7
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 3
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v12, a2
-; RV32-NEXT: vmv.x.s a2, v16
-; RV32-NEXT: vmv.x.s a3, v12
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: beqz a2, .LBB15_5
-; RV32-NEXT: .LBB15_13: # %cond.store10
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 4
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v12, a2
-; RV32-NEXT: vmv.x.s a2, v16
-; RV32-NEXT: vmv.x.s a3, v12
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: beqz a2, .LBB15_6
-; RV32-NEXT: .LBB15_14: # %cond.store13
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 5
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v12, a2
-; RV32-NEXT: vmv.x.s a2, v16
-; RV32-NEXT: vmv.x.s a3, v12
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: beqz a2, .LBB15_7
-; RV32-NEXT: .LBB15_15: # %cond.store16
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 6
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v12, a2
-; RV32-NEXT: vmv.x.s a2, v16
-; RV32-NEXT: vmv.x.s a3, v12
-; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: sw a2, 4(a0)
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: beqz a1, .LBB15_8
-; RV32-NEXT: .LBB15_16: # %cond.store19
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: li a1, 32
-; RV32-NEXT: vsrl.vx v12, v8, a1
-; RV32-NEXT: vmv.x.s a1, v12
-; RV32-NEXT: vmv.x.s a2, v8
-; RV32-NEXT: sw a2, 0(a0)
-; RV32-NEXT: sw a1, 4(a0)
-; RV32-NEXT: ret
-;
-; RV64-LABEL: compressstore_v8i64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB15_11
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB15_12
-; RV64-NEXT: .LBB15_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB15_13
-; RV64-NEXT: .LBB15_3: # %else5
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: beqz a2, .LBB15_5
-; RV64-NEXT: .LBB15_4: # %cond.store7
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v12, v8, 3
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v12, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: .LBB15_5: # %else8
-; RV64-NEXT: addi sp, sp, -320
-; RV64-NEXT: .cfi_def_cfa_offset 320
-; RV64-NEXT: sd ra, 312(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 304(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: addi s0, sp, 320
-; RV64-NEXT: .cfi_def_cfa s0, 0
-; RV64-NEXT: andi sp, sp, -64
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: bnez a2, .LBB15_14
-; RV64-NEXT: # %bb.6: # %else11
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: bnez a2, .LBB15_15
-; RV64-NEXT: .LBB15_7: # %else14
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: bnez a2, .LBB15_16
-; RV64-NEXT: .LBB15_8: # %else17
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: beqz a1, .LBB15_10
-; RV64-NEXT: .LBB15_9: # %cond.store19
-; RV64-NEXT: mv a1, sp
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a1)
-; RV64-NEXT: ld a1, 56(sp)
-; RV64-NEXT: sd a1, 0(a0)
-; RV64-NEXT: .LBB15_10: # %else20
-; RV64-NEXT: addi sp, s0, -320
-; RV64-NEXT: ld ra, 312(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 304(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 320
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB15_11: # %cond.store
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB15_2
-; RV64-NEXT: .LBB15_12: # %cond.store1
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v12, v8, 1
-; RV64-NEXT: vse64.v v12, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB15_3
-; RV64-NEXT: .LBB15_13: # %cond.store4
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v12, v8, 2
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v12, (a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: bnez a2, .LBB15_4
-; RV64-NEXT: j .LBB15_5
-; RV64-NEXT: .LBB15_14: # %cond.store10
-; RV64-NEXT: addi a2, sp, 192
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a2)
-; RV64-NEXT: ld a2, 224(sp)
-; RV64-NEXT: sd a2, 0(a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: beqz a2, .LBB15_7
-; RV64-NEXT: .LBB15_15: # %cond.store13
-; RV64-NEXT: addi a2, sp, 128
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a2)
-; RV64-NEXT: ld a2, 168(sp)
-; RV64-NEXT: sd a2, 0(a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: beqz a2, .LBB15_8
-; RV64-NEXT: .LBB15_16: # %cond.store16
-; RV64-NEXT: addi a2, sp, 64
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vse64.v v8, (a2)
-; RV64-NEXT: ld a2, 112(sp)
-; RV64-NEXT: sd a2, 0(a0)
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: bnez a1, .LBB15_9
-; RV64-NEXT: j .LBB15_10
+; CHECK-LABEL: compressstore_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vcompress.vm v12, v8, v0
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vse64.v v12, (a0)
+; CHECK-NEXT: ret
call void @llvm.masked.compressstore.v8i64(<8 x i64> %v, ptr align 8 %base, <8 x i1> %mask)
ret void
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index 68740ee..7dcfb24 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -1599,15 +1599,16 @@ define float @vreduce_fminimum_v2f32(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v9, v8, v8
+; CHECK-NEXT: vcpop.m a0, v9
+; CHECK-NEXT: beqz a0, .LBB99_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB99_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %x
@@ -1619,15 +1620,8 @@ define float @vreduce_fminimum_v2f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v2f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %x
@@ -1641,24 +1635,16 @@ define float @vreduce_fminimum_v4f32(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v9, v8, v8
+; CHECK-NEXT: vcpop.m a0, v9
+; CHECK-NEXT: beqz a0, .LBB101_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB101_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x float>, ptr %x
@@ -1670,24 +1656,8 @@ define float @vreduce_fminimum_v4f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v4f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x float>, ptr %x
@@ -1701,33 +1671,16 @@ define float @vreduce_fminimum_v8f32(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v10, v8, v8
+; CHECK-NEXT: vcpop.m a0, v10
+; CHECK-NEXT: beqz a0, .LBB103_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB103_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x float>, ptr %x
@@ -1739,33 +1692,8 @@ define float @vreduce_fminimum_v8f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x float>, ptr %x
@@ -1779,42 +1707,16 @@ define float @vreduce_fminimum_v16f32(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vle32.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v12, v8, v8
+; CHECK-NEXT: vcpop.m a0, v12
+; CHECK-NEXT: beqz a0, .LBB105_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB105_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x float>, ptr %x
@@ -1826,42 +1728,8 @@ define float @vreduce_fminimum_v16f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v16f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vle32.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x float>, ptr %x
@@ -1876,51 +1744,16 @@ define float @vreduce_fminimum_v32f32(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vle32.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB107_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB107_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <32 x float>, ptr %x
@@ -1933,51 +1766,8 @@ define float @vreduce_fminimum_v32f32_nonans(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vle32.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <32 x float>, ptr %x
@@ -2009,52 +1799,18 @@ define float @vreduce_fminimum_v64f32(ptr %x) {
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB109_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: j .LBB109_3
+; CHECK-NEXT: .LBB109_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB109_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
@@ -2073,51 +1829,8 @@ define float @vreduce_fminimum_v64f32_nonans(ptr %x) {
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle32.v v16, (a0)
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <64 x float>, ptr %x
@@ -2208,52 +1921,18 @@ define float @vreduce_fminimum_v128f32(ptr %x) {
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB111_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: j .LBB111_3
+; CHECK-NEXT: .LBB111_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB111_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: mv a1, a0
@@ -2281,51 +1960,8 @@ define float @vreduce_fminimum_v128f32_nonans(ptr %x) {
; CHECK-NEXT: vle32.v v0, (a1)
; CHECK-NEXT: vfmin.vv v16, v24, v16
; CHECK-NEXT: vfmin.vv v8, v8, v0
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <128 x float>, ptr %x
@@ -2339,15 +1975,16 @@ define double @vreduce_fminimum_v2f64(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v9, v8, v8
+; CHECK-NEXT: vcpop.m a0, v9
+; CHECK-NEXT: beqz a0, .LBB113_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI113_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI113_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB113_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x double>, ptr %x
@@ -2359,15 +1996,8 @@ define double @vreduce_fminimum_v2f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v2f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x double>, ptr %x
@@ -2381,24 +2011,16 @@ define double @vreduce_fminimum_v4f64(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vle64.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v10, v8, v8
+; CHECK-NEXT: vcpop.m a0, v10
+; CHECK-NEXT: beqz a0, .LBB115_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI115_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI115_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB115_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x double>, ptr %x
@@ -2410,24 +2032,8 @@ define double @vreduce_fminimum_v4f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v4f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vle64.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x double>, ptr %x
@@ -2441,33 +2047,16 @@ define double @vreduce_fminimum_v8f64(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vle64.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v12, v8, v8
+; CHECK-NEXT: vcpop.m a0, v12
+; CHECK-NEXT: beqz a0, .LBB117_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI117_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI117_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB117_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x double>, ptr %x
@@ -2479,33 +2068,8 @@ define double @vreduce_fminimum_v8f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vle64.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x double>, ptr %x
@@ -2519,42 +2083,16 @@ define double @vreduce_fminimum_v16f64(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v16f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB119_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI119_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI119_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB119_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x double>, ptr %x
@@ -2566,42 +2104,8 @@ define double @vreduce_fminimum_v16f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v16f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x double>, ptr %x
@@ -2632,43 +2136,18 @@ define double @vreduce_fminimum_v32f64(ptr %x) {
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB121_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI121_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI121_0)(a0)
+; CHECK-NEXT: j .LBB121_3
+; CHECK-NEXT: .LBB121_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB121_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
@@ -2686,42 +2165,8 @@ define double @vreduce_fminimum_v32f64_nonans(ptr %x) {
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle64.v v16, (a0)
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <32 x double>, ptr %x
@@ -2811,43 +2256,18 @@ define double @vreduce_fminimum_v64f64(ptr %x) {
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB123_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI123_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI123_0)(a0)
+; CHECK-NEXT: j .LBB123_3
+; CHECK-NEXT: .LBB123_2:
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB123_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: mv a1, a0
@@ -2874,42 +2294,8 @@ define double @vreduce_fminimum_v64f64_nonans(ptr %x) {
; CHECK-NEXT: vle64.v v0, (a1)
; CHECK-NEXT: vfmin.vv v16, v24, v16
; CHECK-NEXT: vfmin.vv v8, v8, v0
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: vfredmin.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <64 x double>, ptr %x
@@ -2923,15 +2309,16 @@ define float @vreduce_fmaximum_v2f32(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v9, v8, v8
+; CHECK-NEXT: vcpop.m a0, v9
+; CHECK-NEXT: beqz a0, .LBB125_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB125_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %x
@@ -2943,15 +2330,8 @@ define float @vreduce_fmaximum_v2f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v2f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %x
@@ -2965,24 +2345,16 @@ define float @vreduce_fmaximum_v4f32(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v9, v8, v8
+; CHECK-NEXT: vcpop.m a0, v9
+; CHECK-NEXT: beqz a0, .LBB127_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB127_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x float>, ptr %x
@@ -2994,24 +2366,8 @@ define float @vreduce_fmaximum_v4f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v4f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x float>, ptr %x
@@ -3025,33 +2381,16 @@ define float @vreduce_fmaximum_v8f32(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v10, v8, v8
+; CHECK-NEXT: vcpop.m a0, v10
+; CHECK-NEXT: beqz a0, .LBB129_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB129_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x float>, ptr %x
@@ -3063,33 +2402,8 @@ define float @vreduce_fmaximum_v8f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x float>, ptr %x
@@ -3103,42 +2417,16 @@ define float @vreduce_fmaximum_v16f32(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vle32.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v12, v8, v8
+; CHECK-NEXT: vcpop.m a0, v12
+; CHECK-NEXT: beqz a0, .LBB131_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB131_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x float>, ptr %x
@@ -3150,42 +2438,8 @@ define float @vreduce_fmaximum_v16f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v16f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vle32.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x float>, ptr %x
@@ -3200,51 +2454,16 @@ define float @vreduce_fmaximum_v32f32(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vle32.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB133_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB133_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <32 x float>, ptr %x
@@ -3257,51 +2476,8 @@ define float @vreduce_fmaximum_v32f32_nonans(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vle32.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <32 x float>, ptr %x
@@ -3333,52 +2509,18 @@ define float @vreduce_fmaximum_v64f32(ptr %x) {
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB135_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: j .LBB135_3
+; CHECK-NEXT: .LBB135_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB135_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
@@ -3397,51 +2539,8 @@ define float @vreduce_fmaximum_v64f32_nonans(ptr %x) {
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle32.v v16, (a0)
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <64 x float>, ptr %x
@@ -3532,52 +2631,18 @@ define float @vreduce_fmaximum_v128f32(ptr %x) {
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB137_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: j .LBB137_3
+; CHECK-NEXT: .LBB137_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB137_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: mv a1, a0
@@ -3605,51 +2670,8 @@ define float @vreduce_fmaximum_v128f32_nonans(ptr %x) {
; CHECK-NEXT: vle32.v v0, (a1)
; CHECK-NEXT: vfmax.vv v16, v24, v16
; CHECK-NEXT: vfmax.vv v8, v8, v0
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <128 x float>, ptr %x
@@ -3663,15 +2685,16 @@ define double @vreduce_fmaximum_v2f64(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v9, v8, v8
+; CHECK-NEXT: vcpop.m a0, v9
+; CHECK-NEXT: beqz a0, .LBB139_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI139_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI139_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB139_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x double>, ptr %x
@@ -3683,15 +2706,8 @@ define double @vreduce_fmaximum_v2f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v2f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x double>, ptr %x
@@ -3705,24 +2721,16 @@ define double @vreduce_fmaximum_v4f64(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vle64.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v10, v8, v8
+; CHECK-NEXT: vcpop.m a0, v10
+; CHECK-NEXT: beqz a0, .LBB141_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI141_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI141_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB141_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x double>, ptr %x
@@ -3734,24 +2742,8 @@ define double @vreduce_fmaximum_v4f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v4f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vle64.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x double>, ptr %x
@@ -3765,33 +2757,16 @@ define double @vreduce_fmaximum_v8f64(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vle64.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v12, v8, v8
+; CHECK-NEXT: vcpop.m a0, v12
+; CHECK-NEXT: beqz a0, .LBB143_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI143_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI143_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB143_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x double>, ptr %x
@@ -3803,33 +2778,8 @@ define double @vreduce_fmaximum_v8f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vle64.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x double>, ptr %x
@@ -3843,42 +2793,16 @@ define double @vreduce_fmaximum_v16f64(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v16f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB145_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI145_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI145_0)(a0)
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB145_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x double>, ptr %x
@@ -3890,42 +2814,8 @@ define double @vreduce_fmaximum_v16f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmaximum_v16f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v16, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x double>, ptr %x
@@ -3956,43 +2846,18 @@ define double @vreduce_fmaximum_v32f64(ptr %x) {
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB147_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI147_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI147_0)(a0)
+; CHECK-NEXT: j .LBB147_3
+; CHECK-NEXT: .LBB147_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB147_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
@@ -4010,42 +2875,8 @@ define double @vreduce_fmaximum_v32f64_nonans(ptr %x) {
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle64.v v16, (a0)
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <32 x double>, ptr %x
@@ -4135,43 +2966,18 @@ define double @vreduce_fmaximum_v64f64(ptr %x) {
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vmfne.vv v16, v8, v8
+; CHECK-NEXT: vcpop.m a0, v16
+; CHECK-NEXT: beqz a0, .LBB149_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: lui a0, %hi(.LCPI149_0)
+; CHECK-NEXT: fld fa0, %lo(.LCPI149_0)(a0)
+; CHECK-NEXT: j .LBB149_3
+; CHECK-NEXT: .LBB149_2:
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: .LBB149_3:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: mv a1, a0
@@ -4198,42 +3004,8 @@ define double @vreduce_fmaximum_v64f64_nonans(ptr %x) {
; CHECK-NEXT: vle64.v v0, (a1)
; CHECK-NEXT: vfmax.vv v16, v24, v16
; CHECK-NEXT: vfmax.vv v8, v8, v0
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmax.vv v12, v12, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmax.vv v10, v10, v8
-; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmax.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmax.vv v8, v11, v8
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: vfredmax.vs v8, v8, v8
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <64 x double>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
new file mode 100644
index 0000000..a4ab67f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
@@ -0,0 +1,189 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s
+
+define signext i16 @sad_4x8_as_i16(<4 x i8> %a, <4 x i8> %b) {
+; CHECK-LABEL: sad_4x8_as_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.s.x v9, zero
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vwredsumu.vs v8, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %1 = zext <4 x i8> %a to <4 x i16>
+ %3 = zext <4 x i8> %b to <4 x i16>
+ %4 = sub nsw <4 x i16> %1, %3
+ %5 = tail call <4 x i16> @llvm.abs.v4i16(<4 x i16> %4, i1 true)
+ %6 = tail call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %5)
+ ret i16 %6
+}
+
+define signext i32 @sad_4x8_as_i32(<4 x i8> %a, <4 x i8> %b) {
+; CHECK-LABEL: sad_4x8_as_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v9, v8
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: vredsum.vs v8, v9, v8
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %1 = zext <4 x i8> %a to <4 x i32>
+ %3 = zext <4 x i8> %b to <4 x i32>
+ %4 = sub nsw <4 x i32> %1, %3
+ %5 = tail call <4 x i32> @llvm.abs.v4i32(<4 x i32> %4, i1 true)
+ %6 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %5)
+ ret i32 %6
+}
+
+define signext i16 @sad_16x8_as_i16(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: sad_16x8_as_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.s.x v9, zero
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwredsumu.vs v8, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %1 = zext <16 x i8> %a to <16 x i16>
+ %3 = zext <16 x i8> %b to <16 x i16>
+ %4 = sub nsw <16 x i16> %1, %3
+ %5 = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %4, i1 true)
+ %6 = tail call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %5)
+ ret i16 %6
+}
+
+define signext i32 @sad_16x8_as_i32(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: sad_16x8_as_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v12, v8
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: vredsum.vs v8, v12, v8
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %1 = zext <16 x i8> %a to <16 x i32>
+ %3 = zext <16 x i8> %b to <16 x i32>
+ %4 = sub nsw <16 x i32> %1, %3
+ %5 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %4, i1 true)
+ %6 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %5)
+ ret i32 %6
+}
+
+define signext i32 @sad_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stridea, i32 signext %strideb) {
+; CHECK-LABEL: sad_2block_16xi8_as_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v9, (a1)
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a1, a1, a3
+; CHECK-NEXT: vle8.v v10, (a0)
+; CHECK-NEXT: vle8.v v11, (a1)
+; CHECK-NEXT: vminu.vv v12, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vminu.vv v9, v10, v11
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a1, a1, a3
+; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: vle8.v v13, (a1)
+; CHECK-NEXT: vmaxu.vv v10, v10, v11
+; CHECK-NEXT: vsub.vv v9, v10, v9
+; CHECK-NEXT: vwaddu.vv v10, v9, v8
+; CHECK-NEXT: vminu.vv v8, v12, v13
+; CHECK-NEXT: vmaxu.vv v9, v12, v13
+; CHECK-NEXT: vsub.vv v8, v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a1, a1, a3
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vle8.v v12, (a1)
+; CHECK-NEXT: vzext.vf2 v14, v8
+; CHECK-NEXT: vwaddu.vv v16, v14, v10
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v8, v9, v12
+; CHECK-NEXT: vmaxu.vv v9, v9, v12
+; CHECK-NEXT: vsub.vv v8, v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vwaddu.wv v16, v16, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: vredsum.vs v8, v16, v8
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %idx.ext8 = sext i32 %strideb to i64
+ %idx.ext = sext i32 %stridea to i64
+ %0 = load <16 x i8>, ptr %a, align 1
+ %1 = zext <16 x i8> %0 to <16 x i32>
+ %2 = load <16 x i8>, ptr %b, align 1
+ %3 = zext <16 x i8> %2 to <16 x i32>
+ %4 = sub nsw <16 x i32> %1, %3
+ %5 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %4, i1 true)
+ %6 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %5)
+ %add.ptr = getelementptr inbounds i8, ptr %a, i64 %idx.ext
+ %add.ptr9 = getelementptr inbounds i8, ptr %b, i64 %idx.ext8
+ %7 = load <16 x i8>, ptr %add.ptr, align 1
+ %8 = zext <16 x i8> %7 to <16 x i32>
+ %9 = load <16 x i8>, ptr %add.ptr9, align 1
+ %10 = zext <16 x i8> %9 to <16 x i32>
+ %11 = sub nsw <16 x i32> %8, %10
+ %12 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %11, i1 true)
+ %13 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %12)
+ %op.rdx.1 = add i32 %13, %6
+ %add.ptr.1 = getelementptr inbounds i8, ptr %add.ptr, i64 %idx.ext
+ %add.ptr9.1 = getelementptr inbounds i8, ptr %add.ptr9, i64 %idx.ext8
+ %14 = load <16 x i8>, ptr %add.ptr.1, align 1
+ %15 = zext <16 x i8> %14 to <16 x i32>
+ %16 = load <16 x i8>, ptr %add.ptr9.1, align 1
+ %17 = zext <16 x i8> %16 to <16 x i32>
+ %18 = sub nsw <16 x i32> %15, %17
+ %19 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %18, i1 true)
+ %20 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %19)
+ %op.rdx.2 = add i32 %20, %op.rdx.1
+ %add.ptr.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 %idx.ext
+ %add.ptr9.2 = getelementptr inbounds i8, ptr %add.ptr9.1, i64 %idx.ext8
+ %21 = load <16 x i8>, ptr %add.ptr.2, align 1
+ %22 = zext <16 x i8> %21 to <16 x i32>
+ %23 = load <16 x i8>, ptr %add.ptr9.2, align 1
+ %24 = zext <16 x i8> %23 to <16 x i32>
+ %25 = sub nsw <16 x i32> %22, %24
+ %26 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %25, i1 true)
+ %27 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %26)
+ %op.rdx.3 = add i32 %27, %op.rdx.2
+ ret i32 %op.rdx.3
+}
+
+declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+declare <4 x i16> @llvm.abs.v4i16(<4 x i16>, i1)
+declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>)
+
+declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1)
+declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
+declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
+declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
index 8474f95..98e6b8f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
@@ -5,59 +5,6 @@
; RUN: llc < %s -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-max=128 -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLS %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLS %s
-define <8 x i16> @concat_2xv4i16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK-LABEL: concat_2xv4i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 4
-; CHECK-NEXT: ret
- %ab = shufflevector <4 x i16> %a, <4 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i16> %ab
-}
-
-define <8 x i16> @concat_4xv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
-; CHECK-LABEL: concat_4xv2i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v11, 2
-; CHECK-NEXT: vslideup.vi v8, v9, 2
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 4
-; CHECK-NEXT: ret
- %ab = shufflevector <2 x i16> %a, <2 x i16> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %cd = shufflevector <2 x i16> %c, <2 x i16> %d, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %abcd = shufflevector <4 x i16> %ab, <4 x i16> %cd, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i16> %abcd
-}
-
-define <8 x i16> @concat_8xv1i16(<1 x i16> %a, <1 x i16> %b, <1 x i16> %c, <1 x i16> %d, <1 x i16> %e, <1 x i16> %f, <1 x i16> %g, <1 x i16> %h) {
-; CHECK-LABEL: concat_8xv1i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v12, v13, 1
-; CHECK-NEXT: vsetivli zero, 3, e16, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v12, v14, 2
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v12, v15, 3
-; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 1
-; CHECK-NEXT: vsetivli zero, 3, e16, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 2
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v11, 3
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v12, 4
-; CHECK-NEXT: ret
- %ab = shufflevector <1 x i16> %a, <1 x i16> %b, <2 x i32> <i32 0, i32 1>
- %cd = shufflevector <1 x i16> %c, <1 x i16> %d, <2 x i32> <i32 0, i32 1>
- %abcd = shufflevector <2 x i16> %ab, <2 x i16> %cd, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %ef = shufflevector <1 x i16> %e, <1 x i16> %f, <2 x i32> <i32 0, i32 1>
- %gh = shufflevector <1 x i16> %g, <1 x i16> %h, <2 x i32> <i32 0, i32 1>
- %efgh = shufflevector <2 x i16> %ef, <2 x i16> %gh, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %abcdefgh = shufflevector <4 x i16> %abcd, <4 x i16> %efgh, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i16> %abcdefgh
-}
-
define <8 x i32> @concat_2xv4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: concat_2xv4i32:
; CHECK: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
index 37902aa..657d523 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
@@ -24,15 +24,18 @@ define void @widen_2xv4i16(ptr %x, ptr %z) {
define void @widen_3xv4i16(ptr %x, ptr %z) {
; CHECK-LABEL: widen_3xv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, a0, 16
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vle16.v v8, (a2)
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: addi a2, a0, 8
+; CHECK-NEXT: vle16.v v9, (a2)
+; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 8
+; CHECK-NEXT: vslideup.vi v8, v10, 8
; CHECK-NEXT: vsetivli zero, 12, e16, m2, ta, ma
-; CHECK-NEXT: vse16.v v10, (a1)
+; CHECK-NEXT: vse16.v v8, (a1)
; CHECK-NEXT: ret
%a = load <4 x i16>, ptr %x
%b.gep = getelementptr i8, ptr %x, i64 8
@@ -181,14 +184,20 @@ define void @strided_constant_0(ptr %x, ptr %z) {
define void @strided_constant_mismatch_4xv4i16(ptr %x, ptr %z) {
; CHECK-LABEL: strided_constant_mismatch_4xv4i16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: addi a2, a0, 6
-; CHECK-NEXT: li a3, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v8, (a0), a3
-; CHECK-NEXT: vlse64.v v10, (a2), a3
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 2
-; CHECK-NEXT: vse64.v v8, (a1)
+; CHECK-NEXT: vle16.v v10, (a2)
+; CHECK-NEXT: addi a2, a0, 2
+; CHECK-NEXT: addi a0, a0, 8
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v11, (a2)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v10, v9, 4
+; CHECK-NEXT: vslideup.vi v8, v11, 4
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v10, 8
+; CHECK-NEXT: vse16.v v8, (a1)
; CHECK-NEXT: ret
%a = load <4 x i16>, ptr %x
%b.gep = getelementptr i8, ptr %x, i64 2
@@ -244,38 +253,56 @@ define void @strided_runtime_4xv4i16(ptr %x, ptr %z, i64 %s) {
define void @strided_runtime_mismatch_4xv4i16(ptr %x, ptr %z, i64 %s, i64 %t) {
; RV32-LABEL: strided_runtime_mismatch_4xv4i16:
; RV32: # %bb.0:
-; RV32-NEXT: add a3, a0, a2
-; RV32-NEXT: add a3, a3, a4
-; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v8, (a0), a2
-; RV32-NEXT: vlse64.v v10, (a3), a2
-; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vslideup.vi v8, v10, 2
-; RV32-NEXT: vse64.v v8, (a1)
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: add a0, a0, a2
+; RV32-NEXT: add a4, a0, a4
+; RV32-NEXT: vle16.v v10, (a4)
+; RV32-NEXT: add a2, a4, a2
+; RV32-NEXT: vle16.v v9, (a2)
+; RV32-NEXT: vle16.v v11, (a0)
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslideup.vi v10, v9, 4
+; RV32-NEXT: vslideup.vi v8, v11, 4
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslideup.vi v8, v10, 8
+; RV32-NEXT: vse16.v v8, (a1)
; RV32-NEXT: ret
;
; RV64-LABEL: strided_runtime_mismatch_4xv4i16:
; RV64: # %bb.0:
-; RV64-NEXT: add a4, a0, a2
-; RV64-NEXT: add a3, a4, a3
-; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT: vlse64.v v8, (a0), a2
-; RV64-NEXT: vlse64.v v10, (a3), a2
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT: vslideup.vi v8, v10, 2
-; RV64-NEXT: vse64.v v8, (a1)
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: add a0, a0, a2
+; RV64-NEXT: add a3, a0, a3
+; RV64-NEXT: vle16.v v10, (a3)
+; RV64-NEXT: add a2, a3, a2
+; RV64-NEXT: vle16.v v9, (a2)
+; RV64-NEXT: vle16.v v11, (a0)
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslideup.vi v10, v9, 4
+; RV64-NEXT: vslideup.vi v8, v11, 4
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslideup.vi v8, v10, 8
+; RV64-NEXT: vse16.v v8, (a1)
; RV64-NEXT: ret
;
; ZVE64F-LABEL: strided_runtime_mismatch_4xv4i16:
; ZVE64F: # %bb.0:
-; ZVE64F-NEXT: add a4, a0, a2
-; ZVE64F-NEXT: add a3, a4, a3
-; ZVE64F-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; ZVE64F-NEXT: vlse64.v v8, (a0), a2
-; ZVE64F-NEXT: vlse64.v v10, (a3), a2
-; ZVE64F-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; ZVE64F-NEXT: vslideup.vi v8, v10, 2
-; ZVE64F-NEXT: vse64.v v8, (a1)
+; ZVE64F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVE64F-NEXT: vle16.v v8, (a0)
+; ZVE64F-NEXT: add a0, a0, a2
+; ZVE64F-NEXT: add a3, a0, a3
+; ZVE64F-NEXT: vle16.v v10, (a3)
+; ZVE64F-NEXT: add a2, a3, a2
+; ZVE64F-NEXT: vle16.v v9, (a2)
+; ZVE64F-NEXT: vle16.v v11, (a0)
+; ZVE64F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVE64F-NEXT: vslideup.vi v10, v9, 4
+; ZVE64F-NEXT: vslideup.vi v8, v11, 4
+; ZVE64F-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVE64F-NEXT: vslideup.vi v8, v10, 8
+; ZVE64F-NEXT: vse16.v v8, (a1)
; ZVE64F-NEXT: ret
%a = load <4 x i16>, ptr %x
%b.gep = getelementptr i8, ptr %x, i64 %s
@@ -534,3 +561,28 @@ define void @reverse_strided_runtime_4xv2f32(ptr %x, ptr %z, i64 %s) {
store <8 x float> %e.2, ptr %z
ret void
}
+
+; The middle end sometimes produces this pattern of shuffles, where the
+; intermediate shuffles are the full result vector size padded with poison
+; elements.
+define <16 x i8> @widen_4xv4i8_immediate_expand(ptr %p, i64 %s) {
+; CHECK-LABEL: widen_4xv4i8_immediate_expand:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vlse32.v v8, (a0), a1
+; CHECK-NEXT: ret
+ %a = load <4 x i8>, ptr %p
+ %b.ptr = getelementptr i8, ptr %p, i64 %s
+ %b = load <4 x i8>, ptr %b.ptr
+ %c.ptr = getelementptr i8, ptr %b.ptr, i64 %s
+ %c = load <4 x i8>, ptr %c.ptr
+ %d.ptr = getelementptr i8, ptr %c.ptr, i64 %s
+ %d = load <4 x i8>, ptr %d.ptr
+
+ %ab = shufflevector <4 x i8> %a, <4 x i8> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %cx = shufflevector <4 x i8> %c, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %dx = shufflevector <4 x i8> %d, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %abcx = shufflevector <16 x i8> %ab, <16 x i8> %cx, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
+ %abcd = shufflevector <16 x i8> %abcx, <16 x i8> %dx, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+ ret <16 x i8> %abcd
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
index 57a72c6..bc0bf5d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
@@ -385,12 +385,12 @@ define <32 x i64> @vwaddu_v32i64(ptr %x, ptr %y) nounwind {
define <2 x i32> @vwaddu_v2i32_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwaddu_v2i32_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vzext.vf2 v11, v9
-; CHECK-NEXT: vwaddu.vv v8, v10, v11
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = load <2 x i8>, ptr %y
@@ -912,12 +912,12 @@ define <4 x i64> @crash(<4 x i16> %x, <4 x i16> %y) {
define <2 x i32> @vwaddu_v2i32_of_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwaddu_v2i32_of_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vzext.vf2 v11, v9
-; CHECK-NEXT: vwaddu.vv v8, v10, v11
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = load <2 x i8>, ptr %y
@@ -930,12 +930,12 @@ define <2 x i32> @vwaddu_v2i32_of_v2i8(ptr %x, ptr %y) {
define <2 x i64> @vwaddu_v2i64_of_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwaddu_v2i64_of_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v11, v9
-; CHECK-NEXT: vwaddu.vv v8, v10, v11
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = load <2 x i8>, ptr %y
@@ -948,12 +948,12 @@ define <2 x i64> @vwaddu_v2i64_of_v2i8(ptr %x, ptr %y) {
define <2 x i64> @vwaddu_v2i64_of_v2i16(ptr %x, ptr %y) {
; CHECK-LABEL: vwaddu_v2i64_of_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vle16.v v9, (a1)
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vzext.vf2 v11, v9
-; CHECK-NEXT: vwaddu.vv v8, v10, v11
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i16>, ptr %x
%b = load <2 x i16>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
index bff7ef8..b97c965 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -391,12 +391,12 @@ define <32 x i64> @vwmulu_v32i64(ptr %x, ptr %y) {
define <2 x i32> @vwmulu_v2i32_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwmulu_v2i32_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vzext.vf2 v11, v9
-; CHECK-NEXT: vwmulu.vv v8, v10, v11
+; CHECK-NEXT: vwmulu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = load <2 x i8>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsll.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsll.ll
new file mode 100644
index 0000000..f5305a1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsll.ll
@@ -0,0 +1,920 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB
+
+; ==============================================================================
+; i32 -> i64
+; ==============================================================================
+
+define <4 x i64> @vwsll_vv_v4i64_sext(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vwsll_vv_v4i64_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v4i64_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = sext <4 x i32> %b to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vv_v4i64_zext(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vwsll_vv_v4i64_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v4i64_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = zext <4 x i32> %b to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i64_v4i64(<4 x i32> %a, i64 %b) {
+; CHECK-LABEL: vwsll_vx_i64_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vx v8, v10, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i64_v4i64:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vx v8, v10, a0
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i64> poison, i64 %b, i32 0
+ %splat = shufflevector <4 x i64> %head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i32> %a to <4 x i64>
+ %z = shl <4 x i64> %x, %splat
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i32_v4i64_sext(<4 x i32> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_v4i64_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_v4i64_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %splat = shufflevector <4 x i32> %head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = sext <4 x i32> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i32_v4i64_zext(<4 x i32> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_v4i64_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_v4i64_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %splat = shufflevector <4 x i32> %head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = zext <4 x i32> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i16_v4i64_sext(<4 x i32> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_v4i64_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf4 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_v4i64_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf4 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %splat = shufflevector <4 x i16> %head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = sext <4 x i16> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i16_v4i64_zext(<4 x i32> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_v4i64_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf4 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_v4i64_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf4 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %splat = shufflevector <4 x i16> %head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = zext <4 x i16> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i8_v4i64_sext(<4 x i32> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v4i64_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf8 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v4i64_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf8 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <4 x i8> %head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = sext <4 x i8> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i8_v4i64_zext(<4 x i32> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v4i64_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf8 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v4i64_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf8 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <4 x i8> %head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i32> %a to <4 x i64>
+ %y = zext <4 x i8> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vi_v4i64(<4 x i32> %a) {
+; CHECK-LABEL: vwsll_vi_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vi_v4i64:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vi v8, v10, 2
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <4 x i32> %a to <4 x i64>
+ %z = shl <4 x i64> %x, splat (i64 2)
+ ret <4 x i64> %z
+}
+
+; ==============================================================================
+; i16 -> i32
+; ==============================================================================
+
+define <8 x i32> @vwsll_vv_v8i32_sext(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vwsll_vv_v8i32_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v8i32_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <8 x i16> %a to <8 x i32>
+ %y = sext <8 x i16> %b to <8 x i32>
+ %z = shl <8 x i32> %x, %y
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vv_v8i32_zext(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vwsll_vv_v8i32_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v8i32_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <8 x i16> %a to <8 x i32>
+ %y = zext <8 x i16> %b to <8 x i32>
+ %z = shl <8 x i32> %x, %y
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vx_i64_v8i32(<8 x i16> %a, i64 %b) {
+; CHECK-LABEL: vwsll_vx_i64_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vx v8, v10, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i64_v8i32:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vx v8, v10, a0
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <8 x i64> poison, i64 %b, i32 0
+ %splat = shufflevector <8 x i64> %head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %x = zext <8 x i16> %a to <8 x i32>
+ %y = trunc <8 x i64> %splat to <8 x i32>
+ %z = shl <8 x i32> %x, %y
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vx_i32_v8i32(<8 x i16> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vx v8, v10, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_v8i32:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vx v8, v10, a0
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <8 x i32> poison, i32 %b, i32 0
+ %splat = shufflevector <8 x i32> %head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %x = zext <8 x i16> %a to <8 x i32>
+ %z = shl <8 x i32> %x, %splat
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vx_i16_v8i32_sext(<8 x i16> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_v8i32_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_v8i32_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <8 x i16> poison, i16 %b, i32 0
+ %splat = shufflevector <8 x i16> %head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %x = zext <8 x i16> %a to <8 x i32>
+ %y = sext <8 x i16> %splat to <8 x i32>
+ %z = shl <8 x i32> %x, %y
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vx_i16_v8i32_zext(<8 x i16> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_v8i32_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_v8i32_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <8 x i16> poison, i16 %b, i32 0
+ %splat = shufflevector <8 x i16> %head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %x = zext <8 x i16> %a to <8 x i32>
+ %y = zext <8 x i16> %splat to <8 x i32>
+ %z = shl <8 x i32> %x, %y
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vx_i8_v8i32_sext(<8 x i16> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v8i32_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf4 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v8i32_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf4 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <8 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <8 x i8> %head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %x = zext <8 x i16> %a to <8 x i32>
+ %y = sext <8 x i8> %splat to <8 x i32>
+ %z = shl <8 x i32> %x, %y
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vx_i8_v8i32_zext(<8 x i16> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v8i32_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf4 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v8i32_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf4 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <8 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <8 x i8> %head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %x = zext <8 x i16> %a to <8 x i32>
+ %y = zext <8 x i8> %splat to <8 x i32>
+ %z = shl <8 x i32> %x, %y
+ ret <8 x i32> %z
+}
+
+define <8 x i32> @vwsll_vi_v8i32(<8 x i16> %a) {
+; CHECK-LABEL: vwsll_vi_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vi_v8i32:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vi v8, v10, 2
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <8 x i16> %a to <8 x i32>
+ %z = shl <8 x i32> %x, splat (i32 2)
+ ret <8 x i32> %z
+}
+
+; ==============================================================================
+; i8 -> i16
+; ==============================================================================
+
+define <16 x i16> @vwsll_vv_v16i16_sext(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vwsll_vv_v16i16_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v16i16_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <16 x i8> %a to <16 x i16>
+ %y = sext <16 x i8> %b to <16 x i16>
+ %z = shl <16 x i16> %x, %y
+ ret <16 x i16> %z
+}
+
+define <16 x i16> @vwsll_vv_v16i16_zext(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vwsll_vv_v16i16_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v16i16_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <16 x i8> %a to <16 x i16>
+ %y = zext <16 x i8> %b to <16 x i16>
+ %z = shl <16 x i16> %x, %y
+ ret <16 x i16> %z
+}
+
+define <16 x i16> @vwsll_vx_i64_v16i16(<16 x i8> %a, i64 %b) {
+ %head = insertelement <8 x i64> poison, i64 %b, i32 0
+ %splat = shufflevector <8 x i64> %head, <8 x i64> poison, <16 x i32> zeroinitializer
+ %x = zext <16 x i8> %a to <16 x i16>
+ %y = trunc <16 x i64> %splat to <16 x i16>
+ %z = shl <16 x i16> %x, %y
+ ret <16 x i16> %z
+}
+
+define <16 x i16> @vwsll_vx_i32_v16i16(<16 x i8> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vmv.v.x v12, a0
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vsll.vv v8, v10, v8
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_v16i16:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v12, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v8
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <16 x i32> poison, i32 %b, i32 0
+ %splat = shufflevector <16 x i32> %head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %x = zext <16 x i8> %a to <16 x i16>
+ %y = trunc <16 x i32> %splat to <16 x i16>
+ %z = shl <16 x i16> %x, %y
+ ret <16 x i16> %z
+}
+
+define <16 x i16> @vwsll_vx_i16_v16i16(<16 x i8> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vx v8, v10, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_v16i16:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vx v8, v10, a0
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <16 x i16> poison, i16 %b, i32 0
+ %splat = shufflevector <16 x i16> %head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %x = zext <16 x i8> %a to <16 x i16>
+ %z = shl <16 x i16> %x, %splat
+ ret <16 x i16> %z
+}
+
+define <16 x i16> @vwsll_vx_i8_v16i16_sext(<16 x i8> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v16i16_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v16i16_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <16 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <16 x i8> %head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %x = zext <16 x i8> %a to <16 x i16>
+ %y = sext <16 x i8> %splat to <16 x i16>
+ %z = shl <16 x i16> %x, %y
+ ret <16 x i16> %z
+}
+
+define <16 x i16> @vwsll_vx_i8_v16i16_zext(<16 x i8> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v16i16_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v16i16_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <16 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <16 x i8> %head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %x = zext <16 x i8> %a to <16 x i16>
+ %y = zext <16 x i8> %splat to <16 x i16>
+ %z = shl <16 x i16> %x, %y
+ ret <16 x i16> %z
+}
+
+define <16 x i16> @vwsll_vi_v16i16(<16 x i8> %a) {
+; CHECK-LABEL: vwsll_vi_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vi_v16i16:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf2 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vi v8, v10, 2
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <16 x i8> %a to <16 x i16>
+ %z = shl <16 x i16> %x, splat (i16 2)
+ ret <16 x i16> %z
+}
+
+; ==============================================================================
+; i8 -> i64
+; ==============================================================================
+
+define <4 x i64> @vwsll_vv_v4i64_v4i8_sext(<4 x i8> %a, <4 x i8> %b) {
+; CHECK-LABEL: vwsll_vv_v4i64_v4i8_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vsext.vf8 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v4i64_v4i8_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf8 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = sext <4 x i8> %b to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vv_v4i64_v4i8_zext(<4 x i8> %a, <4 x i8> %b) {
+; CHECK-LABEL: vwsll_vv_v4i64_v4i8_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vzext.vf8 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_v4i64_v4i8_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf8 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = zext <4 x i8> %b to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i64_v4i64_v4i8(<4 x i8> %a, i64 %b) {
+; CHECK-LABEL: vwsll_vx_i64_v4i64_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vsll.vx v8, v10, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i64_v4i64_v4i8:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vx v8, v10, a0
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i64> poison, i64 %b, i32 0
+ %splat = shufflevector <4 x i64> %head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i8> %a to <4 x i64>
+ %z = shl <4 x i64> %x, %splat
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i32_v4i64_v4i8_sext(<4 x i8> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_v4i64_v4i8_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vsext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_v4i64_v4i8_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %splat = shufflevector <4 x i32> %head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = sext <4 x i32> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i32_v4i64_v4i8_zext(<4 x i8> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_v4i64_v4i8_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vzext.vf2 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_v4i64_v4i8_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf2 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %splat = shufflevector <4 x i32> %head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = zext <4 x i32> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i16_v4i64_v4i8_sext(<4 x i8> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_v4i64_v4i8_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vsext.vf4 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_v4i64_v4i8_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf4 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %splat = shufflevector <4 x i16> %head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = sext <4 x i16> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i16_v4i64_v4i8_zext(<4 x i8> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_v4i64_v4i8_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vzext.vf4 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_v4i64_v4i8_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf4 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %splat = shufflevector <4 x i16> %head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = zext <4 x i16> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i8_v4i64_v4i8_sext(<4 x i8> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v4i64_v4i8_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vsext.vf8 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v4i64_v4i8_sext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vsext.vf8 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <4 x i8> %head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = sext <4 x i8> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vx_i8_v4i64_v4i8_zext(<4 x i8> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_v4i64_v4i8_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vzext.vf8 v12, v9
+; CHECK-NEXT: vsll.vv v8, v10, v12
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_v4i64_v4i8_zext:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT: vmv.v.x v9, a0
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vzext.vf8 v12, v9
+; CHECK-ZVBB-NEXT: vsll.vv v8, v10, v12
+; CHECK-ZVBB-NEXT: ret
+ %head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %splat = shufflevector <4 x i8> %head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %x = zext <4 x i8> %a to <4 x i64>
+ %y = zext <4 x i8> %splat to <4 x i64>
+ %z = shl <4 x i64> %x, %y
+ ret <4 x i64> %z
+}
+
+define <4 x i64> @vwsll_vi_v4i64_v4i8(<4 x i8> %a) {
+; CHECK-LABEL: vwsll_vi_v4i64_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf8 v10, v8
+; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vi_v4i64_v4i8:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vzext.vf8 v10, v8
+; CHECK-ZVBB-NEXT: vsll.vi v8, v10, 2
+; CHECK-ZVBB-NEXT: ret
+ %x = zext <4 x i8> %a to <4 x i64>
+ %z = shl <4 x i64> %x, splat (i64 2)
+ ret <4 x i64> %z
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
index 0544204..52bd157 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
@@ -16,8 +16,8 @@ define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0 /* tu, mu */
- ; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store unknown-size into %ir.p, align 8)
+ ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size from %ir.p, align 8)
+ ; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store (<vscale x 1 x s64>) into %ir.p)
; CHECK-NEXT: PseudoRET
%splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
%mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
@@ -37,8 +37,8 @@ define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 1 /* ta, mu */
- ; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store unknown-size into %ir.p, align 8)
+ ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 1 /* ta, mu */ :: (load unknown-size from %ir.p, align 8)
+ ; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store (<vscale x 1 x s64>) into %ir.p)
; CHECK-NEXT: PseudoRET
%splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
%mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
index a4aef57..7cc4a9d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -1187,3 +1187,30 @@ define <vscale x 2 x i32> @vmerge_larger_vl_false_becomes_tail(<vscale x 2 x i32
%b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %false, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 3)
ret <vscale x 2 x i32> %b
}
+
+; Test widening pseudos with their TIED variant (passthru same as first op).
+define <vscale x 2 x i64> @vpmerge_vwsub.w_tied(<vscale x 2 x i64> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 zeroext %vl) {
+; CHECK-LABEL: vpmerge_vwsub.w_tied:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vwsub.wv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %vl.zext = zext i32 %vl to i64
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(<vscale x 2 x i64> %passthru, <vscale x 2 x i64> %passthru, <vscale x 2 x i32> %y, i64 %vl.zext)
+ %b = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %a, <vscale x 2 x i64> %passthru, i32 %vl)
+ ret <vscale x 2 x i64> %b
+}
+
+define <vscale x 2 x double> @vpmerge_vfwsub.w_tied(<vscale x 2 x double> %passthru, <vscale x 2 x double> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %mask, i32 zeroext %vl) {
+; CHECK-LABEL: vpmerge_vfwsub.w_tied:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: fsrmi a0, 1
+; CHECK-NEXT: vfwsub.wv v8, v8, v12, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+ %vl.zext = zext i32 %vl to i64
+ %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(<vscale x 2 x double> %passthru, <vscale x 2 x double> %passthru, <vscale x 2 x float> %y, i64 1, i64 %vl.zext)
+ %b = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %mask, <vscale x 2 x double> %a, <vscale x 2 x double> %passthru, i32 %vl)
+ ret <vscale x 2 x double> %b
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir b/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
index 6ea6fb1..749bd4c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
@@ -159,7 +159,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 8
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
@@ -204,7 +204,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 16
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
@@ -249,7 +249,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 32
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
index a320aec..6a71208 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -18,10 +18,10 @@ define {<vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_load_nxv16i
; CHECK-NEXT: vmerge.vim v14, v10, 1, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
-; CHECK-NEXT: vnsrl.wi v8, v12, 0
-; CHECK-NEXT: vmsne.vi v0, v8, 0
-; CHECK-NEXT: vnsrl.wi v10, v12, 8
+; CHECK-NEXT: vnsrl.wi v10, v12, 0
; CHECK-NEXT: vmsne.vi v8, v10, 0
+; CHECK-NEXT: vnsrl.wi v10, v12, 8
+; CHECK-NEXT: vmsne.vi v9, v10, 0
; CHECK-NEXT: ret
%vec = load <vscale x 32 x i1>, ptr %p
%retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.experimental.vector.deinterleave2.nxv32i1(<vscale x 32 x i1> %vec)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index ef4baf3..d98597fa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -8,18 +8,18 @@ define {<vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv
; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, 1, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a0
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT: vnsrl.wi v12, v8, 0
-; CHECK-NEXT: vmsne.vi v0, v12, 0
-; CHECK-NEXT: vnsrl.wi v12, v8, 8
-; CHECK-NEXT: vmsne.vi v8, v12, 0
+; CHECK-NEXT: vmerge.vim v14, v8, 1, v0
+; CHECK-NEXT: vnsrl.wi v10, v12, 0
+; CHECK-NEXT: vmsne.vi v8, v10, 0
+; CHECK-NEXT: vnsrl.wi v10, v12, 8
+; CHECK-NEXT: vmsne.vi v9, v10, 0
; CHECK-NEXT: ret
%retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.experimental.vector.deinterleave2.nxv32i1(<vscale x 32 x i1> %vec)
ret {<vscale x 16 x i1>, <vscale x 16 x i1>} %retval
@@ -102,12 +102,13 @@ define {<vscale x 64 x i1>, <vscale x 64 x i1>} @vector_deinterleave_nxv64i1_nxv
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v28, v8, 0
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v24, 0
+; CHECK-NEXT: vmsne.vi v7, v24, 0
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v24, v16, 8
; CHECK-NEXT: vnsrl.wi v28, v8, 8
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmsne.vi v8, v24, 0
+; CHECK-NEXT: vmsne.vi v9, v24, 0
+; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: ret
%retval = call {<vscale x 64 x i1>, <vscale x 64 x i1>} @llvm.experimental.vector.deinterleave2.nxv128i1(<vscale x 128 x i1> %vec)
ret {<vscale x 64 x i1>, <vscale x 64 x i1>} %retval
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
index 4aae8b8..9a5e86d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -101,40 +101,36 @@ define void @vector_interleave_store_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vs
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
; CHECK-NEXT: vsetvli a3, zero, e16, m2, ta, mu
; CHECK-NEXT: vid.v v24
; CHECK-NEXT: vand.vi v26, v24, 1
-; CHECK-NEXT: vmsne.vi v0, v26, 0
-; CHECK-NEXT: vsrl.vi v6, v24, 1
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a3, a3, 3
-; CHECK-NEXT: add a3, sp, a3
-; CHECK-NEXT: addi a3, a3, 16
-; CHECK-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; CHECK-NEXT: vadd.vx v6, v6, a2, v0.t
+; CHECK-NEXT: vmsne.vi v28, v26, 0
+; CHECK-NEXT: vsrl.vi v24, v24, 1
+; CHECK-NEXT: vmv1r.v v0, v28
+; CHECK-NEXT: vadd.vx v24, v24, a2, v0.t
; CHECK-NEXT: vmv4r.v v12, v16
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v24, v8, v6
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vrgatherei16.vv v0, v8, v24
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v0, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vrgatherei16.vv v8, v16, v6
+; CHECK-NEXT: vrgatherei16.vv v8, v16, v24
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vs8r.v v8, (a1)
-; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index 1acc0fe..0992c9f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -656,6 +656,24 @@ define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x
ret <vscale x 16 x double> %res
}

+define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32_poison(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; ZVBB-NEXT: vzext.vf2 v12, v8
+; ZVBB-NEXT: vmv.v.v v8, v12
+; ZVBB-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> poison)
+ ret <vscale x 8 x i32> %res
+}
+
declare <vscale x 64 x half> @llvm.experimental.vector.interleave2.nxv64f16(<vscale x 32 x half>, <vscale x 32 x half>)
declare <vscale x 32 x float> @llvm.experimental.vector.interleave2.nxv32f32(<vscale x 16 x float>, <vscale x 16 x float>)
declare <vscale x 16 x double> @llvm.experimental.vector.interleave2.nxv16f64(<vscale x 8 x double>, <vscale x 8 x double>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll b/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
index 972fa66..e56dca0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
@@ -283,18 +283,19 @@ define <vscale x 2 x i32> @vwop_vscale_sext_i8i32_multiple_users(ptr %x, ptr %y,
;
; FOLDING-LABEL: vwop_vscale_sext_i8i32_multiple_users:
; FOLDING: # %bb.0:
-; FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, ma
+; FOLDING-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
; FOLDING-NEXT: vle8.v v8, (a0)
; FOLDING-NEXT: vle8.v v9, (a1)
; FOLDING-NEXT: vle8.v v10, (a2)
-; FOLDING-NEXT: vsext.vf4 v11, v8
-; FOLDING-NEXT: vsext.vf4 v8, v9
-; FOLDING-NEXT: vsext.vf4 v9, v10
-; FOLDING-NEXT: vmul.vv v8, v11, v8
-; FOLDING-NEXT: vadd.vv v10, v11, v9
-; FOLDING-NEXT: vsub.vv v9, v11, v9
-; FOLDING-NEXT: vor.vv v8, v8, v10
-; FOLDING-NEXT: vor.vv v8, v8, v9
+; FOLDING-NEXT: vsext.vf2 v11, v8
+; FOLDING-NEXT: vsext.vf2 v8, v9
+; FOLDING-NEXT: vsext.vf2 v9, v10
+; FOLDING-NEXT: vwmul.vv v10, v11, v8
+; FOLDING-NEXT: vwadd.vv v8, v11, v9
+; FOLDING-NEXT: vwsub.vv v12, v11, v9
+; FOLDING-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; FOLDING-NEXT: vor.vv v8, v10, v8
+; FOLDING-NEXT: vor.vv v8, v8, v12
; FOLDING-NEXT: ret
%a = load <vscale x 2 x i8>, ptr %x
%b = load <vscale x 2 x i8>, ptr %y
@@ -563,18 +564,19 @@ define <vscale x 2 x i32> @vwop_vscale_zext_i8i32_multiple_users(ptr %x, ptr %y,
;
; FOLDING-LABEL: vwop_vscale_zext_i8i32_multiple_users:
; FOLDING: # %bb.0:
-; FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, ma
+; FOLDING-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
; FOLDING-NEXT: vle8.v v8, (a0)
; FOLDING-NEXT: vle8.v v9, (a1)
; FOLDING-NEXT: vle8.v v10, (a2)
-; FOLDING-NEXT: vzext.vf4 v11, v8
-; FOLDING-NEXT: vzext.vf4 v8, v9
-; FOLDING-NEXT: vzext.vf4 v9, v10
-; FOLDING-NEXT: vmul.vv v8, v11, v8
-; FOLDING-NEXT: vadd.vv v10, v11, v9
-; FOLDING-NEXT: vsub.vv v9, v11, v9
-; FOLDING-NEXT: vor.vv v8, v8, v10
-; FOLDING-NEXT: vor.vv v8, v8, v9
+; FOLDING-NEXT: vzext.vf2 v11, v8
+; FOLDING-NEXT: vzext.vf2 v8, v9
+; FOLDING-NEXT: vzext.vf2 v9, v10
+; FOLDING-NEXT: vwmulu.vv v10, v11, v8
+; FOLDING-NEXT: vwaddu.vv v8, v11, v9
+; FOLDING-NEXT: vwsubu.vv v12, v11, v9
+; FOLDING-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; FOLDING-NEXT: vor.vv v8, v10, v8
+; FOLDING-NEXT: vor.vv v8, v8, v12
; FOLDING-NEXT: ret
%a = load <vscale x 2 x i8>, ptr %x
%b = load <vscale x 2 x i8>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
index a559fbf..66e6883 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV64 %s

define <vscale x 1 x i64> @vwadd_vv_nxv1i64_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vwadd_vv_nxv1i64_nxv1i32:
@@ -421,10 +421,10 @@ define <vscale x 8 x i64> @vwaddu_wx_nxv8i64_nxv8i32(<vscale x 8 x i64> %va, i32
define <vscale x 1 x i64> @vwadd_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwadd_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -435,10 +435,10 @@ define <vscale x 1 x i64> @vwadd_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vsc
define <vscale x 1 x i64> @vwaddu_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -451,10 +451,10 @@ define <vscale x 1 x i64> @vwadd_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -468,11 +468,9 @@ define <vscale x 1 x i64> @vwaddu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
; CHECK-LABEL: vwaddu_vx_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v9, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -485,9 +483,9 @@ define <vscale x 1 x i64> @vwaddu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
define <vscale x 1 x i64> @vwadd_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwadd_wv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v9
+; CHECK-NEXT: vwadd.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
%vd = add <vscale x 1 x i64> %va, %vc
@@ -497,9 +495,9 @@ define <vscale x 1 x i64> @vwadd_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vsc
define <vscale x 1 x i64> @vwaddu_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vwaddu.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
%vd = add <vscale x 1 x i64> %va, %vc
@@ -511,9 +509,9 @@ define <vscale x 1 x i64> @vwadd_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v9
+; CHECK-NEXT: vwadd.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -527,9 +525,9 @@ define <vscale x 1 x i64> @vwaddu_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vwaddu.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -541,10 +539,10 @@ define <vscale x 1 x i64> @vwaddu_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
define <vscale x 2 x i64> @vwadd_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwadd_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -555,10 +553,10 @@ define <vscale x 2 x i64> @vwadd_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vsc
define <vscale x 2 x i64> @vwaddu_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -571,10 +569,10 @@ define <vscale x 2 x i64> @vwadd_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -588,11 +586,9 @@ define <vscale x 2 x i64> @vwaddu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
; CHECK-LABEL: vwaddu_vx_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v10, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -605,9 +601,9 @@ define <vscale x 2 x i64> @vwaddu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
define <vscale x 2 x i64> @vwadd_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwadd_wv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v11, v10
+; CHECK-NEXT: vwadd.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
%vd = add <vscale x 2 x i64> %va, %vc
@@ -617,9 +613,9 @@ define <vscale x 2 x i64> @vwadd_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vsc
define <vscale x 2 x i64> @vwaddu_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v11, v10
+; CHECK-NEXT: vwaddu.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
%vd = add <vscale x 2 x i64> %va, %vc
@@ -631,9 +627,9 @@ define <vscale x 2 x i64> @vwadd_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v11, v10
+; CHECK-NEXT: vwadd.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -647,9 +643,9 @@ define <vscale x 2 x i64> @vwaddu_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v11, v10
+; CHECK-NEXT: vwaddu.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -661,10 +657,10 @@ define <vscale x 2 x i64> @vwaddu_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
define <vscale x 4 x i64> @vwadd_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwadd_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwadd.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -675,10 +671,10 @@ define <vscale x 4 x i64> @vwadd_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vsc
define <vscale x 4 x i64> @vwaddu_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vwaddu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -691,10 +687,10 @@ define <vscale x 4 x i64> @vwadd_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwadd.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -708,11 +704,9 @@ define <vscale x 4 x i64> @vwaddu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
; CHECK-LABEL: vwaddu_vx_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v12, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -725,9 +719,9 @@ define <vscale x 4 x i64> @vwaddu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
define <vscale x 4 x i64> @vwadd_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwadd_wv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v14, v12
+; CHECK-NEXT: vwadd.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
%vd = add <vscale x 4 x i64> %va, %vc
@@ -737,9 +731,9 @@ define <vscale x 4 x i64> @vwadd_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vsc
define <vscale x 4 x i64> @vwaddu_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v14, v12
+; CHECK-NEXT: vwaddu.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
%vd = add <vscale x 4 x i64> %va, %vc
@@ -751,9 +745,9 @@ define <vscale x 4 x i64> @vwadd_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v14, v12
+; CHECK-NEXT: vwadd.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -767,9 +761,9 @@ define <vscale x 4 x i64> @vwaddu_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v14, v12
+; CHECK-NEXT: vwaddu.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -781,10 +775,10 @@ define <vscale x 4 x i64> @vwaddu_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
define <vscale x 8 x i64> @vwadd_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwadd_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwadd.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -795,10 +789,10 @@ define <vscale x 8 x i64> @vwadd_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vsc
define <vscale x 8 x i64> @vwaddu_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwaddu.vv v16, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v16
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -811,10 +805,10 @@ define <vscale x 8 x i64> @vwadd_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwadd.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -828,11 +822,9 @@ define <vscale x 8 x i64> @vwaddu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
; CHECK-LABEL: vwaddu_vx_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
+; CHECK-NEXT: vwaddu.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vzext.vf2 v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -845,9 +837,9 @@ define <vscale x 8 x i64> @vwaddu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
define <vscale x 8 x i64> @vwadd_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwadd_wv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v20, v16
+; CHECK-NEXT: vwadd.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
%vd = add <vscale x 8 x i64> %va, %vc
@@ -857,9 +849,9 @@ define <vscale x 8 x i64> @vwadd_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vsc
define <vscale x 8 x i64> @vwaddu_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v20, v16
+; CHECK-NEXT: vwaddu.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
%vd = add <vscale x 8 x i64> %va, %vc
@@ -871,9 +863,9 @@ define <vscale x 8 x i64> @vwadd_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v20, v16
+; CHECK-NEXT: vwadd.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -887,9 +879,9 @@ define <vscale x 8 x i64> @vwaddu_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v20, v16
+; CHECK-NEXT: vwaddu.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -901,10 +893,10 @@ define <vscale x 8 x i64> @vwaddu_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
define <vscale x 1 x i64> @vwadd_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwadd_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -915,10 +907,10 @@ define <vscale x 1 x i64> @vwadd_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscal
define <vscale x 1 x i64> @vwaddu_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -931,10 +923,10 @@ define <vscale x 1 x i64> @vwadd_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -948,11 +940,9 @@ define <vscale x 1 x i64> @vwaddu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b
; CHECK-LABEL: vwaddu_vx_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v9, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vzext.vf4 v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -965,9 +955,9 @@ define <vscale x 1 x i64> @vwaddu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b
define <vscale x 1 x i64> @vwadd_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwadd_wv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v9
+; CHECK-NEXT: vwadd.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
%vd = add <vscale x 1 x i64> %va, %vc
@@ -977,9 +967,9 @@ define <vscale x 1 x i64> @vwadd_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vsca
define <vscale x 1 x i64> @vwaddu_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v9
+; CHECK-NEXT: vwaddu.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
%vd = add <vscale x 1 x i64> %va, %vc
@@ -991,9 +981,9 @@ define <vscale x 1 x i64> @vwadd_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v9
+; CHECK-NEXT: vwadd.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -1007,9 +997,9 @@ define <vscale x 1 x i64> @vwaddu_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v9
+; CHECK-NEXT: vwaddu.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -1021,10 +1011,10 @@ define <vscale x 1 x i64> @vwaddu_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %
define <vscale x 2 x i64> @vwadd_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwadd_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -1035,10 +1025,10 @@ define <vscale x 2 x i64> @vwadd_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscal
define <vscale x 2 x i64> @vwaddu_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -1051,10 +1041,10 @@ define <vscale x 2 x i64> @vwadd_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1068,11 +1058,9 @@ define <vscale x 2 x i64> @vwaddu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b
; CHECK-LABEL: vwaddu_vx_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v10, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1085,9 +1073,9 @@ define <vscale x 2 x i64> @vwaddu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b
define <vscale x 2 x i64> @vwadd_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwadd_wv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v11, v10
+; CHECK-NEXT: vwadd.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
%vd = add <vscale x 2 x i64> %va, %vc
@@ -1097,9 +1085,9 @@ define <vscale x 2 x i64> @vwadd_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vsca
define <vscale x 2 x i64> @vwaddu_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v11, v10
+; CHECK-NEXT: vwaddu.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
%vd = add <vscale x 2 x i64> %va, %vc
@@ -1111,9 +1099,9 @@ define <vscale x 2 x i64> @vwadd_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v11, v10
+; CHECK-NEXT: vwadd.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1127,9 +1115,9 @@ define <vscale x 2 x i64> @vwaddu_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v11, v10
+; CHECK-NEXT: vwaddu.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1141,10 +1129,10 @@ define <vscale x 2 x i64> @vwaddu_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %
define <vscale x 4 x i64> @vwadd_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwadd_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwadd.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -1155,10 +1143,10 @@ define <vscale x 4 x i64> @vwadd_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscal
define <vscale x 4 x i64> @vwaddu_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vwaddu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v12
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -1171,10 +1159,10 @@ define <vscale x 4 x i64> @vwadd_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwadd.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1188,11 +1176,9 @@ define <vscale x 4 x i64> @vwaddu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b
; CHECK-LABEL: vwaddu_vx_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v12, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vzext.vf4 v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1205,9 +1191,9 @@ define <vscale x 4 x i64> @vwaddu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b
define <vscale x 4 x i64> @vwadd_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwadd_wv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v14, v12
+; CHECK-NEXT: vwadd.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
%vd = add <vscale x 4 x i64> %va, %vc
@@ -1217,9 +1203,9 @@ define <vscale x 4 x i64> @vwadd_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vsca
define <vscale x 4 x i64> @vwaddu_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v14, v12
+; CHECK-NEXT: vwaddu.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
%vd = add <vscale x 4 x i64> %va, %vc
@@ -1231,9 +1217,9 @@ define <vscale x 4 x i64> @vwadd_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v14, v12
+; CHECK-NEXT: vwadd.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1247,9 +1233,9 @@ define <vscale x 4 x i64> @vwaddu_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v14, v12
+; CHECK-NEXT: vwaddu.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1261,10 +1247,10 @@ define <vscale x 4 x i64> @vwaddu_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %
define <vscale x 8 x i64> @vwadd_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwadd_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwadd.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1275,10 +1261,10 @@ define <vscale x 8 x i64> @vwadd_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscal
define <vscale x 8 x i64> @vwaddu_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwaddu.vv v16, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v16
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1291,10 +1277,10 @@ define <vscale x 8 x i64> @vwadd_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwadd.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1308,11 +1294,9 @@ define <vscale x 8 x i64> @vwaddu_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b
; CHECK-LABEL: vwaddu_vx_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vzext.vf4 v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1325,9 +1309,9 @@ define <vscale x 8 x i64> @vwaddu_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b
define <vscale x 8 x i64> @vwadd_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwadd_wv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v20, v16
+; CHECK-NEXT: vwadd.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
%vd = add <vscale x 8 x i64> %va, %vc
@@ -1337,9 +1321,9 @@ define <vscale x 8 x i64> @vwadd_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vsca
define <vscale x 8 x i64> @vwaddu_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v20, v16
+; CHECK-NEXT: vwaddu.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
%vd = add <vscale x 8 x i64> %va, %vc
@@ -1351,9 +1335,9 @@ define <vscale x 8 x i64> @vwadd_wx_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v20, v16
+; CHECK-NEXT: vwadd.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1367,9 +1351,9 @@ define <vscale x 8 x i64> @vwaddu_wx_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v20, v16
+; CHECK-NEXT: vwaddu.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1377,3 +1361,108 @@ define <vscale x 8 x i64> @vwaddu_wx_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, i8 %
%vc = add <vscale x 8 x i64> %va, %vb
ret <vscale x 8 x i64> %vc
}
+
+; Make sure that we don't introduce any V{S,Z}EXT_VL nodes with i1 types from
+; combineBinOp_VLToVWBinOp_VL, since they can't be selected.
+define <vscale x 1 x i64> @i1_zext(<vscale x 1 x i1> %va, <vscale x 1 x i64> %vb, ptr %p) {
+; RV32-LABEL: i1_zext:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vmv.v.i v9, 0
+; RV32-NEXT: vmerge.vim v9, v9, 1, v0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: li a1, 42
+; RV32-NEXT: sh a1, 0(a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: i1_zext:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; RV64-NEXT: vadd.vi v8, v8, 1, v0.t
+; RV64-NEXT: li a1, 42
+; RV64-NEXT: sh a1, 0(a0)
+; RV64-NEXT: ret
+ %vc = zext <vscale x 1 x i1> %va to <vscale x 1 x i64>
+ %vd = add <vscale x 1 x i64> %vc, %vb
+
+; Introduce an illegal type so that the DAG changes after legalizing
+; types. Otherwise the legalize vector ops phase will be run immediately after
+; the legalize types phase, and the zext will already be in non-i1 form by the
+; time combineBinOp_VLToVWBinOp_VL is called.
+ store i9 42, ptr %p
+ ret <vscale x 1 x i64> %vd
+}
+
+; %x.i32 and %y.i32 are disjoint, so DAGCombiner will combine the add into an or.
+; FIXME: We should be able to recover the or into vwaddu.vv if the disjoint
+; flag is set.
+define <vscale x 2 x i32> @vwaddu_vv_disjoint_or_add(<vscale x 2 x i8> %x.i8, <vscale x 2 x i8> %y.i8) {
+; CHECK-LABEL: vwaddu_vv_disjoint_or_add:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vi v10, v10, 8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwaddu.vv v8, v10, v11
+; CHECK-NEXT: ret
+ %x.i16 = zext <vscale x 2 x i8> %x.i8 to <vscale x 2 x i16>
+ %x.shl = shl <vscale x 2 x i16> %x.i16, shufflevector(<vscale x 2 x i16> insertelement(<vscale x 2 x i16> poison, i16 8, i32 0), <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer)
+ %x.i32 = zext <vscale x 2 x i16> %x.shl to <vscale x 2 x i32>
+ %y.i32 = zext <vscale x 2 x i8> %y.i8 to <vscale x 2 x i32>
+ %add = add <vscale x 2 x i32> %x.i32, %y.i32
+ ret <vscale x 2 x i32> %add
+}
+
+; TODO: We could select vwaddu.vv, but when both arms of the or are the same
+; opcode, DAGCombiner::hoistLogicOpWithSameOpcodeHands moves the zext above the or.
+define <vscale x 2 x i32> @vwaddu_vv_disjoint_or(<vscale x 2 x i16> %x.i16, <vscale x 2 x i16> %y.i16) {
+; CHECK-LABEL: vwaddu_vv_disjoint_or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vor.vv v9, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %x.i32 = zext <vscale x 2 x i16> %x.i16 to <vscale x 2 x i32>
+ %y.i32 = zext <vscale x 2 x i16> %y.i16 to <vscale x 2 x i32>
+ %or = or disjoint <vscale x 2 x i32> %x.i32, %y.i32
+ ret <vscale x 2 x i32> %or
+}
+
+; TODO: We could select vwadd.vv, but when both arms of the or are the same
+; opcode, DAGCombiner::hoistLogicOpWithSameOpcodeHands moves the sext above the or.
+define <vscale x 2 x i32> @vwadd_vv_disjoint_or(<vscale x 2 x i16> %x.i16, <vscale x 2 x i16> %y.i16) {
+; CHECK-LABEL: vwadd_vv_disjoint_or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vor.vv v9, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %x.i32 = sext <vscale x 2 x i16> %x.i16 to <vscale x 2 x i32>
+ %y.i32 = sext <vscale x 2 x i16> %y.i16 to <vscale x 2 x i32>
+ %or = or disjoint <vscale x 2 x i32> %x.i32, %y.i32
+ ret <vscale x 2 x i32> %or
+}
+
+define <vscale x 2 x i32> @vwaddu_wv_disjoint_or(<vscale x 2 x i32> %x.i32, <vscale x 2 x i16> %y.i16) {
+; CHECK-LABEL: vwaddu_wv_disjoint_or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vwaddu.wv v8, v8, v9
+; CHECK-NEXT: ret
+ %y.i32 = zext <vscale x 2 x i16> %y.i16 to <vscale x 2 x i32>
+ %or = or disjoint <vscale x 2 x i32> %x.i32, %y.i32
+ ret <vscale x 2 x i32> %or
+}
+
+define <vscale x 2 x i32> @vwadd_wv_disjoint_or(<vscale x 2 x i32> %x.i32, <vscale x 2 x i16> %y.i16) {
+; CHECK-LABEL: vwadd_wv_disjoint_or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vwadd.wv v8, v8, v9
+; CHECK-NEXT: ret
+ %y.i32 = sext <vscale x 2 x i16> %y.i16 to <vscale x 2 x i32>
+ %or = or disjoint <vscale x 2 x i32> %x.i32, %y.i32
+ ret <vscale x 2 x i32> %or
+}
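
The FIXME/TODO comments in the disjoint-or tests above rest on the fact that an "or disjoint" of operands with no common set bits produces the same value as an add of those operands (no carries can occur), which is what would let a combine that honours the disjoint flag re-form the widening add. As a standalone illustration only -- the function name is made up and this IR is not part of the patch -- a minimal fixed-width sketch of that equivalence is:

    define <4 x i32> @or_disjoint_as_add_sketch(<4 x i16> %x, <4 x i8> %y) {
      ; Sketch only: %x.shl occupies bits 16..31 and %y.ext occupies bits 0..7,
      ; so the two operands share no set bits and the or computes exactly the
      ; value an add of the same operands would.
      %x.ext = zext <4 x i16> %x to <4 x i32>
      %x.shl = shl <4 x i32> %x.ext, <i32 16, i32 16, i32 16, i32 16>
      %y.ext = zext <4 x i8> %y to <4 x i32>
      %r = or disjoint <4 x i32> %x.shl, %y.ext
      ret <4 x i32> %r
    }
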
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
index 3634162..28fc53f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
@@ -341,10 +341,10 @@ define <vscale x 8 x i64> @vwmulsu_vx_nxv8i64_nxv8i32(<vscale x 8 x i32> %va, i3
define <vscale x 1 x i64> @vwmul_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwmul_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -355,10 +355,10 @@ define <vscale x 1 x i64> @vwmul_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vsc
define <vscale x 1 x i64> @vwmulu_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vwmulu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -369,10 +369,10 @@ define <vscale x 1 x i64> @vwmulu_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vs
define <vscale x 1 x i64> @vwmulsu_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -385,10 +385,10 @@ define <vscale x 1 x i64> @vwmul_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
@@ -402,11 +402,9 @@ define <vscale x 1 x i64> @vwmulu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
; CHECK-LABEL: vwmulu_vx_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v9, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
@@ -421,10 +419,10 @@ define <vscale x 1 x i64> @vwmulsu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i1
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
@@ -437,10 +435,10 @@ define <vscale x 1 x i64> @vwmulsu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i1
define <vscale x 2 x i64> @vwmul_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwmul_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -451,10 +449,10 @@ define <vscale x 2 x i64> @vwmul_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vsc
define <vscale x 2 x i64> @vwmulu_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vwmulu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -465,10 +463,10 @@ define <vscale x 2 x i64> @vwmulu_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vs
define <vscale x 2 x i64> @vwmulsu_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -481,10 +479,10 @@ define <vscale x 2 x i64> @vwmul_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
@@ -498,11 +496,9 @@ define <vscale x 2 x i64> @vwmulu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
; CHECK-LABEL: vwmulu_vx_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v10, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
@@ -517,10 +513,10 @@ define <vscale x 2 x i64> @vwmulsu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i1
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
@@ -533,10 +529,10 @@ define <vscale x 2 x i64> @vwmulsu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i1
define <vscale x 4 x i64> @vwmul_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwmul_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwmul.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -547,10 +543,10 @@ define <vscale x 4 x i64> @vwmul_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vsc
define <vscale x 4 x i64> @vwmulu_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vwmulu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -561,10 +557,10 @@ define <vscale x 4 x i64> @vwmulu_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vs
define <vscale x 4 x i64> @vwmulsu_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vzext.vf2 v14, v9
+; CHECK-NEXT: vwmulsu.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -577,10 +573,10 @@ define <vscale x 4 x i64> @vwmul_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwmul.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
@@ -594,11 +590,9 @@ define <vscale x 4 x i64> @vwmulu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
; CHECK-LABEL: vwmulu_vx_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v12, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
@@ -613,10 +607,10 @@ define <vscale x 4 x i64> @vwmulsu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i1
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vzext.vf2 v14, v9
+; CHECK-NEXT: vwmulsu.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
@@ -629,10 +623,10 @@ define <vscale x 4 x i64> @vwmulsu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i1
define <vscale x 8 x i64> @vwmul_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwmul_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwmul.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -643,10 +637,10 @@ define <vscale x 8 x i64> @vwmul_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vsc
define <vscale x 8 x i64> @vwmulu_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vv v16, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v16
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -657,10 +651,10 @@ define <vscale x 8 x i64> @vwmulu_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vs
define <vscale x 8 x i64> @vwmulsu_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vzext.vf2 v20, v10
+; CHECK-NEXT: vwmulsu.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -673,10 +667,10 @@ define <vscale x 8 x i64> @vwmul_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwmul.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
@@ -690,11 +684,9 @@ define <vscale x 8 x i64> @vwmulu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
; CHECK-LABEL: vwmulu_vx_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
+; CHECK-NEXT: vwmulu.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vzext.vf2 v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
@@ -709,10 +701,10 @@ define <vscale x 8 x i64> @vwmulsu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i1
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vzext.vf2 v20, v10
+; CHECK-NEXT: vwmulsu.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
@@ -725,10 +717,10 @@ define <vscale x 8 x i64> @vwmulsu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i1
define <vscale x 1 x i64> @vwmul_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwmul_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -739,10 +731,10 @@ define <vscale x 1 x i64> @vwmul_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscal
define <vscale x 1 x i64> @vwmulu_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vwmulu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -753,10 +745,10 @@ define <vscale x 1 x i64> @vwmulu_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vsca
define <vscale x 1 x i64> @vwmulsu_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -769,10 +761,10 @@ define <vscale x 1 x i64> @vwmul_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
@@ -786,11 +778,9 @@ define <vscale x 1 x i64> @vwmulu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b
; CHECK-LABEL: vwmulu_vx_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v9, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vzext.vf4 v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
@@ -805,10 +795,10 @@ define <vscale x 1 x i64> @vwmulsu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
@@ -821,10 +811,10 @@ define <vscale x 1 x i64> @vwmulsu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %
define <vscale x 2 x i64> @vwmul_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwmul_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -835,10 +825,10 @@ define <vscale x 2 x i64> @vwmul_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscal
define <vscale x 2 x i64> @vwmulu_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vwmulu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -849,10 +839,10 @@ define <vscale x 2 x i64> @vwmulu_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vsca
define <vscale x 2 x i64> @vwmulsu_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -865,10 +855,10 @@ define <vscale x 2 x i64> @vwmul_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
@@ -882,11 +872,9 @@ define <vscale x 2 x i64> @vwmulu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b
; CHECK-LABEL: vwmulu_vx_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v10, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
@@ -901,10 +889,10 @@ define <vscale x 2 x i64> @vwmulsu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
@@ -917,10 +905,10 @@ define <vscale x 2 x i64> @vwmulsu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %
define <vscale x 4 x i64> @vwmul_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwmul_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwmul.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -931,10 +919,10 @@ define <vscale x 4 x i64> @vwmul_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscal
define <vscale x 4 x i64> @vwmulu_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vwmulu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v12
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -945,10 +933,10 @@ define <vscale x 4 x i64> @vwmulu_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vsca
define <vscale x 4 x i64> @vwmulsu_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vzext.vf4 v14, v9
+; CHECK-NEXT: vwmulsu.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -961,10 +949,10 @@ define <vscale x 4 x i64> @vwmul_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwmul.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
@@ -978,11 +966,9 @@ define <vscale x 4 x i64> @vwmulu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b
; CHECK-LABEL: vwmulu_vx_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v12, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vzext.vf4 v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
@@ -997,10 +983,10 @@ define <vscale x 4 x i64> @vwmulsu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vzext.vf4 v14, v9
+; CHECK-NEXT: vwmulsu.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
@@ -1013,10 +999,10 @@ define <vscale x 4 x i64> @vwmulsu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %
define <vscale x 8 x i64> @vwmul_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwmul_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwmul.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1027,10 +1013,10 @@ define <vscale x 8 x i64> @vwmul_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscal
define <vscale x 8 x i64> @vwmulu_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vv v16, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v16
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1041,10 +1027,10 @@ define <vscale x 8 x i64> @vwmulu_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vsca
define <vscale x 8 x i64> @vwmulsu_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vzext.vf4 v20, v9
+; CHECK-NEXT: vwmulsu.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1057,10 +1043,10 @@ define <vscale x 8 x i64> @vwmul_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwmul.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
@@ -1074,11 +1060,9 @@ define <vscale x 8 x i64> @vwmulu_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b
; CHECK-LABEL: vwmulu_vx_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vzext.vf4 v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
@@ -1093,10 +1077,10 @@ define <vscale x 8 x i64> @vwmulsu_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vzext.vf4 v20, v9
+; CHECK-NEXT: vwmulsu.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll
index 123469a..852814d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll
@@ -421,10 +421,10 @@ define <vscale x 8 x i64> @vwsubu_wx_nxv8i64_nxv8i32(<vscale x 8 x i64> %va, i32
define <vscale x 1 x i64> @vwsub_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwsub_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vsub.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -451,10 +451,10 @@ define <vscale x 1 x i64> @vwsub_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vsub.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -483,9 +483,9 @@ define <vscale x 1 x i64> @vwsubu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
define <vscale x 1 x i64> @vwsub_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwsub_wv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v9
+; CHECK-NEXT: vwsub.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
%vd = sub <vscale x 1 x i64> %va, %vc
@@ -495,9 +495,9 @@ define <vscale x 1 x i64> @vwsub_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vsc
define <vscale x 1 x i64> @vwsubu_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vwsubu.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
%vd = sub <vscale x 1 x i64> %va, %vc
@@ -509,9 +509,9 @@ define <vscale x 1 x i64> @vwsub_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v9
+; CHECK-NEXT: vwsub.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -525,9 +525,9 @@ define <vscale x 1 x i64> @vwsubu_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vwsubu.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -539,10 +539,10 @@ define <vscale x 1 x i64> @vwsubu_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
define <vscale x 2 x i64> @vwsub_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwsub_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vsub.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -569,10 +569,10 @@ define <vscale x 2 x i64> @vwsub_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vsub.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -601,9 +601,9 @@ define <vscale x 2 x i64> @vwsubu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
define <vscale x 2 x i64> @vwsub_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwsub_wv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v11, v10
+; CHECK-NEXT: vwsub.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
%vd = sub <vscale x 2 x i64> %va, %vc
@@ -613,9 +613,9 @@ define <vscale x 2 x i64> @vwsub_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vsc
define <vscale x 2 x i64> @vwsubu_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v11, v10
+; CHECK-NEXT: vwsubu.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
%vd = sub <vscale x 2 x i64> %va, %vc
@@ -627,9 +627,9 @@ define <vscale x 2 x i64> @vwsub_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v11, v10
+; CHECK-NEXT: vwsub.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -643,9 +643,9 @@ define <vscale x 2 x i64> @vwsubu_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v11, v10
+; CHECK-NEXT: vwsubu.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -657,10 +657,10 @@ define <vscale x 2 x i64> @vwsubu_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
define <vscale x 4 x i64> @vwsub_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwsub_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vsub.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwsub.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -687,10 +687,10 @@ define <vscale x 4 x i64> @vwsub_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vsub.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwsub.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -719,9 +719,9 @@ define <vscale x 4 x i64> @vwsubu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
define <vscale x 4 x i64> @vwsub_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwsub_wv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v14, v12
+; CHECK-NEXT: vwsub.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
%vd = sub <vscale x 4 x i64> %va, %vc
@@ -731,9 +731,9 @@ define <vscale x 4 x i64> @vwsub_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vsc
define <vscale x 4 x i64> @vwsubu_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v14, v12
+; CHECK-NEXT: vwsubu.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
%vd = sub <vscale x 4 x i64> %va, %vc
@@ -745,9 +745,9 @@ define <vscale x 4 x i64> @vwsub_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v14, v12
+; CHECK-NEXT: vwsub.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -761,9 +761,9 @@ define <vscale x 4 x i64> @vwsubu_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v14, v12
+; CHECK-NEXT: vwsubu.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -775,10 +775,10 @@ define <vscale x 4 x i64> @vwsubu_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
define <vscale x 8 x i64> @vwsub_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwsub_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vsub.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwsub.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -805,10 +805,10 @@ define <vscale x 8 x i64> @vwsub_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vsub.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwsub.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -837,9 +837,9 @@ define <vscale x 8 x i64> @vwsubu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
define <vscale x 8 x i64> @vwsub_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwsub_wv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v20, v16
+; CHECK-NEXT: vwsub.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
%vd = sub <vscale x 8 x i64> %va, %vc
@@ -849,9 +849,9 @@ define <vscale x 8 x i64> @vwsub_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vsc
define <vscale x 8 x i64> @vwsubu_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v20, v16
+; CHECK-NEXT: vwsubu.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
%vd = sub <vscale x 8 x i64> %va, %vc
@@ -863,9 +863,9 @@ define <vscale x 8 x i64> @vwsub_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v20, v16
+; CHECK-NEXT: vwsub.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -879,9 +879,9 @@ define <vscale x 8 x i64> @vwsubu_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v20, v16
+; CHECK-NEXT: vwsubu.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -893,10 +893,10 @@ define <vscale x 8 x i64> @vwsubu_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
define <vscale x 1 x i64> @vwsub_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwsub_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vsub.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -923,10 +923,10 @@ define <vscale x 1 x i64> @vwsub_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vsub.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -955,9 +955,9 @@ define <vscale x 1 x i64> @vwsubu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b
define <vscale x 1 x i64> @vwsub_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwsub_wv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v9
+; CHECK-NEXT: vwsub.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
%vd = sub <vscale x 1 x i64> %va, %vc
@@ -967,9 +967,9 @@ define <vscale x 1 x i64> @vwsub_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vsca
define <vscale x 1 x i64> @vwsubu_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v9
+; CHECK-NEXT: vwsubu.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
%vd = sub <vscale x 1 x i64> %va, %vc
@@ -981,9 +981,9 @@ define <vscale x 1 x i64> @vwsub_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v9
+; CHECK-NEXT: vwsub.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -997,9 +997,9 @@ define <vscale x 1 x i64> @vwsubu_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v9
+; CHECK-NEXT: vwsubu.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -1011,10 +1011,10 @@ define <vscale x 1 x i64> @vwsubu_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %
define <vscale x 2 x i64> @vwsub_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwsub_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vsub.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -1041,10 +1041,10 @@ define <vscale x 2 x i64> @vwsub_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vsub.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1073,9 +1073,9 @@ define <vscale x 2 x i64> @vwsubu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b
define <vscale x 2 x i64> @vwsub_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwsub_wv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v11, v10
+; CHECK-NEXT: vwsub.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
%vd = sub <vscale x 2 x i64> %va, %vc
@@ -1085,9 +1085,9 @@ define <vscale x 2 x i64> @vwsub_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vsca
define <vscale x 2 x i64> @vwsubu_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v11, v10
+; CHECK-NEXT: vwsubu.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
%vd = sub <vscale x 2 x i64> %va, %vc
@@ -1099,9 +1099,9 @@ define <vscale x 2 x i64> @vwsub_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v11, v10
+; CHECK-NEXT: vwsub.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1115,9 +1115,9 @@ define <vscale x 2 x i64> @vwsubu_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v11, v10
+; CHECK-NEXT: vwsubu.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1129,10 +1129,10 @@ define <vscale x 2 x i64> @vwsubu_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %
define <vscale x 4 x i64> @vwsub_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwsub_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vsub.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwsub.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -1159,10 +1159,10 @@ define <vscale x 4 x i64> @vwsub_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vsub.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwsub.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1191,9 +1191,9 @@ define <vscale x 4 x i64> @vwsubu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b
define <vscale x 4 x i64> @vwsub_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwsub_wv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v14, v12
+; CHECK-NEXT: vwsub.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
%vd = sub <vscale x 4 x i64> %va, %vc
@@ -1203,9 +1203,9 @@ define <vscale x 4 x i64> @vwsub_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vsca
define <vscale x 4 x i64> @vwsubu_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v14, v12
+; CHECK-NEXT: vwsubu.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
%vd = sub <vscale x 4 x i64> %va, %vc
@@ -1217,9 +1217,9 @@ define <vscale x 4 x i64> @vwsub_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v14, v12
+; CHECK-NEXT: vwsub.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1233,9 +1233,9 @@ define <vscale x 4 x i64> @vwsubu_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v14, v12
+; CHECK-NEXT: vwsubu.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1247,10 +1247,10 @@ define <vscale x 4 x i64> @vwsubu_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %
define <vscale x 8 x i64> @vwsub_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwsub_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vsub.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwsub.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1277,10 +1277,10 @@ define <vscale x 8 x i64> @vwsub_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vsub.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwsub.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1309,9 +1309,9 @@ define <vscale x 8 x i64> @vwsubu_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b
define <vscale x 8 x i64> @vwsub_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwsub_wv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v20, v16
+; CHECK-NEXT: vwsub.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
%vd = sub <vscale x 8 x i64> %va, %vc
@@ -1321,9 +1321,9 @@ define <vscale x 8 x i64> @vwsub_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vsca
define <vscale x 8 x i64> @vwsubu_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v20, v16
+; CHECK-NEXT: vwsubu.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
%vd = sub <vscale x 8 x i64> %va, %vc
@@ -1335,9 +1335,9 @@ define <vscale x 8 x i64> @vwsub_wx_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v20, v16
+; CHECK-NEXT: vwsub.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1351,9 +1351,9 @@ define <vscale x 8 x i64> @vwsubu_wx_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v20, v16
+; CHECK-NEXT: vwsubu.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
index 06ed46f..8248c26 100644
--- a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
@@ -83,7 +83,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 8
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/RISCV/spill-fill-fold.ll b/llvm/test/CodeGen/RISCV/spill-fill-fold.ll
index a9a0cc5..8cf5f55 100644
--- a/llvm/test/CodeGen/RISCV/spill-fill-fold.ll
+++ b/llvm/test/CodeGen/RISCV/spill-fill-fold.ll
@@ -290,8 +290,8 @@ define double @spill_i64_to_double(i64 %a) nounwind {
; RV32ID-NEXT: fsd fs9, 40(sp) # 8-byte Folded Spill
; RV32ID-NEXT: fsd fs10, 32(sp) # 8-byte Folded Spill
; RV32ID-NEXT: fsd fs11, 24(sp) # 8-byte Folded Spill
-; RV32ID-NEXT: sw a1, 20(sp)
; RV32ID-NEXT: sw a0, 16(sp)
+; RV32ID-NEXT: sw a1, 20(sp)
; RV32ID-NEXT: fld fa5, 16(sp)
; RV32ID-NEXT: fsd fa5, 8(sp) # 8-byte Folded Spill
; RV32ID-NEXT: #APP
@@ -804,13 +804,15 @@ define double @fill_i64_to_double(i64 %a) nounwind {
; RV32ID-NEXT: fsd fs9, 40(sp) # 8-byte Folded Spill
; RV32ID-NEXT: fsd fs10, 32(sp) # 8-byte Folded Spill
; RV32ID-NEXT: fsd fs11, 24(sp) # 8-byte Folded Spill
-; RV32ID-NEXT: sw a1, 20(sp)
-; RV32ID-NEXT: sw a0, 16(sp)
-; RV32ID-NEXT: fld fa5, 16(sp)
-; RV32ID-NEXT: fsd fa5, 8(sp) # 8-byte Folded Spill
+; RV32ID-NEXT: sw a1, 12(sp) # 4-byte Folded Spill
+; RV32ID-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32ID-NEXT: #APP
; RV32ID-NEXT: #NO_APP
-; RV32ID-NEXT: fld fa0, 8(sp) # 8-byte Folded Reload
+; RV32ID-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; RV32ID-NEXT: sw a0, 16(sp)
+; RV32ID-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; RV32ID-NEXT: sw a0, 20(sp)
+; RV32ID-NEXT: fld fa0, 16(sp)
; RV32ID-NEXT: lw ra, 172(sp) # 4-byte Folded Reload
; RV32ID-NEXT: lw s0, 168(sp) # 4-byte Folded Reload
; RV32ID-NEXT: lw s1, 164(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/stack-inst-compress.mir b/llvm/test/CodeGen/RISCV/stack-inst-compress.mir
index 6721ff1..5cc4615 100644
--- a/llvm/test/CodeGen/RISCV/stack-inst-compress.mir
+++ b/llvm/test/CodeGen/RISCV/stack-inst-compress.mir
@@ -32,6 +32,7 @@ alignment: 2
tracksRegLiveness: true
frameInfo:
maxAlignment: 4
+ adjustsStack: true
hasCalls: true
localFrameSize: 2048
stack:
@@ -117,6 +118,7 @@ alignment: 2
tracksRegLiveness: true
frameInfo:
maxAlignment: 4
+ adjustsStack: true
hasCalls: true
localFrameSize: 4096
stack:
@@ -210,6 +212,7 @@ alignment: 2
tracksRegLiveness: true
frameInfo:
maxAlignment: 4
+ adjustsStack: true
hasCalls: true
localFrameSize: 8192
stack:
diff --git a/llvm/test/CodeGen/RISCV/strip-w-suffix.ll b/llvm/test/CodeGen/RISCV/strip-w-suffix.ll
new file mode 100644
index 0000000..4124b3d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/strip-w-suffix.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=STRIP %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+no-strip-w-suffix -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=NO-STRIP %s
+
+define i32 @addiw(i32 %a) {
+; STRIP-LABEL: addiw:
+; STRIP: # %bb.0:
+; STRIP-NEXT: lui a1, 1
+; STRIP-NEXT: addi a1, a1, -1
+; STRIP-NEXT: addw a0, a0, a1
+; STRIP-NEXT: ret
+;
+; NO-STRIP-LABEL: addiw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: lui a1, 1
+; NO-STRIP-NEXT: addiw a1, a1, -1
+; NO-STRIP-NEXT: addw a0, a0, a1
+; NO-STRIP-NEXT: ret
+ %ret = add i32 %a, 4095
+ ret i32 %ret
+}
+
+define i32 @addw(i32 %a, i32 %b) {
+; STRIP-LABEL: addw:
+; STRIP: # %bb.0:
+; STRIP-NEXT: add a0, a0, a1
+; STRIP-NEXT: addiw a0, a0, 1024
+; STRIP-NEXT: ret
+;
+; NO-STRIP-LABEL: addw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: addw a0, a0, a1
+; NO-STRIP-NEXT: addiw a0, a0, 1024
+; NO-STRIP-NEXT: ret
+ %add = add i32 %a, %b
+ %ret = add i32 %add, 1024
+ ret i32 %ret
+}
+
+define i32 @mulw(i32 %a, i32 %b) {
+; STRIP-LABEL: mulw:
+; STRIP: # %bb.0:
+; STRIP-NEXT: mul a0, a0, a1
+; STRIP-NEXT: addiw a0, a0, 1024
+; STRIP-NEXT: ret
+;
+; NO-STRIP-LABEL: mulw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: mulw a0, a0, a1
+; NO-STRIP-NEXT: addiw a0, a0, 1024
+; NO-STRIP-NEXT: ret
+ %mul = mul i32 %a, %b
+ %ret = add i32 %mul, 1024
+ ret i32 %ret
+}
+
+define i32 @slliw(i32 %a) {
+; STRIP-LABEL: slliw:
+; STRIP: # %bb.0:
+; STRIP-NEXT: slli a0, a0, 1
+; STRIP-NEXT: addiw a0, a0, 1024
+; STRIP-NEXT: ret
+;
+; NO-STRIP-LABEL: slliw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: slliw a0, a0, 1
+; NO-STRIP-NEXT: addiw a0, a0, 1024
+; NO-STRIP-NEXT: ret
+ %shl = shl i32 %a, 1
+ %ret = add i32 %shl, 1024
+ ret i32 %ret
+}
diff --git a/llvm/test/CodeGen/RISCV/tlsdesc-symbol.ll b/llvm/test/CodeGen/RISCV/tlsdesc-symbol.ll
new file mode 100644
index 0000000..23ba2ff
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/tlsdesc-symbol.ll
@@ -0,0 +1,24 @@
+;; The tests in this file do not appear in tls-models.ll because
+;; they are not auto-generated.
+; RUN: llc -mtriple=riscv64 -relocation-model=pic -enable-tlsdesc < %s \
+; RUN: | llvm-mc -triple=riscv64 -filetype=obj -o - \
+; RUN: | llvm-readelf --symbols - \
+; RUN: | FileCheck %s
+
+; RUN: llc -mtriple=riscv32 -relocation-model=pic -enable-tlsdesc < %s \
+; RUN: | llvm-mc -triple=riscv32 -filetype=obj -o - \
+; RUN: | llvm-readelf --symbols - \
+; RUN: | FileCheck %s
+
+; Check that TLS symbols are lowered correctly based on the specified
+; model. Make sure they're external to avoid them all being optimised to Local
+; Exec for the executable.
+
+@unspecified = external thread_local global i32
+
+define ptr @f1() nounwind {
+entry:
+ ret ptr @unspecified
+ ; CHECK: Symbol table '.symtab' contains 7 entries:
+ ; CHECK: TLS {{.*}} unspecified
+}
diff --git a/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll b/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
index 8c0d97a..f1ae320 100644
--- a/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
+++ b/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
@@ -89,8 +89,8 @@ define { i128, i8 } @muloti_test(i128 %l, i128 %r) #0 {
; RISCV32-NEXT: snez a3, a3
; RISCV32-NEXT: and a3, a3, a7
; RISCV32-NEXT: or a2, a3, a2
-; RISCV32-NEXT: or a3, t2, t3
-; RISCV32-NEXT: or a2, a2, a3
+; RISCV32-NEXT: or a2, a2, t2
+; RISCV32-NEXT: or a2, a2, t3
; RISCV32-NEXT: mul a3, a5, a4
; RISCV32-NEXT: andi a2, a2, 1
; RISCV32-NEXT: sw a3, 0(a0)
diff --git a/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll b/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
index e5c2e01..73ace20 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
+++ b/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
@@ -3,7 +3,8 @@
define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 {
; RV32-LABEL: func:
; RV32: # %bb.0: # %entry
-; RV32-NEXT: cm.push {ra, s0-s1}, -24
+; RV32-NEXT: cm.push {ra, s0-s1}, -16
+; RV32-NEXT: addi sp, sp, -8
; RV32-NEXT: .cfi_def_cfa_offset 24
; RV32-NEXT: .cfi_offset ra, -12
; RV32-NEXT: .cfi_offset s0, -8
@@ -31,7 +32,8 @@ define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 {
; RV32-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
; RV32-NEXT: sb a0, 0(s0)
; RV32-NEXT: mv a0, s1
-; RV32-NEXT: cm.popret {ra, s0-s1}, 24
+; RV32-NEXT: addi sp, sp, 8
+; RV32-NEXT: cm.popret {ra, s0-s1}, 16
entry:
br label %while.body
diff --git a/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll b/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
index 63c46ca..95695aa 100644
--- a/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
+++ b/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
@@ -4,21 +4,15 @@
define dso_local void @zdinx_asm(ptr nocapture noundef writeonly %a, double noundef %b, double noundef %c) nounwind {
; CHECK-LABEL: zdinx_asm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 8(sp)
-; CHECK-NEXT: sw a2, 12(sp)
-; CHECK-NEXT: lw a6, 8(sp)
-; CHECK-NEXT: lw a7, 12(sp)
-; CHECK-NEXT: sw a3, 8(sp)
-; CHECK-NEXT: sw a4, 12(sp)
-; CHECK-NEXT: lw a2, 8(sp)
-; CHECK-NEXT: lw a3, 12(sp)
+; CHECK-NEXT: mv a5, a4
+; CHECK-NEXT: mv a7, a2
+; CHECK-NEXT: mv a4, a3
+; CHECK-NEXT: mv a6, a1
; CHECK-NEXT: #APP
-; CHECK-NEXT: fsgnjx.d a2, a6, a2
+; CHECK-NEXT: fsgnjx.d a2, a6, a4
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: sw a2, 8(a0)
; CHECK-NEXT: sw a3, 12(a0)
-; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
entry:
%arrayidx = getelementptr inbounds double, ptr %a, i32 1
diff --git a/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll b/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll
index 3eeb704..f56d477 100644
--- a/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll
+++ b/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll
@@ -7,15 +7,11 @@
define void @foo(ptr nocapture %p, double %d) nounwind {
; RV32ZDINX-LABEL: foo:
; RV32ZDINX: # %bb.0: # %entry
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: sw a1, 8(sp)
-; RV32ZDINX-NEXT: sw a2, 12(sp)
-; RV32ZDINX-NEXT: lw a2, 8(sp)
-; RV32ZDINX-NEXT: lw a3, 12(sp)
+; RV32ZDINX-NEXT: mv a3, a2
; RV32ZDINX-NEXT: addi a0, a0, 2047
+; RV32ZDINX-NEXT: mv a2, a1
; RV32ZDINX-NEXT: sw a2, -3(a0)
; RV32ZDINX-NEXT: sw a3, 1(a0)
-; RV32ZDINX-NEXT: addi sp, sp, 16
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: foo:
@@ -31,16 +27,12 @@ entry:
define void @foo2(ptr nocapture %p, double %d) nounwind {
; RV32ZDINX-LABEL: foo2:
; RV32ZDINX: # %bb.0: # %entry
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: sw a1, 8(sp)
-; RV32ZDINX-NEXT: sw a2, 12(sp)
-; RV32ZDINX-NEXT: lw a2, 8(sp)
-; RV32ZDINX-NEXT: lw a3, 12(sp)
+; RV32ZDINX-NEXT: mv a3, a2
+; RV32ZDINX-NEXT: mv a2, a1
; RV32ZDINX-NEXT: fadd.d a2, a2, a2
; RV32ZDINX-NEXT: addi a0, a0, 2047
; RV32ZDINX-NEXT: sw a2, -3(a0)
; RV32ZDINX-NEXT: sw a3, 1(a0)
-; RV32ZDINX-NEXT: addi sp, sp, 16
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: foo2:
@@ -117,15 +109,11 @@ entry:
define void @foo5(ptr nocapture %p, double %d) nounwind {
; RV32ZDINX-LABEL: foo5:
; RV32ZDINX: # %bb.0: # %entry
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: sw a1, 8(sp)
-; RV32ZDINX-NEXT: sw a2, 12(sp)
-; RV32ZDINX-NEXT: lw a2, 8(sp)
-; RV32ZDINX-NEXT: lw a3, 12(sp)
+; RV32ZDINX-NEXT: mv a3, a2
; RV32ZDINX-NEXT: addi a0, a0, -2048
+; RV32ZDINX-NEXT: mv a2, a1
; RV32ZDINX-NEXT: sw a2, -1(a0)
; RV32ZDINX-NEXT: sw a3, 3(a0)
-; RV32ZDINX-NEXT: addi sp, sp, 16
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: foo5:
@@ -142,19 +130,15 @@ entry:
define void @foo6(ptr %p, double %d) nounwind {
; RV32ZDINX-LABEL: foo6:
; RV32ZDINX: # %bb.0: # %entry
-; RV32ZDINX-NEXT: addi sp, sp, -16
-; RV32ZDINX-NEXT: sw a1, 8(sp)
-; RV32ZDINX-NEXT: sw a2, 12(sp)
-; RV32ZDINX-NEXT: lw a2, 8(sp)
-; RV32ZDINX-NEXT: lw a3, 12(sp)
-; RV32ZDINX-NEXT: lui a1, %hi(.LCPI5_0)
-; RV32ZDINX-NEXT: lw a4, %lo(.LCPI5_0)(a1)
-; RV32ZDINX-NEXT: lw a5, %lo(.LCPI5_0+4)(a1)
+; RV32ZDINX-NEXT: lui a3, %hi(.LCPI5_0)
+; RV32ZDINX-NEXT: lw a4, %lo(.LCPI5_0)(a3)
+; RV32ZDINX-NEXT: lw a5, %lo(.LCPI5_0+4)(a3)
+; RV32ZDINX-NEXT: mv a3, a2
+; RV32ZDINX-NEXT: mv a2, a1
; RV32ZDINX-NEXT: fadd.d a2, a2, a4
; RV32ZDINX-NEXT: addi a0, a0, 2047
; RV32ZDINX-NEXT: sw a2, -3(a0)
; RV32ZDINX-NEXT: sw a3, 1(a0)
-; RV32ZDINX-NEXT: addi sp, sp, 16
; RV32ZDINX-NEXT: ret
;
; RV64ZDINX-LABEL: foo6:
diff --git a/llvm/test/CodeGen/RISCV/zdinx-large-spill.mir b/llvm/test/CodeGen/RISCV/zdinx-large-spill.mir
new file mode 100644
index 0000000..8596a65
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/zdinx-large-spill.mir
@@ -0,0 +1,74 @@
+# NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+# RUN: llc %s -mtriple=riscv32 -mattr=+zdinx -start-before=prologepilog -o - | FileCheck %s
+
+# We want to make sure eliminateFrameIndex doesn't fold sp+2044 as an offset into
+# a GPR pair spill/reload instruction. When the pair spill is split, we would be
+# unable to add 4 to the second half's immediate without overflowing simm12.
+
+--- |
+ define void @foo() {
+ ; CHECK-LABEL: foo:
+ ; CHECK: # %bb.0:
+ ; CHECK-NEXT: addi sp, sp, -2048
+ ; CHECK-NEXT: addi sp, sp, -16
+ ; CHECK-NEXT: .cfi_def_cfa_offset 2064
+ ; CHECK-NEXT: lui t0, 1
+ ; CHECK-NEXT: add t0, sp, t0
+ ; CHECK-NEXT: sw a0, -2040(t0) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a1, -2036(t0) # 4-byte Folded Spill
+ ; CHECK-NEXT: lui a0, 1
+ ; CHECK-NEXT: add a0, sp, a0
+ ; CHECK-NEXT: sw a2, -2048(a0) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a3, -2044(a0) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a4, 2040(sp) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a5, 2044(sp) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a6, 2032(sp) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a7, 2036(sp) # 4-byte Folded Spill
+ ; CHECK-NEXT: lui a0, 1
+ ; CHECK-NEXT: add a0, sp, a0
+ ; CHECK-NEXT: lw a1, -2036(a0) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a0, -2040(a0) # 4-byte Folded Reload
+ ; CHECK-NEXT: lui a0, 1
+ ; CHECK-NEXT: add a0, sp, a0
+ ; CHECK-NEXT: lw a2, -2048(a0) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a3, -2044(a0) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a4, 2040(sp) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a5, 2044(sp) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a6, 2032(sp) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a7, 2036(sp) # 4-byte Folded Reload
+ ; CHECK-NEXT: addi sp, sp, 2032
+ ; CHECK-NEXT: addi sp, sp, 32
+ ; CHECK-NEXT: ret
+ ret void
+ }
+...
+---
+name: foo
+tracksRegLiveness: true
+tracksDebugUserValues: true
+frameInfo:
+ maxAlignment: 4
+stack:
+ - { id: 0, type: spill-slot, size: 8, alignment: 4 }
+ - { id: 1, type: spill-slot, size: 8, alignment: 4 }
+ - { id: 2, type: spill-slot, size: 8, alignment: 4 }
+ - { id: 3, type: spill-slot, size: 8, alignment: 4 }
+ - { id: 4, type: spill-slot, size: 2024, alignment: 4 }
+machineFunctionInfo:
+ varArgsFrameIndex: 0
+ varArgsSaveSize: 0
+body: |
+ bb.0:
+ liveins: $x10_x11, $x12_x13, $x14_x15, $x16_x17
+
+ PseudoRV32ZdinxSD killed renamable $x10_x11, %stack.0, 0 :: (store (s64) into %stack.0, align 4)
+ PseudoRV32ZdinxSD killed renamable $x12_x13, %stack.1, 0 :: (store (s64) into %stack.1, align 4)
+ PseudoRV32ZdinxSD killed renamable $x14_x15, %stack.2, 0 :: (store (s64) into %stack.2, align 4)
+ PseudoRV32ZdinxSD killed renamable $x16_x17, %stack.3, 0 :: (store (s64) into %stack.3, align 4)
+ renamable $x10_x11 = PseudoRV32ZdinxLD %stack.0, 0 :: (load (s64) from %stack.0, align 4)
+ renamable $x12_x13 = PseudoRV32ZdinxLD %stack.1, 0 :: (load (s64) from %stack.1, align 4)
+ renamable $x14_x15 = PseudoRV32ZdinxLD %stack.2, 0 :: (load (s64) from %stack.2, align 4)
+ renamable $x16_x17 = PseudoRV32ZdinxLD %stack.3, 0 :: (load (s64) from %stack.3, align 4)
+ PseudoRET
+
+...
diff --git a/llvm/test/CodeGen/SPIRV/ComparePointers.ll b/llvm/test/CodeGen/SPIRV/ComparePointers.ll
index fd2084d..6777fc3 100644
--- a/llvm/test/CodeGen/SPIRV/ComparePointers.ll
+++ b/llvm/test/CodeGen/SPIRV/ComparePointers.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown --mattr=+spirv1.3 %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; kernel void test(int global *in, int global *in2) {
;; if (!in)
diff --git a/llvm/test/CodeGen/SPIRV/ExecutionMode.ll b/llvm/test/CodeGen/SPIRV/ExecutionMode.ll
index 3e321e1..180b7246 100644
--- a/llvm/test/CodeGen/SPIRV/ExecutionMode.ll
+++ b/llvm/test/CodeGen/SPIRV/ExecutionMode.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: %[[#VOID:]] = OpTypeVoid
diff --git a/llvm/test/CodeGen/SPIRV/LinkOnceODR.ll b/llvm/test/CodeGen/SPIRV/LinkOnceODR.ll
index 3dfdeac..ec660b7 100644
--- a/llvm/test/CodeGen/SPIRV/LinkOnceODR.ll
+++ b/llvm/test/CodeGen/SPIRV/LinkOnceODR.ll
@@ -1,5 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_linkonce_odr %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV-EXT
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_linkonce_odr %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_linkonce_odr %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV-EXT
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_linkonce_odr %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-EXT: Capability Linkage
; CHECK-SPIRV-EXT: Extension "SPV_KHR_linkonce_odr"
diff --git a/llvm/test/CodeGen/SPIRV/LinkOnceODRFun.ll b/llvm/test/CodeGen/SPIRV/LinkOnceODRFun.ll
index 7505c3f..42170dc 100644
--- a/llvm/test/CodeGen/SPIRV/LinkOnceODRFun.ll
+++ b/llvm/test/CodeGen/SPIRV/LinkOnceODRFun.ll
@@ -1,5 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_linkonce_odr %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV-EXT
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_linkonce_odr %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_linkonce_odr %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV-EXT
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_linkonce_odr %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-EXT: Capability Linkage
; CHECK-SPIRV-EXT: Extension "SPV_KHR_linkonce_odr"
diff --git a/llvm/test/CodeGen/SPIRV/assume.ll b/llvm/test/CodeGen/SPIRV/assume.ll
index 6099955..fbf12ef 100644
--- a/llvm/test/CodeGen/SPIRV/assume.ll
+++ b/llvm/test/CodeGen/SPIRV/assume.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=EXT,CHECK %s
-; RUN: llc -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=EXT,CHECK %s
+; RUN: llc -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=EXT,CHECK %s
+; RUN: llc -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=EXT,CHECK %s
; RUN: llc -mtriple=spirv32-unknown-unknown < %s | FileCheck --check-prefixes=NOEXT,CHECK %s
; RUN: llc -mtriple=spirv64-unknown-unknown < %s | FileCheck --check-prefixes=NOEXT,CHECK %s
diff --git a/llvm/test/CodeGen/SPIRV/capability-kernel.ll b/llvm/test/CodeGen/SPIRV/capability-kernel.ll
index 03ea58c..fea1951 100644
--- a/llvm/test/CodeGen/SPIRV/capability-kernel.ll
+++ b/llvm/test/CodeGen/SPIRV/capability-kernel.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpCapability Addresses
diff --git a/llvm/test/CodeGen/SPIRV/empty-logical.ll b/llvm/test/CodeGen/SPIRV/empty-logical.ll
index a99df5f..1c66040 100644
--- a/llvm/test/CodeGen/SPIRV/empty-logical.ll
+++ b/llvm/test/CodeGen/SPIRV/empty-logical.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; Ensure the required Capabilities are listed.
; CHECK-DAG: OpCapability Shader
diff --git a/llvm/test/CodeGen/SPIRV/empty-module.ll b/llvm/test/CodeGen/SPIRV/empty-module.ll
index f220176..b56e58c 100644
--- a/llvm/test/CodeGen/SPIRV/empty-module.ll
+++ b/llvm/test/CodeGen/SPIRV/empty-module.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpCapability Addresses
; CHECK-DAG: OpCapability Linkage
diff --git a/llvm/test/CodeGen/SPIRV/empty-opencl32.ll b/llvm/test/CodeGen/SPIRV/empty-opencl32.ll
index a373781..8e826ec 100644
--- a/llvm/test/CodeGen/SPIRV/empty-opencl32.ll
+++ b/llvm/test/CodeGen/SPIRV/empty-opencl32.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; FIXME: ensure Magic Number, version number, generator's magic number, "bound" and "schema" are at least present
diff --git a/llvm/test/CodeGen/SPIRV/empty-opencl64.ll b/llvm/test/CodeGen/SPIRV/empty-opencl64.ll
index d101965..4eaa2e4 100644
--- a/llvm/test/CodeGen/SPIRV/empty-opencl64.ll
+++ b/llvm/test/CodeGen/SPIRV/empty-opencl64.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; FIXME: ensure Magic Number, version number, generator's magic number, "bound" and "schema" are at least present
diff --git a/llvm/test/CodeGen/SPIRV/empty.ll b/llvm/test/CodeGen/SPIRV/empty.ll
index fdcf316..390ab32 100644
--- a/llvm/test/CodeGen/SPIRV/empty.ll
+++ b/llvm/test/CodeGen/SPIRV/empty.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK: OpCapability Addresses
; CHECK: "foo"
diff --git a/llvm/test/CodeGen/SPIRV/exec_mode_float_control_khr.ll b/llvm/test/CodeGen/SPIRV/exec_mode_float_control_khr.ll
index 473794a..721e825 100644
--- a/llvm/test/CodeGen/SPIRV/exec_mode_float_control_khr.ll
+++ b/llvm/test/CodeGen/SPIRV/exec_mode_float_control_khr.ll
@@ -1,5 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=SPV
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s --mattr=+spirv1.3 --spirv-extensions=SPV_KHR_float_controls -o - | FileCheck %s --check-prefixes=SPVEXT
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s --mattr=+spirv1.3 --spirv-ext=+SPV_KHR_float_controls -o - | FileCheck %s --check-prefixes=SPVEXT
define dso_local dllexport spir_kernel void @k_float_controls_0(i32 %ibuf, i32 %obuf) local_unnamed_addr {
entry:
diff --git a/llvm/test/CodeGen/SPIRV/expect.ll b/llvm/test/CodeGen/SPIRV/expect.ll
index 51555cd..82c1ec7 100644
--- a/llvm/test/CodeGen/SPIRV/expect.ll
+++ b/llvm/test/CodeGen/SPIRV/expect.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=CHECK,EXT %s
-; RUN: llc -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=CHECK,EXT %s
+; RUN: llc -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=CHECK,EXT %s
+; RUN: llc -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=CHECK,EXT %s
; RUN: llc -mtriple=spirv32-unknown-unknown < %s | FileCheck --check-prefixes=CHECK,NOEXT %s
; RUN: llc -mtriple=spirv64-unknown-unknown < %s | FileCheck --check-prefixes=CHECK,NOEXT %s
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll
index 1bfa556..e7b6679 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_add %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_add %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_add
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll
index 627b59f..4fb99d9 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_add %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_add %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_add
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll
index fffda4b..2f536dc 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll
@@ -1,7 +1,7 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR1
-; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_add %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR2
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_add %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR2
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_add --spirv-extensions=SPV_EXT_shader_atomic_float16_add %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_add,+SPV_EXT_shader_atomic_float16_add %s -o - | FileCheck %s
; CHECK-ERROR1: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_add
; CHECK-ERROR2: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float16_add
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll
index 3c6fa27..7654c36 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_min_max
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll
index cc52e4c..8a35990 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_min_max
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll
index b406aee..45baaa8 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_min_max
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_arbitrary_precision_integers.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_arbitrary_precision_integers.ll
index b68fb36..f49367c 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_arbitrary_precision_integers.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_arbitrary_precision_integers.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_arbitrary_precision_integers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_arbitrary_precision_integers %s -o - | FileCheck %s
define i6 @getConstantI6() {
ret i6 2
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative1.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative1.ll
index 2f3c859d..4326d8d 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative1.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative1.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
; CHECK-ERROR: result and argument must have the same number of components
target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative2.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative2.ll
index 8ab84d6..57f52b9 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative2.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative2.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
; CHECK-ERROR: result and argument must have the same number of components
target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative3.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative3.ll
index 20a8042..2cb229e 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative3.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative3.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
; CHECK-ERROR: result and argument must have the same number of components
target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative4.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative4.ll
index efbd50b..eb5a2c7 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative4.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative4.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
; CHECK-ERROR: result and argument must have the same number of components
target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv.ll
index 2bd59b2..91fa340e 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv.ll
@@ -1,5 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o - | FileCheck %s
-; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o - -filetype=obj | spirv-val %}
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
; CHECK-ERROR: the builtin requires the following SPIR-V extension: SPV_INTEL_bfloat16_conversion
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll
index 0bd1b5d..5f073e9 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpCapability Int8
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_two_calls.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_two_calls.ll
index 89de098..b7fecef 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_two_calls.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_two_calls.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpCapability Int8
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_optnone.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_optnone.ll
index afbcaec..a611be8 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_optnone.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_optnone.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_optnone %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-EXTENSION
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_optnone %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-EXTENSION
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-NO-EXTENSION
; CHECK-EXTENSION: OpCapability OptNoneINTEL
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_subgroups/cl_intel_sub_groups.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_subgroups/cl_intel_sub_groups.ll
index 0e0b2a4..df17ec4 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_subgroups/cl_intel_sub_groups.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_subgroups/cl_intel_sub_groups.ll
@@ -37,7 +37,7 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_subgroups %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_subgroups %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: intel_sub_group_shuffle: the builtin requires the following SPIR-V extension: SPV_INTEL_subgroups
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll
index 30c1635..b5df462 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll
@@ -1,7 +1,7 @@
; Modified from: https://github.com/KhronosGroup/SPIRV-LLVM-Translator/test/extensions/INTEL/SPV_INTEL_usm_storage_classes/intel_usm_addrspaces.ll
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_usm_storage_classes %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-EXT
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_INTEL_usm_storage_classes %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_usm_storage_classes %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-EXT
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_usm_storage_classes %s -o - -filetype=obj | spirv-val %}
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-WITHOUT
; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr.ll
index 897aab7..8a54d22 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr.ll
@@ -1,8 +1,8 @@
; Modified from: https://github.com/KhronosGroup/SPIRV-LLVM-Translator/test/extensions/INTEL/SPV_INTEL_variable_length_array/basic.ll
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_variable_length_array %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_variable_length_array %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_variable_length_array %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_variable_length_array %s -o - -filetype=obj | spirv-val %}
; CHECK-ERROR: LLVM ERROR: array allocation: this instruction requires the following SPIR-V extension: SPV_INTEL_variable_length_array
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr_spec_const.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr_spec_const.ll
index fbac43e..7b9f75d 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr_spec_const.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr_spec_const.ll
@@ -1,7 +1,7 @@
; Modified from: https://github.com/KhronosGroup/SPIRV-LLVM-Translator/test/extensions/INTEL/SPV_INTEL_variable_length_array/vla_spec_const.ll
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_variable_length_array %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_variable_length_array %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_variable_length_array %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_variable_length_array %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: Capability VariableLengthArrayINTEL
; CHECK-SPIRV: Extension "SPV_INTEL_variable_length_array"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll
index 95395d5..100f02f 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s --spirv-extensions=SPV_KHR_bit_instructions -o - | FileCheck %s --check-prefix=CHECK-EXTENSION
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s --spirv-ext=+SPV_KHR_bit_instructions -o - | FileCheck %s --check-prefix=CHECK-EXTENSION
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-NO-EXTENSION
; CHECK-EXTENSION: OpCapability BitInstructions
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_no_integer_wrap_decoration.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_no_integer_wrap_decoration.ll
index e74dd99..0d9ab4a 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_no_integer_wrap_decoration.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_no_integer_wrap_decoration.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s
; CHECK-DAG: OpExtension "SPV_KHR_no_integer_wrap_decoration"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll
index b1d6a09..63aade4 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_subgroup_rotate %s -o - | FileCheck %s
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_KHR_subgroup_rotate %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_subgroup_rotate %s -o - | FileCheck %s
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_subgroup_rotate %s -o - -filetype=obj | spirv-val %}
; CHECK-ERROR: LLVM ERROR: OpGroupNonUniformRotateKHR instruction requires the following SPIR-V extension: SPV_KHR_subgroup_rotate
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_uniform_group_instructions/uniform-group-instructions.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_uniform_group_instructions/uniform-group-instructions.ll
index 39bf63d..0de654b 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_uniform_group_instructions/uniform-group-instructions.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_uniform_group_instructions/uniform-group-instructions.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_uniform_group_instructions %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_uniform_group_instructions %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: __spirv_GroupBitwiseAndKHR: the builtin requires the following SPIR-V extension: SPV_KHR_uniform_group_instructions
diff --git a/llvm/test/CodeGen/SPIRV/extensions/both-allowed-disallowed-extension-error.ll b/llvm/test/CodeGen/SPIRV/extensions/both-allowed-disallowed-extension-error.ll
new file mode 100644
index 0000000..fc07cca
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/both-allowed-disallowed-extension-error.ll
@@ -0,0 +1,7 @@
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_arbitrary_precision_integers,-SPV_INTEL_arbitrary_precision_integers %s -o %t.spvt 2>&1 | FileCheck %s
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=-SPV_INTEL_arbitrary_precision_integers,+SPV_INTEL_arbitrary_precision_integers %s -o %t.spvt 2>&1 | FileCheck %s
+; CHECK: Extension cannot be allowed and disallowed at the same time: SPV_INTEL_arbitrary_precision_integers
+
+define i8 @foo() {
+ ret i8 2
+}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll
new file mode 100644
index 0000000..973a5e6
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll
@@ -0,0 +1,9 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=all,-SPV_INTEL_arbitrary_precision_integers %s -o - | FileCheck %s
+
+define i6 @foo() {
+ %call = tail call i32 @llvm.bitreverse.i32(i32 42)
+ ret i6 2
+}
+
+; CHECK-NOT: OpExtension "SPV_INTEL_arbitrary_precision_integers"
+; CHECK-DAG: OpExtension "SPV_KHR_bit_instructions"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions.ll b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions.ll
new file mode 100644
index 0000000..a5b97946
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions.ll
@@ -0,0 +1,7 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=all %s -o - | FileCheck %s
+
+define i6 @getConstantI6() {
+ ret i6 2
+}
+
+; CHECK: OpExtension "SPV_INTEL_arbitrary_precision_integers"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/invalid-extension-list-format.ll b/llvm/test/CodeGen/SPIRV/extensions/invalid-extension-list-format.ll
new file mode 100644
index 0000000..207ed4b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/invalid-extension-list-format.ll
@@ -0,0 +1,6 @@
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=UNKNOWN_EXTENSION %s -o %t.spvt 2>&1 | FileCheck %s
+; CHECK: Invalid extension list format: UNKNOWN_EXTENSION
+
+define i8 @foo() {
+ ret i8 2
+}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/unknown-extension-name.ll b/llvm/test/CodeGen/SPIRV/extensions/unknown-extension-name.ll
new file mode 100644
index 0000000..f4f5424
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/unknown-extension-name.ll
@@ -0,0 +1,6 @@
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+UNKNOWN_EXTENSION %s -o %t.spvt 2>&1 | FileCheck %s
+; CHECK: Unknown SPIR-V extension: +UNKNOWN_EXTENSION
+
+define i8 @foo() {
+ ret i8 2
+}
diff --git a/llvm/test/CodeGen/SPIRV/fence.ll b/llvm/test/CodeGen/SPIRV/fence.ll
new file mode 100644
index 0000000..5da5866
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/fence.ll
@@ -0,0 +1,54 @@
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpName %[[#GetScope:]] "_Z8getScopev"
+; CHECK-DAG: %[[#Long:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#ScopeDevice:]] = OpConstant %[[#Long]] 1
+; CHECK-DAG: %[[#WrkGrpConst2:]] = OpConstant %[[#Long]] 2
+; CHECK-DAG: %[[#Const3:]] = OpConstant %[[#Long]] 3
+; CHECK-DAG: %[[#InvocationConst4:]] = OpConstant %[[#Long]] 4
+; CHECK-DAG: %[[#Const8:]] = OpConstant %[[#Long]] 8
+; CHECK-DAG: %[[#Const16:]] = OpConstant %[[#Long]] 16
+; CHECK-DAG: %[[#Const912:]] = OpConstant %[[#Long]] 912
+; CHECK: OpMemoryBarrier %[[#ScopeDevice]] %[[#WrkGrpConst2]]
+; CHECK: OpMemoryBarrier %[[#ScopeDevice]] %[[#InvocationConst4]]
+; CHECK: OpMemoryBarrier %[[#ScopeDevice]] %[[#Const8]]
+; CHECK: OpMemoryBarrier %[[#InvocationConst4]] %[[#Const16]]
+; CHECK: OpMemoryBarrier %[[#WrkGrpConst2]] %[[#InvocationConst4]]
+; CHECK: OpFunctionEnd
+; CHECK: %[[#ScopeId:]] = OpFunctionCall %[[#Long]] %[[#GetScope]]
+; CHECK: OpControlBarrier %[[#Const3]] %[[#ScopeId]] %[[#Const912]]
+
+define spir_kernel void @fence_test_kernel1(ptr addrspace(1) noalias %s.ascast) {
+ fence acquire
+ ret void
+}
+
+define spir_kernel void @fence_test_kernel2(ptr addrspace(1) noalias %s.ascast) {
+ fence release
+ ret void
+}
+
+define spir_kernel void @fence_test_kernel3(ptr addrspace(1) noalias %s.ascast) {
+ fence acq_rel
+ ret void
+}
+
+define spir_kernel void @fence_test_kernel4(ptr addrspace(1) noalias %s.ascast) {
+ fence syncscope("singlethread") seq_cst
+ ret void
+}
+
+define spir_kernel void @fence_test_kernel5(ptr addrspace(1) noalias %s.ascast) {
+ fence syncscope("workgroup") release
+ ret void
+}
+
+define spir_func void @barrier_test1() {
+ %scope = call noundef i32 @_Z8getScopev()
+ call void @_Z22__spirv_ControlBarrieriii(i32 noundef 3, i32 noundef %scope, i32 noundef 912)
+ ret void
+}
+
+declare spir_func void @_Z22__spirv_ControlBarrieriii(i32 noundef, i32 noundef, i32 noundef)
+declare spir_func i32 @_Z8getScopev()
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveGetLaneIndex.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveGetLaneIndex.ll
new file mode 100644
index 0000000..ec35690
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveGetLaneIndex.ll
@@ -0,0 +1,68 @@
+; RUN: llc -O0 -mtriple=spirv-vulkan-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan-unknown %s -o - -filetype=obj | spirv-val %}
+
+; This file was generated from the following command:
+; clang -cc1 -triple spirv-vulkan-compute -x hlsl -emit-llvm -finclude-default-header -o - - <<EOF
+; [numthreads(1, 1, 1)]
+; void main() {
+; int idx = WaveGetLaneIndex();
+; }
+; EOF
+
+; CHECK-DAG: OpCapability Shader
+; CHECK-DAG: OpCapability GroupNonUniform
+; CHECK-DAG: OpDecorate %[[#var:]] BuiltIn SubgroupLocalInvocationId
+; CHECK-DAG: %[[#int:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#ptri:]] = OpTypePointer Input %[[#int]]
+; CHECK-DAG: %[[#ptrf:]] = OpTypePointer Function %[[#int]]
+; CHECK-DAG: %[[#var]] = OpVariable %[[#ptri]] Input
+
+; CHECK-NOT: OpDecorate %[[#var]] LinkageAttributes
+
+
+; ModuleID = '-'
+source_filename = "-"
+target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024"
+target triple = "spirv-unknown-vulkan-compute"
+
+; Function Attrs: convergent noinline norecurse nounwind optnone
+define internal spir_func void @main() #0 {
+entry:
+ %0 = call token @llvm.experimental.convergence.entry()
+ %idx = alloca i32, align 4
+; CHECK: %[[#idx:]] = OpVariable %[[#ptrf]] Function
+
+ %1 = call i32 @__hlsl_wave_get_lane_index() [ "convergencectrl"(token %0) ]
+; CHECK: %[[#tmp:]] = OpLoad %[[#int]] %[[#var]]
+
+ store i32 %1, ptr %idx, align 4
+; CHECK: OpStore %[[#idx]] %[[#tmp]]
+
+ ret void
+}
+
+; Function Attrs: norecurse
+define void @main.1() #1 {
+entry:
+ call void @main()
+ ret void
+}
+
+; Function Attrs: convergent
+declare i32 @__hlsl_wave_get_lane_index() #2
+
+; Function Attrs: convergent nocallback nofree nosync nounwind willreturn memory(none)
+declare token @llvm.experimental.convergence.entry() #3
+
+attributes #0 = { convergent noinline norecurse nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+attributes #1 = { norecurse "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+attributes #2 = { convergent }
+attributes #3 = { convergent nocallback nofree nosync nounwind willreturn memory(none) }
+
+!llvm.module.flags = !{!0, !1}
+!llvm.ident = !{!2}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 4, !"dx.disable_optimizations", i32 1}
+!2 = !{!"clang version 19.0.0git (/usr/local/google/home/nathangauer/projects/llvm-project/clang bc6fd04b73a195981ee77823cf1382d04ab96c44)"}
+
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll
index 7031129..38c033b 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll
@@ -1,4 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv-unknown-linux %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK: OpExtInstImport "GLSL.std.450"
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/ceil.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/ceil.ll
new file mode 100644
index 0000000..1b358ae
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/ceil.ll
@@ -0,0 +1,20 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @ceil_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Ceil %[[#]]
+ %elt.ceil = call float @llvm.ceil.f32(float %a)
+ ret float %elt.ceil
+}
+
+define noundef half @ceil_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Ceil %[[#]]
+ %elt.ceil = call half @llvm.ceil.f16(half %a)
+ ret half %elt.ceil
+}
+
+declare half @llvm.ceil.f16(half)
+declare float @llvm.ceil.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/cos.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/cos.ll
new file mode 100644
index 0000000..28675cf
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/cos.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @cos_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Cos %[[#]]
+ %elt.cos = call float @llvm.cos.f32(float %a)
+ ret float %elt.cos
+}
+
+define noundef half @cos_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Cos %[[#]]
+ %elt.cos = call half @llvm.cos.f16(half %a)
+ ret half %elt.cos
+}
+
+declare half @llvm.cos.f16(half)
+declare float @llvm.cos.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp.ll
new file mode 100644
index 0000000..ee230df
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @exp_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Exp %[[#]]
+ %elt.exp = call float @llvm.exp.f32(float %a)
+ ret float %elt.exp
+}
+
+define noundef half @exp_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Exp %[[#]]
+ %elt.exp = call half @llvm.exp.f16(half %a)
+ ret half %elt.exp
+}
+
+declare half @llvm.exp.f16(half)
+declare float @llvm.exp.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp2.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp2.ll
new file mode 100644
index 0000000..eeaca1b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/exp2.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @exp2_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Exp2 %[[#]]
+ %elt.exp2 = call float @llvm.exp2.f32(float %a)
+ ret float %elt.exp2
+}
+
+define noundef half @exp2_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Exp2 %[[#]]
+ %elt.exp2 = call half @llvm.exp2.f16(half %a)
+ ret half %elt.exp2
+}
+
+declare half @llvm.exp2.f16(half)
+declare float @llvm.exp2.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/floor.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/floor.ll
new file mode 100644
index 0000000..5b972104
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/floor.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @floor_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Floor %[[#]]
+ %elt.floor = call float @llvm.floor.f32(float %a)
+ ret float %elt.floor
+}
+
+define noundef half @floor_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Floor %[[#]]
+ %elt.floor = call half @llvm.floor.f16(half %a)
+ ret half %elt.floor
+}
+
+declare half @llvm.floor.f16(half)
+declare float @llvm.floor.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmad.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmad.ll
new file mode 100644
index 0000000..a3fec10
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmad.ll
@@ -0,0 +1,29 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef half @fmad_half(half noundef %a, half noundef %b, half noundef %c) #0 {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Fma %[[#]] %[[#]] %[[#]]
+ %dx.fmad = call half @llvm.fmuladd.f16(half %a, half %b, half %c)
+ ret half %dx.fmad
+}
+
+define noundef float @fmad_float(float noundef %a, float noundef %b, float noundef %c) #0 {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Fma %[[#]] %[[#]] %[[#]]
+ %dx.fmad = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
+ ret float %dx.fmad
+}
+
+define noundef double @fmad_double(double noundef %a, double noundef %b, double noundef %c) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Fma %[[#]] %[[#]] %[[#]]
+ %dx.fmad = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
+ ret double %dx.fmad
+}
+
+declare half @llvm.fmuladd.f16(half, half, half)
+declare float @llvm.fmuladd.f32(float, float, float)
+declare double @llvm.fmuladd.f64(double, double, double)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmax.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmax.ll
new file mode 100644
index 0000000..48e9165
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmax.ll
@@ -0,0 +1,29 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+; TODO: This needs to be NMax. See https://github.com/llvm/llvm-project/issues/87072
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef half @test_fmax_half(half noundef %a, half noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] FMax %[[#]] %[[#]]
+ %0 = call half @llvm.maxnum.f16(half %a, half %b)
+ ret half %0
+}
+
+define noundef float @test_fmax_float(float noundef %a, float noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] FMax %[[#]] %[[#]]
+ %0 = call float @llvm.maxnum.f32(float %a, float %b)
+ ret float %0
+}
+
+define noundef double @test_fmax_double(double noundef %a, double noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] FMax %[[#]] %[[#]]
+ %0 = call double @llvm.maxnum.f64(double %a, double %b)
+ ret double %0
+}
+
+declare half @llvm.maxnum.f16(half, half)
+declare float @llvm.maxnum.f32(float, float)
+declare double @llvm.maxnum.f64(double, double)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmin.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmin.ll
new file mode 100644
index 0000000..5bfd69c
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/fmin.ll
@@ -0,0 +1,31 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+; TODO: This needs to be NMin. See https://github.com/llvm/llvm-project/issues/87072
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+; CHECK: OpMemoryModel Logical GLSL450
+
+define noundef half @test_fmin_half(half noundef %a, half noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] FMin %[[#]] %[[#]]
+ %0 = call half @llvm.minnum.f16(half %a, half %b)
+ ret half %0
+}
+
+define noundef float @test_fmin_float(float noundef %a, float noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] FMin %[[#]] %[[#]]
+ %0 = call float @llvm.minnum.f32(float %a, float %b)
+ ret float %0
+}
+
+define noundef double @test_fmin_double(double noundef %a, double noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] FMin %[[#]] %[[#]]
+ %0 = call double @llvm.minnum.f64(double %a, double %b)
+ ret double %0
+}
+
+declare half @llvm.minnum.f16(half, half)
+declare float @llvm.minnum.f32(float, float)
+declare double @llvm.minnum.f64(double, double)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log.ll
new file mode 100644
index 0000000..5a09f32
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @log_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Log %[[#]]
+ %elt.log = call float @llvm.log.f32(float %a)
+ ret float %elt.log
+}
+
+define noundef half @log_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Log %[[#]]
+ %elt.log = call half @llvm.log.f16(half %a)
+ ret half %elt.log
+}
+
+declare half @llvm.log.f16(half)
+declare float @llvm.log.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll
index e7b00eb..52ca6812 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll
@@ -1,4 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv-unknown-linux %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK: %[[#extinst:]] = OpExtInstImport "GLSL.std.450"
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log2.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log2.ll
new file mode 100644
index 0000000..21f02a4
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log2.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @log2_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Log2 %[[#]]
+ %elt.log2 = call float @llvm.log2.f32(float %a)
+ ret float %elt.log2
+}
+
+define noundef half @log2_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Log2 %[[#]]
+ %elt.log2 = call half @llvm.log2.f16(half %a)
+ ret half %elt.log2
+}
+
+declare half @llvm.log2.f16(half)
+declare float @llvm.log2.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/pow.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/pow.ll
new file mode 100644
index 0000000..7fae963
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/pow.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @pow_float(float noundef %a, float noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Pow %[[#]]
+ %elt.pow = call float @llvm.pow.f32(float %a, float %b)
+ ret float %elt.pow
+}
+
+define noundef half @pow_half(half noundef %a, half noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Pow %[[#]]
+ %elt.pow = call half @llvm.pow.f16(half %a, half %b)
+ ret half %elt.pow
+}
+
+declare half @llvm.pow.f16(half, half)
+declare float @llvm.pow.f32(float, float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/reversebits.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/reversebits.ll
new file mode 100644
index 0000000..e58c9ab
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/reversebits.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpMemoryModel Logical GLSL450
+
+define noundef i32 @reversebits_i32(i32 noundef %a) {
+entry:
+; CHECK: %[[#]] = OpBitReverse %[[#]] %[[#]]
+ %elt.bitreverse = call i32 @llvm.bitreverse.i32(i32 %a)
+ ret i32 %elt.bitreverse
+}
+
+define noundef i16 @reversebits_i16(i16 noundef %a) {
+entry:
+; CHECK: %[[#]] = OpBitReverse %[[#]] %[[#]]
+ %elt.bitreverse = call i16 @llvm.bitreverse.i16(i16 %a)
+ ret i16 %elt.bitreverse
+}
+
+declare i16 @llvm.bitreverse.i16(i16)
+declare i32 @llvm.bitreverse.i32(i32)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/round.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/round.ll
new file mode 100644
index 0000000..baf2083
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/round.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @round_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] RoundEven %[[#]]
+ %elt.roundeven = call float @llvm.roundeven.f32(float %a)
+ ret float %elt.roundeven
+}
+
+define noundef half @round_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] RoundEven %[[#]]
+ %elt.roundeven = call half @llvm.roundeven.f16(half %a)
+ ret half %elt.roundeven
+}
+
+declare half @llvm.roundeven.f16(half)
+declare float @llvm.roundeven.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sin.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sin.ll
new file mode 100644
index 0000000..061af5b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sin.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @sin_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Sin %[[#]]
+ %elt.sin = call float @llvm.sin.f32(float %a)
+ ret float %elt.sin
+}
+
+define noundef half @sin_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Sin %[[#]]
+ %elt.sin = call half @llvm.sin.f16(half %a)
+ ret half %elt.sin
+}
+
+declare half @llvm.sin.f16(half)
+declare float @llvm.sin.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smax.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smax.ll
new file mode 100644
index 0000000..6bbf103
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smax.ll
@@ -0,0 +1,29 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef i16 @test_smax_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] SMax %[[#]] %[[#]]
+ %0 = call i16 @llvm.smax.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
+
+define noundef i32 @test_smax_i32(i32 noundef %a, i32 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] SMax %[[#]] %[[#]]
+ %0 = call i32 @llvm.smax.i32(i32 %a, i32 %b)
+ ret i32 %0
+}
+
+define noundef i64 @test_smax_i64(i64 noundef %a, i64 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] SMax %[[#]] %[[#]]
+ %0 = call i64 @llvm.smax.i64(i64 %a, i64 %b)
+ ret i64 %0
+}
+
+declare i16 @llvm.smax.i16(i16, i16)
+declare i32 @llvm.smax.i32(i32, i32)
+declare i64 @llvm.smax.i64(i64, i64)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smin.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smin.ll
new file mode 100644
index 0000000..04ab960
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/smin.ll
@@ -0,0 +1,32 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+
+define noundef i16 @test_smin_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] SMin %[[#]] %[[#]]
+ %0 = call i16 @llvm.smin.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
+
+
+define noundef i32 @test_smin_i32(i32 noundef %a, i32 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] SMin %[[#]] %[[#]]
+ %0 = call i32 @llvm.smin.i32(i32 %a, i32 %b)
+ ret i32 %0
+}
+
+
+define noundef i64 @test_smin_i64(i64 noundef %a, i64 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] SMin %[[#]] %[[#]]
+ %0 = call i64 @llvm.smin.i64(i64 %a, i64 %b)
+ ret i64 %0
+}
+
+declare i16 @llvm.smin.i16(i16, i16)
+declare i32 @llvm.smin.i32(i32, i32)
+declare i64 @llvm.smin.i64(i64, i64)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sqrt.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sqrt.ll
new file mode 100644
index 0000000..6882b77
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/sqrt.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @sqrt_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Sqrt %[[#]]
+ %elt.sqrt = call float @llvm.sqrt.f32(float %a)
+ ret float %elt.sqrt
+}
+
+define noundef half @sqrt_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Sqrt %[[#]]
+ %elt.sqrt = call half @llvm.sqrt.f16(half %a)
+ ret half %elt.sqrt
+}
+
+declare half @llvm.sqrt.f16(half)
+declare float @llvm.sqrt.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/trunc.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/trunc.ll
new file mode 100644
index 0000000..d75b7fa
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/trunc.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef float @trunc_float(float noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Trunc %[[#]]
+ %elt.trunc = call float @llvm.trunc.f32(float %a)
+ ret float %elt.trunc
+}
+
+define noundef half @trunc_half(half noundef %a) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Trunc %[[#]]
+ %elt.trunc = call half @llvm.trunc.f16(half %a)
+ ret half %elt.trunc
+}
+
+declare half @llvm.trunc.f16(half)
+declare float @llvm.trunc.f32(float)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umax.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umax.ll
new file mode 100644
index 0000000..32677df
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umax.ll
@@ -0,0 +1,29 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+define noundef i16 @test_umax_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] UMax %[[#]] %[[#]]
+ %0 = call i16 @llvm.umax.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
+
+define noundef i32 @test_umax_i32(i32 noundef %a, i32 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] UMax %[[#]] %[[#]]
+ %0 = call i32 @llvm.umax.i32(i32 %a, i32 %b)
+ ret i32 %0
+}
+
+define noundef i64 @test_umax_i64(i64 noundef %a, i64 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] UMax %[[#]] %[[#]]
+ %0 = call i64 @llvm.umax.i64(i64 %a, i64 %b)
+ ret i64 %0
+}
+
+declare i16 @llvm.umax.i16(i16, i16)
+declare i32 @llvm.umax.i32(i32, i32)
+declare i64 @llvm.umax.i64(i64, i64)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umin.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umin.ll
new file mode 100644
index 0000000..a91fb80
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/umin.ll
@@ -0,0 +1,32 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpExtInstImport "GLSL.std.450"
+
+
+define noundef i16 @test_umin_i16(i16 noundef %a, i16 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] UMin %[[#]] %[[#]]
+ %0 = call i16 @llvm.umin.i16(i16 %a, i16 %b)
+ ret i16 %0
+}
+
+
+define noundef i32 @test_umin_i32(i32 noundef %a, i32 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] UMin %[[#]] %[[#]]
+ %0 = call i32 @llvm.umin.i32(i32 %a, i32 %b)
+ ret i32 %0
+}
+
+
+define noundef i64 @test_umin_i64(i64 noundef %a, i64 noundef %b) {
+entry:
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] UMin %[[#]] %[[#]]
+ %0 = call i64 @llvm.umin.i64(i64 %a, i64 %b)
+ ret i64 %0
+}
+
+declare i16 @llvm.umin.i16(i16, i16)
+declare i32 @llvm.umin.i32(i32, i32)
+declare i64 @llvm.umin.i64(i64, i64)
diff --git a/llvm/test/CodeGen/SPIRV/instructions/atomic.ll b/llvm/test/CodeGen/SPIRV/instructions/atomic.ll
index 9715504..ce59bb2 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/atomic.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/atomic.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpName [[ADD:%.*]] "test_add"
; CHECK-DAG: OpName [[SUB:%.*]] "test_sub"
@@ -20,7 +21,8 @@
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_add(i32* %ptr, i32 %val) {
@@ -32,7 +34,8 @@ define i32 @test_add(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_sub(i32* %ptr, i32 %val) {
@@ -44,7 +47,8 @@ define i32 @test_sub(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_min(i32* %ptr, i32 %val) {
@@ -56,7 +60,8 @@ define i32 @test_min(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_max(i32* %ptr, i32 %val) {
@@ -68,7 +73,8 @@ define i32 @test_max(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_umin(i32* %ptr, i32 %val) {
@@ -80,7 +86,8 @@ define i32 @test_umin(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_umax(i32* %ptr, i32 %val) {
@@ -92,7 +99,8 @@ define i32 @test_umax(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_and(i32* %ptr, i32 %val) {
@@ -104,7 +112,8 @@ define i32 @test_and(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_or(i32* %ptr, i32 %val) {
@@ -116,7 +125,8 @@ define i32 @test_or(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[BC_A]] [[SCOPE]] [[RELAXED]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_xor(i32* %ptr, i32 %val) {
diff --git a/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll b/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll
index 63c0ae7..950dfe4 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpName [[ADD:%.*]] "test_add"
; CHECK-DAG: OpName [[SUB:%.*]] "test_sub"
@@ -20,7 +21,8 @@
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_add(i32* %ptr, i32 %val) {
@@ -32,7 +34,8 @@ define i32 @test_add(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_sub(i32* %ptr, i32 %val) {
@@ -44,7 +47,8 @@ define i32 @test_sub(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_min(i32* %ptr, i32 %val) {
@@ -56,7 +60,8 @@ define i32 @test_min(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_max(i32* %ptr, i32 %val) {
@@ -68,7 +73,8 @@ define i32 @test_max(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_umin(i32* %ptr, i32 %val) {
@@ -80,7 +86,8 @@ define i32 @test_umin(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_umax(i32* %ptr, i32 %val) {
@@ -92,7 +99,8 @@ define i32 @test_umax(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_and(i32* %ptr, i32 %val) {
@@ -104,7 +112,8 @@ define i32 @test_and(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_or(i32* %ptr, i32 %val) {
@@ -116,7 +125,8 @@ define i32 @test_or(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[BC_A]] [[SCOPE]] [[ACQREL]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_xor(i32* %ptr, i32 %val) {
diff --git a/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll b/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll
index f6a8fe1..f142e01 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpName [[ADD:%.*]] "test_add"
; CHECK-DAG: OpName [[SUB:%.*]] "test_sub"
@@ -20,7 +21,8 @@
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_add(i32* %ptr, i32 %val) {
@@ -32,7 +34,8 @@ define i32 @test_add(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_sub(i32* %ptr, i32 %val) {
@@ -44,7 +47,8 @@ define i32 @test_sub(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_min(i32* %ptr, i32 %val) {
@@ -56,7 +60,8 @@ define i32 @test_min(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_max(i32* %ptr, i32 %val) {
@@ -68,7 +73,8 @@ define i32 @test_max(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_umin(i32* %ptr, i32 %val) {
@@ -80,7 +86,8 @@ define i32 @test_umin(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_umax(i32* %ptr, i32 %val) {
@@ -92,7 +99,8 @@ define i32 @test_umax(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_and(i32* %ptr, i32 %val) {
@@ -104,7 +112,8 @@ define i32 @test_and(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_or(i32* %ptr, i32 %val) {
@@ -116,7 +125,8 @@ define i32 @test_or(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
+; CHECK-NEXT: [[BC_A:%.*]] = OpBitcast %[[#]] [[A]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[BC_A]] [[SCOPE]] [[SEQ]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_xor(i32* %ptr, i32 %val) {
diff --git a/llvm/test/CodeGen/SPIRV/instructions/bitwise-i1.ll b/llvm/test/CodeGen/SPIRV/instructions/bitwise-i1.ll
new file mode 100644
index 0000000..8d3657b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/instructions/bitwise-i1.ll
@@ -0,0 +1,69 @@
+; This test ensures that LLVM IR bitwise instructions result in logical SPIR-V instructions
+; when applied to the i1 type.
+
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: %[[#Char:]] = OpTypeInt 8 0
+; CHECK-DAG: %[[#Vec2Char:]] = OpTypeVector %[[#Char]] 2
+; CHECK-DAG: %[[#Bool:]] = OpTypeBool
+; CHECK-DAG: %[[#Vec2Bool:]] = OpTypeVector %[[#Bool]] 2
+
+; CHECK: OpBitwiseAnd %[[#Char]]
+; CHECK: OpBitwiseOr %[[#Char]]
+; CHECK: OpBitwiseXor %[[#Char]]
+; CHECK: OpBitwiseAnd %[[#Vec2Char]]
+; CHECK: OpBitwiseOr %[[#Vec2Char]]
+; CHECK: OpBitwiseXor %[[#Vec2Char]]
+
+; CHECK: OpLogicalAnd %[[#Bool]]
+
+; CHECK: OpLogicalAnd %[[#Bool]]
+; CHECK: OpLogicalOr %[[#Bool]]
+; CHECK: OpLogicalNotEqual %[[#Bool]]
+; CHECK: OpLogicalAnd %[[#Vec2Bool]]
+; CHECK: OpLogicalOr %[[#Vec2Bool]]
+; CHECK: OpLogicalNotEqual %[[#Vec2Bool]]
+
+define void @test1(i8 noundef %arg1, i8 noundef %arg2) {
+ %cond1 = and i8 %arg1, %arg2
+ %cond2 = or i8 %arg1, %arg2
+ %cond3 = xor i8 %arg1, %arg2
+ ret void
+}
+
+define void @test1v(<2 x i8> noundef %arg1, <2 x i8> noundef %arg2) {
+ %cond1 = and <2 x i8> %arg1, %arg2
+ %cond2 = or <2 x i8> %arg1, %arg2
+ %cond3 = xor <2 x i8> %arg1, %arg2
+ ret void
+}
+
+define void @test2(float noundef %real, float noundef %imag) {
+entry:
+ %realabs = tail call spir_func noundef float @_Z16__spirv_ocl_fabsf(float noundef %real)
+ %cond1 = fcmp oeq float %realabs, 1.000000e+00
+ %cond2 = fcmp oeq float %imag, 0.000000e+00
+ %cond3 = and i1 %cond1, %cond2
+ br i1 %cond3, label %midlbl, label %cleanup
+midlbl:
+ br label %cleanup
+cleanup:
+ ret void
+}
+
+define void @test3(i1 noundef %arg1, i1 noundef %arg2) {
+ %cond1 = and i1 %arg1, %arg2
+ %cond2 = or i1 %arg1, %arg2
+ %cond3 = xor i1 %arg1, %arg2
+ ret void
+}
+
+define void @test3v(<2 x i1> noundef %arg1, <2 x i1> noundef %arg2) {
+ %cond1 = and <2 x i1> %arg1, %arg2
+ %cond2 = or <2 x i1> %arg1, %arg2
+ %cond3 = xor <2 x i1> %arg1, %arg2
+ ret void
+}
+
+declare dso_local spir_func noundef float @_Z16__spirv_ocl_fabsf(float noundef)
diff --git a/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll b/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll
index 641e2bf..31cd8bd 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll
@@ -1,7 +1,13 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s --translator-compatibility-mode -o - | FileCheck %s --check-prefix=CHECK-COMPAT
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s --translator-compatibility-mode -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpName [[EQ:%.*]] "test_eq"
; CHECK-DAG: OpName [[NE:%.*]] "test_ne"
+; CHECK-COMPAT-DAG: OpName [[EQ:%.*]] "test_eq"
+; CHECK-COMPAT-DAG: OpName [[NE:%.*]] "test_ne"
; CHECK-DAG: OpName [[ULT:%.*]] "test_ult"
; CHECK-DAG: OpName [[SLT:%.*]] "test_slt"
; CHECK-DAG: OpName [[ULE:%.*]] "test_ule"
@@ -19,6 +25,9 @@
; CHECK-NEXT: [[R:%.*]] = OpPtrEqual {{%.+}} [[A]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
+; CHECK-COMPAT: [[EQ]] = OpFunction
+; CHECK-COMPAT-NOT: OpPtrEqual
+; CHECK-COMPAT: OpFunctionEnd
define i1 @test_eq(i16* %a, i16* %b) {
%r = icmp eq i16* %a, %b
ret i1 %r
@@ -31,6 +40,9 @@ define i1 @test_eq(i16* %a, i16* %b) {
; CHECK-NEXT: [[R:%.*]] = OpPtrNotEqual {{%.+}} [[A]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
+; CHECK-COMPAT: [[NE]] = OpFunction
+; CHECK-COMPAT-NOT: OpPtrNotEqual
+; CHECK-COMPAT: OpFunctionEnd
define i1 @test_ne(i16* %a, i16* %b) {
%r = icmp ne i16* %a, %b
ret i1 %r
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
new file mode 100644
index 0000000..710a158
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
@@ -0,0 +1,25 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+
+; CHECK: OpFunction
+; CHECK: %[[FooArg:.*]] = OpVariable
+; CHECK: OpLifetimeStart %[[FooArg]], 0
+; CHECK: OpCopyMemorySized
+; CHECK: OpBitcast
+; CHECK: OpInBoundsPtrAccessChain
+; CHECK: OpLifetimeStop %[[FooArg]], 0
+
+%tprange = type { %tparray }
+%tparray = type { [2 x i64] }
+
+define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange) {
+ %RoundedRangeKernel = alloca %tprange, align 8
+ call void @llvm.lifetime.start.p0(i64 72, ptr nonnull %RoundedRangeKernel) #7
+ call void @llvm.memcpy.p0.p0.i64(ptr align 8 %RoundedRangeKernel, ptr align 8 %_arg_UserRange, i64 16, i1 false)
+ %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 16
+ call void @llvm.lifetime.end.p0(i64 72, ptr nonnull %RoundedRangeKernel) #7
+ ret void
+}
+
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll
index 93190f9..e0c84ee 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll
index aa879b2..12a4a86 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll
index a0d18d5..459bc6b 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll
index 247ebcc..4f9cd29 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll
index 13f4410..837bea0 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll
index 13ef118..475da2e 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll
index 93ef79a..b525c84 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll
index afe30d5..0985be9 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll
index 9b397ae..1a70057 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll
index a62bb0c..90c6cf5 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll
index 3fc2bcc..4551fa3 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll
index 9459946..a0d257b 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll
index cce7189..ba5dba7 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll
index bcc49c5..e16bde8 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll
index 26bc96b..cf887bb 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll
new file mode 100644
index 0000000..7fae6ca
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll
@@ -0,0 +1,37 @@
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: %[[#TYCHAR:]] = OpTypeInt 8 0
+; CHECK-DAG: %[[#TYCHARPTR:]] = OpTypePointer Function %[[#TYCHAR]]
+; CHECK-DAG: %[[#TYINT32:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#TYSTRUCTINT32:]] = OpTypeStruct %[[#TYINT32]]
+; CHECK-DAG: %[[#TYARRAY:]] = OpTypeArray %[[#TYSTRUCTINT32]] %[[#]]
+; CHECK-DAG: %[[#TYSTRUCT:]] = OpTypeStruct %[[#TYARRAY]]
+; CHECK-DAG: %[[#TYSTRUCTPTR:]] = OpTypePointer Function %[[#TYSTRUCT]]
+; CHECK-DAG: %[[#TYINT64:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#TYINT64PTR:]] = OpTypePointer Function %[[#TYINT64]]
+; CHECK: OpFunction
+; CHECK: %[[#PTRTOSTRUCT:]] = OpFunctionParameter %[[#TYSTRUCTPTR]]
+; CHECK: %[[#PTRTOCHAR:]] = OpBitcast %[[#TYCHARPTR]] %[[#PTRTOSTRUCT]]
+; CHECK-NEXT: OpInBoundsPtrAccessChain %[[#TYCHARPTR]] %[[#PTRTOCHAR]]
+; CHECK: OpFunction
+; CHECK: %[[#PTRTOSTRUCT2:]] = OpFunctionParameter %[[#TYSTRUCTPTR]]
+; CHECK: %[[#ELEM:]] = OpInBoundsPtrAccessChain %[[#TYSTRUCTPTR]] %[[#PTRTOSTRUCT2]]
+; CHECK-NEXT: %[[#TOLOAD:]] = OpBitcast %[[#TYINT64PTR]] %[[#ELEM]]
+; CHECK-NEXT: OpLoad %[[#TYINT64]] %[[#TOLOAD]]
+
+%struct.S = type { i32 }
+%struct.__wrapper_class = type { [7 x %struct.S] }
+
+define spir_kernel void @foo1(ptr noundef byval(%struct.__wrapper_class) align 4 %_arg_Arr) {
+entry:
+ %elem = getelementptr inbounds i8, ptr %_arg_Arr, i64 0
+ ret void
+}
+
+define spir_kernel void @foo2(ptr noundef byval(%struct.__wrapper_class) align 4 %_arg_Arr) {
+entry:
+ %elem = getelementptr inbounds %struct.__wrapper_class, ptr %_arg_Arr, i64 0
+ %data = load i64, ptr %elem
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll
index a30d079..18752fd 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll
@@ -9,7 +9,7 @@
; CHECK-DAG: %[[#TYLONGPTR:]] = OpTypePointer Function %[[#TYLONG]]
; CHECK: %[[#PTRTOSTRUCT:]] = OpFunctionParameter %[[#TYSTRUCTPTR]]
; CHECK: %[[#PTRTOLONG:]] = OpBitcast %[[#TYLONGPTR]] %[[#PTRTOSTRUCT]]
-; CHECK: OpLoad %[[#TYLONG]] %[[#PTRTOLONG]]
+; CHECK-NEXT: OpLoad %[[#TYLONG]] %[[#PTRTOLONG]]
%struct.S = type { i32 }
%struct.__wrapper_class = type { [7 x %struct.S] }
diff --git a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-store.ll b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-store.ll
index 4701f02..202bcfb 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-store.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-store.ll
@@ -13,7 +13,7 @@
; CHECK: %[[#OBJ:]] = OpFunctionParameter %[[#TYSTRUCT]]
; CHECK: %[[#ARGPTR2:]] = OpFunctionParameter %[[#TYLONGPTR]]
; CHECK: %[[#PTRTOSTRUCT:]] = OpBitcast %[[#TYSTRUCTPTR]] %[[#ARGPTR2]]
-; CHECK: OpStore %[[#PTRTOSTRUCT]] %[[#OBJ]]
+; CHECK-NEXT: OpStore %[[#PTRTOSTRUCT]] %[[#OBJ]]
%struct.S = type { i32 }
%struct.__wrapper_class = type { [7 x %struct.S] }
diff --git a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll
index 062863a..7e9c621 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK: %[[#INT8:]] = OpTypeInt 8 0
; CHECK: %[[#PTR1:]] = OpTypePointer CrossWorkgroup %[[#INT8]]
diff --git a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll
index aaf97f8..fc999ba 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK: %[[#FLOAT32:]] = OpTypeFloat 32
; CHECK: %[[#PTR:]] = OpTypePointer CrossWorkgroup %[[#FLOAT32]]
diff --git a/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-addressspace.ll b/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-addressspace.ll
index 6d12023..a3a730a 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-addressspace.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-addressspace.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: %[[#INT:]] = OpTypeInt 32 0
; CHECK-DAG: %[[#PTR1:]] = OpTypePointer Function %[[#INT]]
diff --git a/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type-deduction-no-bitcast-to-generic.ll b/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type-deduction-no-bitcast-to-generic.ll
index 9e136ce..b74a344 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type-deduction-no-bitcast-to-generic.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type-deduction-no-bitcast-to-generic.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: %[[#IMAGE:]] = OpTypeImage %2 2D 0 0 0 0 Unknown ReadOnly
diff --git a/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type.ll b/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type.ll
index 1fcc6d9..b8f205a 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/kernel-argument-pointer-type.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: %[[#FLOAT32:]] = OpTypeFloat 32
; CHECK-DAG: %[[#PTR1:]] = OpTypePointer Function %[[#FLOAT32]]
diff --git a/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll b/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll
index 1b4e7a3..1667abc 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK: %[[#INT8:]] = OpTypeInt 8 0
; CHECK: %[[#PTR1:]] = OpTypePointer CrossWorkgroup %[[#INT8]]
diff --git a/llvm/test/CodeGen/SPIRV/pointers/nested-struct-opaque-pointers.ll b/llvm/test/CodeGen/SPIRV/pointers/nested-struct-opaque-pointers.ll
new file mode 100644
index 0000000..77b895c
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/nested-struct-opaque-pointers.ll
@@ -0,0 +1,20 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-NOT: OpTypeInt 8 0
+
+@GI = addrspace(1) constant i64 42
+
+@GS = addrspace(1) global {ptr addrspace(1), ptr addrspace(1)} { ptr addrspace(1) @GI, ptr addrspace(1) @GI }
+@GS2 = addrspace(1) global {ptr addrspace(1), ptr addrspace(1)} { ptr addrspace(1) @GS, ptr addrspace(1) @GS }
+@GS3 = addrspace(1) global {ptr addrspace(1), ptr addrspace(1)} { ptr addrspace(1) @GS2, ptr addrspace(1) @GS2 }
+
+@GPS = addrspace(1) global ptr addrspace(1) @GS3
+
+@GPI1 = addrspace(1) global ptr addrspace(1) @GI
+@GPI2 = addrspace(1) global ptr addrspace(1) @GPI1
+@GPI3 = addrspace(1) global ptr addrspace(1) @GPI2
+
+define spir_kernel void @foo() {
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/pointers/store-operand-ptr-to-struct.ll b/llvm/test/CodeGen/SPIRV/pointers/store-operand-ptr-to-struct.ll
index 00b03c0..3a0d65e 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/store-operand-ptr-to-struct.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/store-operand-ptr-to-struct.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; TODO: OpFunctionParameter should be a pointer to the struct base type.
; XFAIL: *
diff --git a/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll b/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll
index 86f5f5b..6d4913f 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll
@@ -1,14 +1,14 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
-; CHECK: %[[TyInt8:.*]] = OpTypeInt 8 0
-; CHECK: %[[TyInt8Ptr:.*]] = OpTypePointer {{[a-zA-Z]+}} %[[TyInt8]]
-; CHECK: %[[TyStruct:.*]] = OpTypeStruct %[[TyInt8Ptr]] %[[TyInt8Ptr]]
+; CHECK: %[[TyInt64:.*]] = OpTypeInt 64 0
+; CHECK: %[[TyInt64Ptr:.*]] = OpTypePointer {{[a-zA-Z]+}} %[[TyInt64]]
+; CHECK: %[[TyStruct:.*]] = OpTypeStruct %[[TyInt64Ptr]] %[[TyInt64Ptr]]
; CHECK: %[[ConstStruct:.*]] = OpConstantComposite %[[TyStruct]] %[[ConstField:.*]] %[[ConstField]]
; CHECK: %[[TyStructPtr:.*]] = OpTypePointer {{[a-zA-Z]+}} %[[TyStruct]]
; CHECK: OpVariable %[[TyStructPtr]] {{[a-zA-Z]+}} %[[ConstStruct]]
-@a = addrspace(1) constant i32 123
+@a = addrspace(1) constant i64 42
@struct = addrspace(1) global {ptr addrspace(1), ptr addrspace(1)} { ptr addrspace(1) @a, ptr addrspace(1) @a }
define spir_kernel void @foo() {
diff --git a/llvm/test/CodeGen/SPIRV/pointers/two-bitcast-or-param-users.ll b/llvm/test/CodeGen/SPIRV/pointers/two-bitcast-or-param-users.ll
index 52180d5..23c3faa 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/two-bitcast-or-param-users.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/two-bitcast-or-param-users.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: %[[#INT:]] = OpTypeInt 32
; CHECK-DAG: %[[#GLOBAL_PTR_INT:]] = OpTypePointer CrossWorkgroup %[[#INT]]
diff --git a/llvm/test/CodeGen/SPIRV/pointers/two-subsequent-bitcasts.ll b/llvm/test/CodeGen/SPIRV/pointers/two-subsequent-bitcasts.ll
index 473c2a8..83234e3 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/two-subsequent-bitcasts.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/two-subsequent-bitcasts.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: %[[#float:]] = OpTypeFloat 32
; CHECK-DAG: %[[#pointer:]] = OpTypePointer CrossWorkgroup %[[#float]]
diff --git a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-args-rev.ll b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-args-rev.ll
new file mode 100644
index 0000000..ae7fb99
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-args-rev.ll
@@ -0,0 +1,28 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-DAG: OpName %[[FooArg:.*]] "known_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Foo:.*]] "foo"
+; CHECK-SPIRV-DAG: OpName %[[ArgToDeduce:.*]] "unknown_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Bar:.*]] "bar"
+; CHECK-SPIRV-DAG: %[[Long:.*]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[Void:.*]] = OpTypeVoid
+; CHECK-SPIRV-DAG: %[[LongPtr:.*]] = OpTypePointer CrossWorkgroup %[[Long]]
+; CHECK-SPIRV-DAG: %[[Fun:.*]] = OpTypeFunction %[[Void]] %[[LongPtr]]
+; CHECK-SPIRV: %[[Bar]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[ArgToDeduce]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionCall %[[Void]] %[[Foo]] %[[ArgToDeduce]]
+; CHECK-SPIRV: %[[Foo]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[FooArg]] = OpFunctionParameter %[[LongPtr]]
+
+define spir_kernel void @bar(ptr addrspace(1) %unknown_type_ptr) {
+entry:
+ %elem = getelementptr inbounds i32, ptr addrspace(1) %unknown_type_ptr, i64 0
+ call void @foo(ptr addrspace(1) %unknown_type_ptr)
+ ret void
+}
+
+define void @foo(ptr addrspace(1) %known_type_ptr) {
+entry:
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-args.ll b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-args.ll
new file mode 100644
index 0000000..ee411f2
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-args.ll
@@ -0,0 +1,97 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-DAG: OpName %[[FooArg:.*]] "unknown_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Foo:.*]] "foo"
+; CHECK-SPIRV-DAG: OpName %[[BarArg:.*]] "known_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Bar:.*]] "bar"
+; CHECK-SPIRV-DAG: OpName %[[UntypedArg:.*]] "arg"
+; CHECK-SPIRV-DAG: OpName %[[FunUntypedArg:.*]] "foo_untyped_arg"
+; CHECK-SPIRV-DAG: OpName %[[UnusedArg1:.*]] "unused_arg1"
+; CHECK-SPIRV-DAG: OpName %[[Foo2Arg:.*]] "unknown_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Foo2:.*]] "foo2"
+; CHECK-SPIRV-DAG: OpName %[[Bar2Arg:.*]] "known_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Bar2:.*]] "bar2"
+; CHECK-SPIRV-DAG: OpName %[[Foo5Arg1:.*]] "unknown_type_ptr1"
+; CHECK-SPIRV-DAG: OpName %[[Foo5Arg2:.*]] "unknown_type_ptr2"
+; CHECK-SPIRV-DAG: OpName %[[Foo5:.*]] "foo5"
+; CHECK-SPIRV-DAG: OpName %[[Bar5Arg:.*]] "known_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Bar5:.*]] "bar5"
+; CHECK-SPIRV-DAG: %[[Char:.*]] = OpTypeInt 8 0
+; CHECK-SPIRV-DAG: %[[Long:.*]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[Half:.*]] = OpTypeFloat 16
+; CHECK-SPIRV-DAG: %[[Void:.*]] = OpTypeVoid
+; CHECK-SPIRV-DAG: %[[HalfConst:.*]] = OpConstant %[[Half]] 15360
+; CHECK-SPIRV-DAG: %[[CharPtr:.*]] = OpTypePointer CrossWorkgroup %[[Char]]
+; CHECK-SPIRV-DAG: %[[LongPtr:.*]] = OpTypePointer CrossWorkgroup %[[Long]]
+; CHECK-SPIRV-DAG: %[[Fun:.*]] = OpTypeFunction %[[Void]] %[[LongPtr]]
+; CHECK-SPIRV-DAG: %[[Fun2:.*]] = OpTypeFunction %[[Void]] %[[Half]] %[[LongPtr]]
+; CHECK-SPIRV-DAG: %[[Fun5:.*]] = OpTypeFunction %[[Void]] %[[Half]] %[[LongPtr]] %[[Half]] %[[LongPtr]] %[[Half]]
+; CHECK-SPIRV-DAG: %[[FunUntyped:.*]] = OpTypeFunction %[[Void]] %[[CharPtr]]
+
+; CHECK-SPIRV: %[[Foo]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[FooArg]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: %[[Bar]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[BarArg]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionCall %[[Void]] %[[Foo]] %[[BarArg]]
+
+; CHECK-SPIRV: %[[FunUntypedArg]] = OpFunction %[[Void]] None %[[FunUntyped]]
+; CHECK-SPIRV: %[[UntypedArg]] = OpFunctionParameter %[[CharPtr]]
+
+; CHECK-SPIRV: %[[Foo2]] = OpFunction %[[Void]] None %[[Fun2]]
+; CHECK-SPIRV: %[[UnusedArg1]] = OpFunctionParameter %[[Half]]
+; CHECK-SPIRV: %[[Foo2Arg]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: %[[Bar2]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[Bar2Arg]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionCall %[[Void]] %[[Foo2]] %[[HalfConst]] %[[Bar2Arg]]
+
+; CHECK-SPIRV: %[[Foo5]] = OpFunction %[[Void]] None %[[Fun5]]
+; CHECK-SPIRV: OpFunctionParameter %[[Half]]
+; CHECK-SPIRV: %[[Foo5Arg1]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionParameter %[[Half]]
+; CHECK-SPIRV: %[[Foo5Arg2]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionParameter %[[Half]]
+; CHECK-SPIRV: %[[Bar5]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[Bar5Arg]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionCall %[[Void]] %[[Foo5]] %[[HalfConst]] %[[Bar5Arg]] %[[HalfConst]] %[[Bar5Arg]] %[[HalfConst]]
+
+define void @foo(ptr addrspace(1) %unknown_type_ptr) {
+entry:
+ ret void
+}
+
+define spir_kernel void @bar(ptr addrspace(1) %known_type_ptr) {
+entry:
+ %elem = getelementptr inbounds i32, ptr addrspace(1) %known_type_ptr, i64 0
+ call void @foo(ptr addrspace(1) %known_type_ptr)
+ ret void
+}
+
+define void @foo_untyped_arg(ptr addrspace(1) %arg) {
+entry:
+ ret void
+}
+
+define void @foo2(half %unused_arg1, ptr addrspace(1) %unknown_type_ptr) {
+entry:
+ ret void
+}
+
+define spir_kernel void @bar2(ptr addrspace(1) %known_type_ptr) {
+entry:
+ %elem = getelementptr inbounds i32, ptr addrspace(1) %known_type_ptr, i64 0
+ call void @foo2(half 1.0, ptr addrspace(1) %known_type_ptr)
+ ret void
+}
+
+define void @foo5(half %unused_arg1, ptr addrspace(1) %unknown_type_ptr1, half %unused_arg2, ptr addrspace(1) %unknown_type_ptr2, half %unused_arg3) {
+entry:
+ ret void
+}
+
+define spir_kernel void @bar5(ptr addrspace(1) %known_type_ptr) {
+entry:
+ %elem = getelementptr inbounds i32, ptr addrspace(1) %known_type_ptr, i64 0
+ call void @foo5(half 1.0, ptr addrspace(1) %known_type_ptr, half 1.0, ptr addrspace(1) %known_type_ptr, half 1.0)
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-chain.ll b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-chain.ll
new file mode 100644
index 0000000..1071d34
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-chain.ll
@@ -0,0 +1,57 @@
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-DAG: OpName %[[ArgCum:.*]] "_arg_cum"
+; CHECK-SPIRV-DAG: OpName %[[FunTest:.*]] "test"
+; CHECK-SPIRV-DAG: OpName %[[Addr:.*]] "addr"
+; CHECK-SPIRV-DAG: OpName %[[StubObj:.*]] "stub_object"
+; CHECK-SPIRV-DAG: OpName %[[MemOrder:.*]] "mem_order"
+; CHECK-SPIRV-DAG: OpName %[[FooStub:.*]] "foo_stub"
+; CHECK-SPIRV-DAG: OpName %[[FooObj:.*]] "foo_object"
+; CHECK-SPIRV-DAG: OpName %[[FooMemOrder:.*]] "mem_order"
+; CHECK-SPIRV-DAG: OpName %[[FooFunc:.*]] "foo"
+; CHECK-SPIRV-DAG: %[[TyLong:.*]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[TyVoid:.*]] = OpTypeVoid
+; CHECK-SPIRV-DAG: %[[TyPtrLong:.*]] = OpTypePointer CrossWorkgroup %[[TyLong]]
+; CHECK-SPIRV-DAG: %[[TyFunPtrLong:.*]] = OpTypeFunction %[[TyVoid]] %[[TyPtrLong]]
+; CHECK-SPIRV-DAG: %[[TyGenPtrLong:.*]] = OpTypePointer Generic %[[TyLong]]
+; CHECK-SPIRV-DAG: %[[TyFunGenPtrLongLong:.*]] = OpTypeFunction %[[TyVoid]] %[[TyGenPtrLong]] %[[TyLong]]
+; CHECK-SPIRV-DAG: %[[Const3:.*]] = OpConstant %[[TyLong]] 3
+; CHECK-SPIRV: %[[FunTest]] = OpFunction %[[TyVoid]] None %[[TyFunPtrLong]]
+; CHECK-SPIRV: %[[ArgCum]] = OpFunctionParameter %[[TyPtrLong]]
+; CHECK-SPIRV: OpFunctionCall %[[TyVoid]] %[[FooFunc]] %[[Addr]] %[[Const3]]
+; CHECK-SPIRV: %[[FooStub]] = OpFunction %[[TyVoid]] None %[[TyFunGenPtrLongLong]]
+; CHECK-SPIRV: %[[StubObj]] = OpFunctionParameter %[[TyGenPtrLong]]
+; CHECK-SPIRV: %[[MemOrder]] = OpFunctionParameter %[[TyLong]]
+; CHECK-SPIRV: %[[FooFunc]] = OpFunction %[[TyVoid]] None %[[TyFunGenPtrLongLong]]
+; CHECK-SPIRV: %[[FooObj]] = OpFunctionParameter %[[TyGenPtrLong]]
+; CHECK-SPIRV: %[[FooMemOrder]] = OpFunctionParameter %[[TyLong]]
+; CHECK-SPIRV: OpFunctionCall %[[TyVoid]] %[[FooStub]] %[[FooObj]] %[[FooMemOrder]]
+
+define spir_kernel void @test(ptr addrspace(1) noundef align 4 %_arg_cum) {
+entry:
+ %lptr = getelementptr inbounds i32, ptr addrspace(1) %_arg_cum, i64 1
+ %addr = addrspacecast ptr addrspace(1) %lptr to ptr addrspace(4)
+ %object = bitcast ptr addrspace(4) %addr to ptr addrspace(4)
+ call spir_func void @foo(ptr addrspace(4) %object, i32 3)
+ %halfptr = getelementptr inbounds half, ptr addrspace(1) %_arg_cum, i64 1
+ %halfaddr = addrspacecast ptr addrspace(1) %halfptr to ptr addrspace(4)
+ call spir_func void @foo(ptr addrspace(4) %halfaddr, i32 3)
+ %dblptr = getelementptr inbounds double, ptr addrspace(1) %_arg_cum, i64 1
+ %dbladdr = addrspacecast ptr addrspace(1) %dblptr to ptr addrspace(4)
+ call spir_func void @foo(ptr addrspace(4) %dbladdr, i32 3)
+ ret void
+}
+
+define void @foo_stub(ptr addrspace(4) noundef %stub_object, i32 noundef %mem_order) {
+entry:
+ %object.addr = alloca ptr addrspace(4)
+ %object.addr.ascast = addrspacecast ptr %object.addr to ptr addrspace(4)
+ store ptr addrspace(4) %stub_object, ptr addrspace(4) %object.addr.ascast
+ ret void
+}
+
+define void @foo(ptr addrspace(4) noundef %foo_object, i32 noundef %mem_order) {
+ tail call void @foo_stub(ptr addrspace(4) noundef %foo_object, i32 noundef %mem_order)
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-complex.ll b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-complex.ll
new file mode 100644
index 0000000..ea7a22c
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-complex.ll
@@ -0,0 +1,29 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-DAG: %[[Long:.*]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[Void:.*]] = OpTypeVoid
+; CHECK-SPIRV-DAG: %[[Struct:.*]] = OpTypeStruct %[[Long]]
+; CHECK-SPIRV-DAG: %[[StructPtr:.*]] = OpTypePointer Generic %[[Struct]]
+; CHECK-SPIRV-DAG: %[[Function:.*]] = OpTypeFunction %[[Void]] %[[StructPtr]]
+; CHECK-SPIRV-DAG: %[[Const:.*]] = OpConstantNull %[[Struct]]
+; CHECK-SPIRV-DAG: %[[CrossStructPtr:.*]] = OpTypePointer CrossWorkgroup %[[Struct]]
+; CHECK-SPIRV-DAG: %[[Var:.*]] = OpVariable %[[CrossStructPtr]] CrossWorkgroup %[[Const]]
+; CHECK-SPIRV: %[[Foo:.*]] = OpFunction %[[Void]] None %[[Function]]
+; CHECK-SPIRV-NEXT: OpFunctionParameter %[[StructPtr]]
+; CHECK-SPIRV: %[[Casted:.*]] = OpPtrCastToGeneric %[[StructPtr]] %[[Var]]
+; CHECK-SPIRV-NEXT: OpFunctionCall %[[Void]] %[[Foo]] %[[Casted]]
+
+%struct.global_ctor_dtor = type { i32 }
+@g1 = addrspace(1) global %struct.global_ctor_dtor zeroinitializer
+
+define linkonce_odr spir_func void @foo(ptr addrspace(4) %this) {
+entry:
+ ret void
+}
+
+define internal spir_func void @bar() {
+entry:
+ call spir_func void @foo(ptr addrspace(4) addrspacecast (ptr addrspace(1) @g1 to ptr addrspace(4)))
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-rev.ll b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-rev.ll
new file mode 100644
index 0000000..76769ab
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-rev.ll
@@ -0,0 +1,28 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-DAG: OpName %[[FooArg:.*]] "known_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Foo:.*]] "foo"
+; CHECK-SPIRV-DAG: OpName %[[ArgToDeduce:.*]] "unknown_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Bar:.*]] "bar"
+; CHECK-SPIRV-DAG: %[[Long:.*]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[Void:.*]] = OpTypeVoid
+; CHECK-SPIRV-DAG: %[[LongPtr:.*]] = OpTypePointer CrossWorkgroup %[[Long]]
+; CHECK-SPIRV-DAG: %[[Fun:.*]] = OpTypeFunction %[[Void]] %[[LongPtr]]
+; CHECK-SPIRV: %[[Bar]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[ArgToDeduce]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionCall %[[Void]] %[[Foo]] %[[ArgToDeduce]]
+; CHECK-SPIRV: %[[Foo]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[FooArg]] = OpFunctionParameter %[[LongPtr]]
+
+define spir_kernel void @bar(ptr addrspace(1) %unknown_type_ptr) {
+entry:
+ call spir_func void @foo(ptr addrspace(1) %unknown_type_ptr)
+ ret void
+}
+
+define void @foo(ptr addrspace(1) %known_type_ptr) {
+entry:
+ %elem = getelementptr inbounds i32, ptr addrspace(1) %known_type_ptr, i64 0
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call.ll b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call.ll
new file mode 100644
index 0000000..8cbf360
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call.ll
@@ -0,0 +1,28 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-DAG: OpName %[[FooArg:.*]] "known_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Foo:.*]] "foo"
+; CHECK-SPIRV-DAG: OpName %[[ArgToDeduce:.*]] "unknown_type_ptr"
+; CHECK-SPIRV-DAG: OpName %[[Bar:.*]] "bar"
+; CHECK-SPIRV-DAG: %[[Long:.*]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[Void:.*]] = OpTypeVoid
+; CHECK-SPIRV-DAG: %[[LongPtr:.*]] = OpTypePointer CrossWorkgroup %[[Long]]
+; CHECK-SPIRV-DAG: %[[Fun:.*]] = OpTypeFunction %[[Void]] %[[LongPtr]]
+; CHECK-SPIRV: %[[Foo]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[FooArg]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: %[[Bar]] = OpFunction %[[Void]] None %[[Fun]]
+; CHECK-SPIRV: %[[ArgToDeduce]] = OpFunctionParameter %[[LongPtr]]
+; CHECK-SPIRV: OpFunctionCall %[[Void]] %[[Foo]] %[[ArgToDeduce]]
+
+define void @foo(ptr addrspace(1) %known_type_ptr) {
+entry:
+ %elem = getelementptr inbounds i32, ptr addrspace(1) %known_type_ptr, i64 0
+ ret void
+}
+
+define spir_kernel void @bar(ptr addrspace(1) %unknown_type_ptr) {
+entry:
+ call spir_func void @foo(ptr addrspace(1) %unknown_type_ptr)
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/pointers/typeof-ptr-int.ll b/llvm/test/CodeGen/SPIRV/pointers/typeof-ptr-int.ll
new file mode 100644
index 0000000..f144418
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/typeof-ptr-int.ll
@@ -0,0 +1,29 @@
+; This test checks that the two functions have different SPIR-V type
+; definitions, even though their LLVM function types are identical.
+
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpName %[[Fun32:.*]] "tp_arg_i32"
+; CHECK-DAG: OpName %[[Fun64:.*]] "tp_arg_i64"
+; CHECK-DAG: %[[TyI32:.*]] = OpTypeInt 32 0
+; CHECK-DAG: %[[TyVoid:.*]] = OpTypeVoid
+; CHECK-DAG: %[[TyPtr32:.*]] = OpTypePointer Function %[[TyI32]]
+; CHECK-DAG: %[[TyFun32:.*]] = OpTypeFunction %[[TyVoid]] %[[TyPtr32]]
+; CHECK-DAG: %[[TyI64:.*]] = OpTypeInt 64 0
+; CHECK-DAG: %[[TyPtr64:.*]] = OpTypePointer Function %[[TyI64]]
+; CHECK-DAG: %[[TyFun64:.*]] = OpTypeFunction %[[TyVoid]] %[[TyPtr64]]
+; CHECK-DAG: %[[Fun32]] = OpFunction %[[TyVoid]] None %[[TyFun32]]
+; CHECK-DAG: %[[Fun64]] = OpFunction %[[TyVoid]] None %[[TyFun64]]
+
+define spir_kernel void @tp_arg_i32(ptr %ptr) {
+entry:
+ store i32 1, ptr %ptr
+ ret void
+}
+
+define spir_kernel void @tp_arg_i64(ptr %ptr) {
+entry:
+ store i64 1, ptr %ptr
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/relationals.ll b/llvm/test/CodeGen/SPIRV/relationals.ll
index 1644dc7..f4fcf4d 100644
--- a/llvm/test/CodeGen/SPIRV/relationals.ll
+++ b/llvm/test/CodeGen/SPIRV/relationals.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
declare dso_local spir_func <4 x i8> @_Z13__spirv_IsNanIDv4_aDv4_fET_T0_(<4 x float>)
declare dso_local spir_func <4 x i8> @_Z13__spirv_IsInfIDv4_aDv4_fET_T0_(<4 x float>)
diff --git a/llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll b/llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll
index 329399b..2ea5c76 100644
--- a/llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll
+++ b/llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll
@@ -1,5 +1,6 @@
; RUN: llc -mtriple=spirv-unknown-unknown -O0 %s -o - | FileCheck %s
+; CHECK-DAG: OpDecorate %[[#SubgroupLocalInvocationId:]] BuiltIn SubgroupLocalInvocationId
; CHECK-DAG: %[[#bool:]] = OpTypeBool
; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0
; CHECK-DAG: %[[#uint_0:]] = OpConstant %[[#uint]] 0
@@ -37,10 +38,10 @@ l1_continue:
; CHECK-NEXT: OpBranch %[[#l1_header]]
l1_end:
- %call = call spir_func i32 @_Z3absi(i32 0) [ "convergencectrl"(token %tl1) ]
+ %call = call i32 @__hlsl_wave_get_lane_index() [ "convergencectrl"(token %tl1) ]
br label %end
; CHECK-DAG: %[[#l1_end]] = OpLabel
-; CHECK-DAG: %[[#]] = OpFunctionCall
+; CHECK-DAG: %[[#]] = OpLoad %[[#]] %[[#SubgroupLocalInvocationId]]
; CHECK-NEXT: OpBranch %[[#end:]]
l2:
@@ -76,6 +77,4 @@ declare token @llvm.experimental.convergence.entry()
declare token @llvm.experimental.convergence.control()
declare token @llvm.experimental.convergence.loop()
-; This intrinsic is not convergent. This is only because the backend doesn't
-; support convergent operations yet.
-declare spir_func i32 @_Z3absi(i32) convergent
+declare i32 @__hlsl_wave_get_lane_index() convergent
diff --git a/llvm/test/CodeGen/SPIRV/simple.ll b/llvm/test/CodeGen/SPIRV/simple.ll
index de9efa8..63c1596 100644
--- a/llvm/test/CodeGen/SPIRV/simple.ll
+++ b/llvm/test/CodeGen/SPIRV/simple.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; Support of doubles is required.
; CHECK: OpCapability Float64
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchangeExplicit_cl20.ll b/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchangeExplicit_cl20.ll
index fdb26ba..e0c4779 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchangeExplicit_cl20.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchangeExplicit_cl20.ll
@@ -1,4 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --mattr=+spirv1.3 %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --mattr=+spirv1.3 %s -o - -filetype=obj | spirv-val %}
;; __kernel void testAtomicCompareExchangeExplicit_cl20(
;; volatile global atomic_int* object,
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/BitReversePref.ll b/llvm/test/CodeGen/SPIRV/transcoding/BitReversePref.ll
index 55161e6..11b0578 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/BitReversePref.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/BitReversePref.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-linux %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK: OpDecorate %[[#FUNC_NAME:]] LinkageAttributes "_Z10BitReversei"
; CHECK-NOT: OpBitReverse
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange.ll b/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange.ll
index 95f3673..b63c1c6 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-DAG: %[[#]] = OpBuildNDRange %[[#]] %[[#GWS:]] %[[#LWS:]] %[[#GWO:]]
; CHECK-SPIRV-DAG: %[[#GWS]] = OpConstant %[[#]] 123
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange_2.ll b/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange_2.ll
index a2ae808..65c992c 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange_2.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange_2.ll
@@ -19,6 +19,7 @@
;; bash$ $PATH_TO_GEN/bin/clang -cc1 -x cl -cl-std=CL2.0 -triple spir64-unknown-unknown -emit-llvm -include opencl-20.h BuildNDRange_2.cl -o BuildNDRange_2.ll
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; TODO(#60133): Requires updates following opaque pointer migration.
; XFAIL: *
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/ConvertPtr.ll b/llvm/test/CodeGen/SPIRV/transcoding/ConvertPtr.ll
index 3403695..93aecc5 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/ConvertPtr.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/ConvertPtr.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; kernel void testConvertPtrToU(global int *a, global unsigned long *res) {
;; res[0] = (unsigned long)&a[0];
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/DecorationAlignment.ll b/llvm/test/CodeGen/SPIRV/transcoding/DecorationAlignment.ll
index 2e9b4a4..d4fc5c3 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/DecorationAlignment.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/DecorationAlignment.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpDecorate %[[#ALIGNMENT:]] Alignment 16
; CHECK-SPIRV: %[[#ALIGNMENT]] = OpFunctionParameter %[[#]]
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/DecorationMaxByteOffset.ll b/llvm/test/CodeGen/SPIRV/transcoding/DecorationMaxByteOffset.ll
index 64f25b7..966d835 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/DecorationMaxByteOffset.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/DecorationMaxByteOffset.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#PTR_ID:]] "ptr"
; CHECK-SPIRV: OpName %[[#PTR2_ID:]] "ptr2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/DivRem.ll b/llvm/test/CodeGen/SPIRV/transcoding/DivRem.ll
index 2f423c2..67c3380 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/DivRem.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/DivRem.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-DAG: %[[#int:]] = OpTypeInt 32 0
; CHECK-SPIRV-DAG: %[[#int2:]] = OpTypeVector %[[#int]] 2
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/ExecutionMode_SPIR_to_SPIRV.ll b/llvm/test/CodeGen/SPIRV/transcoding/ExecutionMode_SPIR_to_SPIRV.ll
index 6d6dd24..6e8726c 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/ExecutionMode_SPIR_to_SPIRV.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/ExecutionMode_SPIR_to_SPIRV.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-DAG: OpEntryPoint Kernel %[[#WORKER:]] "worker"
; CHECK-SPIRV-DAG: OpExecutionMode %[[#WORKER]] LocalSizeHint 128 10 1
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/GlobalFunAnnotate.ll b/llvm/test/CodeGen/SPIRV/transcoding/GlobalFunAnnotate.ll
index 2796dcb..33bece5 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/GlobalFunAnnotate.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/GlobalFunAnnotate.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-linux %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpDecorate %[[#]] UserSemantic "annotation_on_function"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll
index 02d1250..e405ef0 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll
@@ -7,7 +7,7 @@
;;
;; Positive tests:
;;
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-NEGATIVE
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-NEGATIVE
;;
;; Negative tests:
;;
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_cmpxchg.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_cmpxchg.ll
index 331960c..417b89e 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_cmpxchg.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_cmpxchg.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks that the backend is capable of correctly translating
;; atomic_cmpxchg OpenCL C 1.2 built-in function [1] into corresponding SPIR-V
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_legacy.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_legacy.ll
index 95eb6ad..3180b57 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_legacy.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_legacy.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks that the backend is capable of correctly translating
;; legacy atomic OpenCL C 1.2 built-in functions [1] into corresponding SPIR-V
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_work_item_fence.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_work_item_fence.ll
index 0f3a62a..c94c130 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_work_item_fence.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_work_item_fence.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks that the backend is capable of correctly translating
;; atomic_work_item_fence OpenCL C 2.0 built-in function [1] into corresponding
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/barrier.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/barrier.ll
index a126d94..cf4a247 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/barrier.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/barrier.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks that the backend is capable of correctly translating
;; barrier OpenCL C 1.2 built-in function [1] into corresponding SPIR-V
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/sub_group_mask.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/sub_group_mask.ll
index 42b127c..5d9840d 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/sub_group_mask.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/sub_group_mask.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpCapability GroupNonUniformBallot
; CHECK-SPIRV: OpDecorate %[[#]] BuiltIn SubgroupGtMask
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/work_group_barrier.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/work_group_barrier.ll
index 0874e6f..0702fd0 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/work_group_barrier.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/work_group_barrier.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks that the backend is capable of correctly translating
;; sub_group_barrier built-in function [1] from cl_khr_subgroups extension into
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/atomic_flag.ll b/llvm/test/CodeGen/SPIRV/transcoding/atomic_flag.ll
index 3c563d3..20204ac 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/atomic_flag.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/atomic_flag.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; Types:
; CHECK-DAG: %[[#INT:]] = OpTypeInt 32
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/atomic_load_store.ll b/llvm/test/CodeGen/SPIRV/transcoding/atomic_load_store.ll
index d013abc..3e5a3ac 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/atomic_load_store.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/atomic_load_store.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; Check 'LLVM ==> SPIR-V' conversion of atomic_load and atomic_store.
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/bitcast.ll b/llvm/test/CodeGen/SPIRV/transcoding/bitcast.ll
index 8dbf4d2..2c0fc39 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/bitcast.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/bitcast.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; Check the bitcast is translated back to bitcast
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/block_w_struct_return.ll b/llvm/test/CodeGen/SPIRV/transcoding/block_w_struct_return.ll
index 5ecd7f7..2249cbe 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/block_w_struct_return.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/block_w_struct_return.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV1_4
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; TODO(#60133): Requires updates following opaque pointer migration.
; XFAIL: *
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_calls.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_calls.ll
index 9b1ce76..0a02a8b 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/builtin_calls.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_calls.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-DAG: OpDecorate %[[#Id:]] BuiltIn GlobalInvocationId
; CHECK-SPIRV-DAG: OpDecorate %[[#Id:]] BuiltIn GlobalLinearId
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll
index 8286671..5074893 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpDecorate %[[#Id:]] BuiltIn GlobalLinearId
; CHECK-SPIRV: %[[#Id:]] = OpVariable %[[#]]
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_arithmetics.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_arithmetics.ll
index 22aa40c..d0c4dff 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_arithmetics.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_arithmetics.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-linux %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; The IR was generated from the following source:
;; #include <CL/sycl.hpp>
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_opt.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_opt.ll
index 5b3474f..3885f07 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_opt.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_opt.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-linux %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; The IR was generated from the following source:
;; #include <CL/sycl.hpp>
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/check_ro_qualifier.ll b/llvm/test/CodeGen/SPIRV/transcoding/check_ro_qualifier.ll
index 6de610b..824ca1b2 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/check_ro_qualifier.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/check_ro_qualifier.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: %[[#IMAGE_TYPE:]] = OpTypeImage
; CHECK-SPIRV: %[[#IMAGE_ARG:]] = OpFunctionParameter %[[#IMAGE_TYPE]]
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll b/llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll
index 52b7dac..d7e87c0 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll
@@ -19,6 +19,7 @@
;; }
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-DAG: OpCapability Sampled1D
; CHECK-SPIRV-DAG: OpCapability SampledBuffer
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/clk_event_t.ll b/llvm/test/CodeGen/SPIRV/transcoding/clk_event_t.ll
index 9054454..0cd75bb 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/clk_event_t.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/clk_event_t.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpTypeDeviceEvent
; CHECK-SPIRV: OpFunction
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/enqueue_kernel.ll b/llvm/test/CodeGen/SPIRV/transcoding/enqueue_kernel.ll
index cf124ec..d23b068 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/enqueue_kernel.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/enqueue_kernel.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; TODO(#60133): Requires updates following opaque pointer migration.
; XFAIL: *
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/explicit-conversions.ll b/llvm/test/CodeGen/SPIRV/transcoding/explicit-conversions.ll
index c186a81..49b84c1 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/explicit-conversions.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/explicit-conversions.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpSatConvertSToU
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/extract_insert_value.ll b/llvm/test/CodeGen/SPIRV/transcoding/extract_insert_value.ll
index fd29bc8..0ed1dc7 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/extract_insert_value.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/extract_insert_value.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; TODO(#60133): Requires updates following opaque pointer migration.
; XFAIL: *
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fadd.ll b/llvm/test/CodeGen/SPIRV/transcoding/fadd.ll
index 78d9a23..af76c0e 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fadd.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fadd.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#r1:]] "r1"
; CHECK-SPIRV: OpName %[[#r2:]] "r2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fclamp.ll b/llvm/test/CodeGen/SPIRV/transcoding/fclamp.ll
index cfdcc72..550ec1a 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fclamp.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fclamp.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: %[[#]] = OpExtInst %[[#]] %[[#]] fclamp
; CHECK-SPIRV-NOT: %[[#]] = OpExtInst %[[#]] %[[#]] clamp
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll b/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll
index 572ccc3..46eaba9 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#r1:]] "r1"
; CHECK-SPIRV: OpName %[[#r2:]] "r2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fdiv.ll b/llvm/test/CodeGen/SPIRV/transcoding/fdiv.ll
index d0ed564..79b7868 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fdiv.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fdiv.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#r1:]] "r1"
; CHECK-SPIRV: OpName %[[#r2:]] "r2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fmod.ll b/llvm/test/CodeGen/SPIRV/transcoding/fmod.ll
index f506787b..683b5c2 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fmod.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fmod.ll
@@ -2,6 +2,7 @@
;; { out = fmod( in1, in2 ); }
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: %[[#]] = OpExtInst %[[#]] %[[#]] fmod %[[#]] %[[#]]
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fmul.ll b/llvm/test/CodeGen/SPIRV/transcoding/fmul.ll
index 886077a..fdab29c 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fmul.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fmul.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#r1:]] "r1"
; CHECK-SPIRV: OpName %[[#r2:]] "r2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fneg.ll b/llvm/test/CodeGen/SPIRV/transcoding/fneg.ll
index e17601a..60bbfe6 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fneg.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fneg.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#r1:]] "r1"
; CHECK-SPIRV: OpName %[[#r2:]] "r2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fp_contract_reassoc_fast_mode.ll b/llvm/test/CodeGen/SPIRV/transcoding/fp_contract_reassoc_fast_mode.ll
index c035c35..974043c 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fp_contract_reassoc_fast_mode.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fp_contract_reassoc_fast_mode.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-NOT: OpCapability FPFastMathModeINTEL
; CHECK-SPIRV: OpName %[[#mu:]] "mul"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/frem.ll b/llvm/test/CodeGen/SPIRV/transcoding/frem.ll
index ecb8f6f..d36ba7f 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/frem.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/frem.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#r1:]] "r1"
; CHECK-SPIRV: OpName %[[#r2:]] "r2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fsub.ll b/llvm/test/CodeGen/SPIRV/transcoding/fsub.ll
index 99d0d0e..3677c00 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fsub.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fsub.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: OpName %[[#r1:]] "r1"
; CHECK-SPIRV: OpName %[[#r2:]] "r2"
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/get_image_num_mip_levels.ll b/llvm/test/CodeGen/SPIRV/transcoding/get_image_num_mip_levels.ll
index dc307c7..fd24196 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/get_image_num_mip_levels.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/get_image_num_mip_levels.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; Types:
; CHECK-DAG: %[[#INT:]] = OpTypeInt 32
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/global_block.ll b/llvm/test/CodeGen/SPIRV/transcoding/global_block.ll
index 2f44e19..ff1bec4 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/global_block.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/global_block.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV1_4
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; TODO(#60133): Requires updates following opaque pointer migration.
; XFAIL: *
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/group_ops.ll b/llvm/test/CodeGen/SPIRV/transcoding/group_ops.ll
index 6aa9faa..2412f40 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/group_ops.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/group_ops.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-DAG: %[[#int:]] = OpTypeInt 32 0
; CHECK-SPIRV-DAG: %[[#float:]] = OpTypeFloat 32
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/isequal.ll b/llvm/test/CodeGen/SPIRV/transcoding/isequal.ll
index 3c818af..c5f3f9e 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/isequal.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/isequal.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-NOT: OpSConvert
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/relationals_double.ll b/llvm/test/CodeGen/SPIRV/transcoding/relationals_double.ll
index f771854..de7673a 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/relationals_double.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/relationals_double.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks following SYCL relational builtins with double and double2
;; types:
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/relationals_float.ll b/llvm/test/CodeGen/SPIRV/transcoding/relationals_float.ll
index 1f55ceb..69a4a30 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/relationals_float.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/relationals_float.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks following SYCL relational builtins with float and float2
;; types:
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/relationals_half.ll b/llvm/test/CodeGen/SPIRV/transcoding/relationals_half.ll
index 864fb4f..d6a7fda 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/relationals_half.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/relationals_half.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
;; This test checks following SYCL relational builtins with half and half2 types:
;; isfinite, isinf, isnan, isnormal, signbit, isequal, isnotequal, isgreater
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll b/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll
index 3551030..e0172ec 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll
@@ -1,6 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
;
-; CHECK-SPIRV-DAG: %[[#i8:]] = OpTypeInt 8 0
; CHECK-SPIRV-DAG: %[[#i32:]] = OpTypeInt 32 0
; CHECK-SPIRV-DAG: %[[#one:]] = OpConstant %[[#i32]] 1
; CHECK-SPIRV-DAG: %[[#two:]] = OpConstant %[[#i32]] 2
@@ -13,7 +12,6 @@
; CHECK-SPIRV: %[[#test_arr2:]] = OpVariable %[[#const_i32x3_ptr]] UniformConstant %[[#test_arr_init]]
; CHECK-SPIRV: %[[#test_arr:]] = OpVariable %[[#const_i32x3_ptr]] UniformConstant %[[#test_arr_init]]
-; CHECK-SPIRV-DAG: %[[#const_i8_ptr:]] = OpTypePointer UniformConstant %[[#i8]]
; CHECK-SPIRV-DAG: %[[#i32x3_ptr:]] = OpTypePointer Function %[[#i32x3]]
; CHECK-SPIRV: %[[#arr:]] = OpVariable %[[#i32x3_ptr]] Function
diff --git a/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir b/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
index 3b308ce..adeec15b 100644
--- a/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
+++ b/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
@@ -25,6 +25,8 @@
name: autogen_SD21418
alignment: 4
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
registers:
- { id: 0, class: vr128bit }
- { id: 1, class: vr128bit }
diff --git a/llvm/test/CodeGen/SystemZ/atomic-load-06.ll b/llvm/test/CodeGen/SystemZ/atomic-load-06.ll
index c9c5504..60ff780 100644
--- a/llvm/test/CodeGen/SystemZ/atomic-load-06.ll
+++ b/llvm/test/CodeGen/SystemZ/atomic-load-06.ll
@@ -4,9 +4,7 @@
define float @f1(ptr %src) {
; CHECK-LABEL: f1:
-; CHECK: lgf [[R:%r[0-9]+]], 0(%r2)
-; CHECK: sllg [[R]], [[R]], 32
-; CHECK: ldgr %f0, [[R]]
+; CHECK: le %f0, 0(%r2)
; CHECK: br %r14
%val = load atomic float, ptr %src seq_cst, align 4
ret float %val
diff --git a/llvm/test/CodeGen/SystemZ/atomic-memops-fp128.ll b/llvm/test/CodeGen/SystemZ/atomic-memops-fp128.ll
new file mode 100644
index 0000000..8038329
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/atomic-memops-fp128.ll
@@ -0,0 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+;
+; Test fpext of atomic loads to fp128 without VectorEnhancements1 (using FP register pairs).
+
+define fp128 @f1(ptr %src) {
+; CHECK-LABEL: f1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lxeb %f0, 0(%r3)
+; CHECK-NEXT: std %f0, 0(%r2)
+; CHECK-NEXT: std %f2, 8(%r2)
+; CHECK-NEXT: br %r14
+ %V = load atomic float, ptr %src seq_cst, align 4
+ %Res = fpext float %V to fp128
+ ret fp128 %Res
+}
+
+define fp128 @f2(ptr %src) {
+; CHECK-LABEL: f2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lxdb %f0, 0(%r3)
+; CHECK-NEXT: std %f0, 0(%r2)
+; CHECK-NEXT: std %f2, 8(%r2)
+; CHECK-NEXT: br %r14
+ %V = load atomic double, ptr %src seq_cst, align 8
+ %Res = fpext double %V to fp128
+ ret fp128 %Res
+}
+
+
+
diff --git a/llvm/test/CodeGen/SystemZ/atomic-memops.ll b/llvm/test/CodeGen/SystemZ/atomic-memops.ll
new file mode 100644
index 0000000..0bc647a
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/atomic-memops.ll
@@ -0,0 +1,739 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z16 | FileCheck %s
+
+; Sign-extending atomic loads.
+define void @f1(ptr %src, ptr %dst) {
+; CHECK-LABEL: f1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lb %r0, 0(%r2)
+; CHECK-NEXT: sth %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %s = sext i8 %b to i16
+ store volatile i16 %s, ptr %dst
+ ret void
+}
+
+define void @f2(ptr %src, ptr %dst) {
+; CHECK-LABEL: f2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lb %r0, 0(%r2)
+; CHECK-NEXT: st %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %s = sext i8 %b to i32
+ store volatile i32 %s, ptr %dst
+ ret void
+}
+
+define void @f3(ptr %src, ptr %dst) {
+; CHECK-LABEL: f3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lgb %r0, 0(%r2)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %s = sext i8 %b to i64
+ store volatile i64 %s, ptr %dst
+ ret void
+}
+
+define void @f4(ptr %src, ptr %dst) {
+; CHECK-LABEL: f4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lh %r0, 0(%r2)
+; CHECK-NEXT: st %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i16, ptr %src seq_cst, align 2
+ %s = sext i16 %b to i32
+ store volatile i32 %s, ptr %dst
+ ret void
+}
+
+define void @f5(ptr %src, ptr %dst) {
+; CHECK-LABEL: f5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lgh %r0, 0(%r2)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i16, ptr %src seq_cst, align 2
+ %s = sext i16 %b to i64
+ store volatile i64 %s, ptr %dst
+ ret void
+}
+
+define void @f6(ptr %src, ptr %dst) {
+; CHECK-LABEL: f6:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lgf %r0, 0(%r2)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i32, ptr %src seq_cst, align 4
+ %s = sext i32 %b to i64
+ store volatile i64 %s, ptr %dst
+ ret void
+}
+
+; Zero-extending atomic loads.
+define void @f7(ptr %src, ptr %dst) {
+; CHECK-LABEL: f7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llc %r0, 0(%r2)
+; CHECK-NEXT: sth %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %z = zext i8 %b to i16
+ store volatile i16 %z, ptr %dst
+ ret void
+}
+
+define void @f8(ptr %src, ptr %dst) {
+; CHECK-LABEL: f8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llc %r0, 0(%r2)
+; CHECK-NEXT: st %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %z = zext i8 %b to i32
+ store volatile i32 %z, ptr %dst
+ ret void
+}
+
+define void @f9(ptr %src, ptr %dst) {
+; CHECK-LABEL: f9:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llgc %r0, 0(%r2)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %z = zext i8 %b to i64
+ store volatile i64 %z, ptr %dst
+ ret void
+}
+
+define void @f10(ptr %src, ptr %dst) {
+; CHECK-LABEL: f10:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llh %r0, 0(%r2)
+; CHECK-NEXT: st %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i16, ptr %src seq_cst, align 2
+ %z = zext i16 %b to i32
+ store volatile i32 %z, ptr %dst
+ ret void
+}
+
+define void @f11(ptr %src, ptr %dst) {
+; CHECK-LABEL: f11:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llgh %r0, 0(%r2)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i16, ptr %src seq_cst, align 2
+ %z = zext i16 %b to i64
+ store volatile i64 %z, ptr %dst
+ ret void
+}
+
+define void @f12(ptr %src, ptr %dst) {
+; CHECK-LABEL: f12:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llgf %r0, 0(%r2)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i32, ptr %src seq_cst, align 4
+ %z = zext i32 %b to i64
+ store volatile i64 %z, ptr %dst
+ ret void
+}
+
+; reg/mem
+define i64 @f13(i64 %a, ptr %src) {
+; CHECK-LABEL: f13:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ag %r2, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i64, ptr %src seq_cst, align 8
+ %add = add i64 %a, %b
+ ret i64 %add
+}
+
+; reg/mem op with extension from memory.
+define i64 @f14(i64 %a, ptr %src) {
+; CHECK-LABEL: f14:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slgf %r2, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i32, ptr %src seq_cst, align 4
+ %bext = zext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+define float @f15(float %f1, ptr %ptr, float %acc) {
+; CHECK-LABEL: f15:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maeb %f2, %f0, 0(%r2)
+; CHECK-NEXT: ldr %f0, %f2
+; CHECK-NEXT: br %r14
+ %f2 = load atomic float, ptr %ptr seq_cst, align 4
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
+ ret float %res
+}
+declare float @llvm.fma.f32(float %f1, float %f2, float %f3)
+
+define double @f15_b(ptr %src) {
+; CHECK-LABEL: f15_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ldeb %f0, 0(%r2)
+; CHECK-NEXT: br %r14
+ %V = load atomic float, ptr %src seq_cst, align 4
+ %Res = fpext float %V to double
+ ret double %Res
+}
+
+define fp128 @f15_c(ptr %src) {
+; CHECK-LABEL: f15_c:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lde %f0, 0(%r3)
+; CHECK-NEXT: ldebr %f0, %f0
+; CHECK-NEXT: wflld %v0, %f0
+; CHECK-NEXT: vst %v0, 0(%r2), 3
+; CHECK-NEXT: br %r14
+ %V = load atomic float, ptr %src seq_cst, align 4
+ %Res = fpext float %V to fp128
+ ret fp128 %Res
+}
+
+define fp128 @f15_d(ptr %src) {
+; CHECK-LABEL: f15_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld %f0, 0(%r3)
+; CHECK-NEXT: wflld %v0, %f0
+; CHECK-NEXT: vst %v0, 0(%r2), 3
+; CHECK-NEXT: br %r14
+ %V = load atomic double, ptr %src seq_cst, align 8
+ %Res = fpext double %V to fp128
+ ret fp128 %Res
+}
+
+; Do it twice for good measure given the involved DAG combines.
+define void @f16(ptr %src, ptr %dst) {
+; CHECK-LABEL: f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llgc %r0, 0(%r2)
+; CHECK-NEXT: lgbr %r1, %r0
+; CHECK-NEXT: stg %r1, 0(%r3)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: llgc %r0, 0(%r2)
+; CHECK-NEXT: lgbr %r1, %r0
+; CHECK-NEXT: stg %r1, 0(%r3)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %s = sext i8 %b to i64
+ %z = zext i8 %b to i64
+ store volatile i64 %s, ptr %dst
+ store volatile i64 %z, ptr %dst
+
+ %b2 = load atomic i8, ptr %src seq_cst, align 1
+ %s2 = sext i8 %b2 to i64
+ %z2 = zext i8 %b2 to i64
+ store volatile i64 %s2, ptr %dst
+ store volatile i64 %z2, ptr %dst
+
+ ret void
+}
+
+define void @f16_b(ptr %src, ptr %dst) {
+; CHECK-LABEL: f16_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lgb %r0, 0(%r2)
+; CHECK-NEXT: sth %r0, 0(%r3)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %s = sext i8 %b to i16
+ store volatile i16 %s, ptr %dst
+
+ %s2 = sext i8 %b to i64
+ store volatile i64 %s2, ptr %dst
+
+ ret void
+}
+
+define void @f16_c(ptr %src, ptr %dst) {
+; CHECK-LABEL: f16_c:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llgc %r0, 0(%r2)
+; CHECK-NEXT: sth %r0, 0(%r3)
+; CHECK-NEXT: stg %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %z = zext i8 %b to i16
+ store volatile i16 %z, ptr %dst
+
+ %z2 = zext i8 %b to i64
+ store volatile i64 %z2, ptr %dst
+
+ ret void
+}
+
+; Check that two i8 loads use a reg/reg op.
+define i8 @f16_d(ptr %src, ptr %src2) {
+; CHECK-LABEL: f16_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lb %r2, 0(%r2)
+; CHECK-NEXT: lb %r0, 0(%r3)
+; CHECK-NEXT: ar %r2, %r0
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %b2 = load atomic i8, ptr %src2 seq_cst, align 1
+ %add = add i8 %b, %b2
+ ret i8 %add
+}
+
+; Binary operations on a byte in memory, with an atomic load.
+define void @f17(ptr %ptr) {
+; CHECK-LABEL: f17:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ni 0(%r2), 1
+; CHECK-NEXT: br %r14
+ %val = load atomic i8, ptr %ptr seq_cst, align 1
+ %xor = and i8 %val, -255
+ store i8 %xor, ptr %ptr
+ ret void
+}
+
+define void @f18(ptr %src) {
+; CHECK-LABEL: f18:
+; CHECK: # %bb.0:
+; CHECK-NEXT: oiy 4096(%r2), 1
+; CHECK-NEXT: br %r14
+ %ptr = getelementptr i8, ptr %src, i64 4096
+ %val = load atomic i8, ptr %ptr seq_cst, align 1
+ %xor = or i8 %val, -255
+ store i8 %xor, ptr %ptr
+ ret void
+}
+
+define void @f19(ptr %src) {
+; CHECK-LABEL: f19:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xi 4095(%r2), 1
+; CHECK-NEXT: br %r14
+ %ptr = getelementptr i8, ptr %src, i64 4095
+ %val = load atomic i8, ptr %ptr seq_cst, align 1
+ %xor = xor i8 %val, -255
+ store i8 %xor, ptr %ptr
+ ret void
+}
+
+; TM
+define double @f20(ptr %src, double %a, double %b) {
+; CHECK-LABEL: f20:
+; CHECK: # %bb.0:
+; CHECK-NEXT: tm 0(%r2), 1
+; CHECK-NEXT: je .LBB25_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: ldr %f2, %f0
+; CHECK-NEXT: .LBB25_2:
+; CHECK-NEXT: ldr %f0, %f2
+; CHECK-NEXT: br %r14
+ %byte = load atomic i8, ptr %src seq_cst, align 1
+ %and = and i8 %byte, 1
+ %cmp = icmp eq i8 %and, 0
+ %res = select i1 %cmp, double %b, double %a
+ ret double %res
+}
+
+; vector load and replicate
+define void @f21(ptr %src, ptr %dst) {
+; CHECK-LABEL: f21:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vlrepb %v0, 0(%r2)
+; CHECK-NEXT: vst %v0, 0(%r3), 3
+; CHECK-NEXT: br %r14
+ %b = load atomic i8, ptr %src seq_cst, align 1
+ %v = insertelement <16 x i8> undef, i8 %b, i32 1
+ store volatile <16 x i8> %v, ptr %dst
+ ret void
+}
+
+define void @f22(ptr %src, ptr %dst) {
+; CHECK-LABEL: f22:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vlreph %v0, 0(%r2)
+; CHECK-NEXT: vst %v0, 0(%r3), 3
+; CHECK-NEXT: br %r14
+ %b = load atomic i16, ptr %src seq_cst, align 2
+ %v = insertelement <8 x i16> undef, i16 %b, i32 1
+ store volatile <8 x i16> %v, ptr %dst
+ ret void
+}
+
+define void @f23(ptr %src, ptr %dst) {
+; CHECK-LABEL: f23:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vlrepf %v0, 0(%r2)
+; CHECK-NEXT: vst %v0, 0(%r3), 3
+; CHECK-NEXT: br %r14
+ %b = load atomic i32, ptr %src seq_cst, align 4
+ %v = insertelement <4 x i32> undef, i32 %b, i32 2
+ store volatile <4 x i32> %v, ptr %dst
+ ret void
+}
+
+define void @f24(ptr %src, ptr %dst) {
+; CHECK-LABEL: f24:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vlrepg %v0, 0(%r2)
+; CHECK-NEXT: vst %v0, 0(%r3), 3
+; CHECK-NEXT: br %r14
+ %b = load atomic i64, ptr %src seq_cst, align 8
+ %v = insertelement <2 x i64> undef, i64 %b, i32 0
+ store volatile <2 x i64> %v, ptr %dst
+ ret void
+}
+
+define void @f25(ptr %src, ptr %dst) {
+; CHECK-LABEL: f25:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vlrepf %v0, 0(%r2)
+; CHECK-NEXT: vst %v0, 0(%r3), 3
+; CHECK-NEXT: br %r14
+ %b = load atomic float, ptr %src seq_cst, align 4
+ %v = insertelement <4 x float> undef, float %b, i32 1
+ store volatile <4 x float> %v, ptr %dst
+ ret void
+}
+
+; Do *not* use vlrep for an extending load.
+define <4 x i32> @f25_c(ptr %ptr) {
+; CHECK-LABEL: f25_c:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lb %r0, 0(%r2)
+; CHECK-NEXT: vlvgp %v0, %r0, %r0
+; CHECK-NEXT: vrepf %v24, %v0, 1
+; CHECK-NEXT: br %r14
+ %L = load atomic i8, ptr %ptr seq_cst, align 4
+ %S = sext i8 %L to i32
+ %val = insertelement <4 x i32> undef, i32 %S, i32 0
+ %ret = shufflevector <4 x i32> %val, <4 x i32> undef,
+ <4 x i32> zeroinitializer
+ ret <4 x i32> %ret
+}
+
+; Do *not* use vlrep if there is another scalar use.
+define <4 x i32> @f25_d(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: f25_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: l %r0, 0(%r2)
+; CHECK-NEXT: vlvgp %v0, %r0, %r0
+; CHECK-NEXT: vrepf %v24, %v0, 1
+; CHECK-NEXT: st %r0, 0(%r3)
+; CHECK-NEXT: br %r14
+ %L = load atomic i32, ptr %ptr seq_cst, align 4
+ store i32 %L, ptr %dst, align 4
+ %val = insertelement <4 x i32> undef, i32 %L, i32 0
+ %ret = shufflevector <4 x i32> %val, <4 x i32> undef,
+ <4 x i32> zeroinitializer
+ ret <4 x i32> %ret
+}
+
+define void @f26(ptr %src, ptr %dst) {
+; CHECK-LABEL: f26:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vlrepg %v0, 0(%r2)
+; CHECK-NEXT: vst %v0, 0(%r3), 3
+; CHECK-NEXT: br %r14
+ %b = load atomic double, ptr %src seq_cst, align 8
+ %v = insertelement <2 x double> undef, double %b, i32 0
+ store volatile <2 x double> %v, ptr %dst
+ ret void
+}
+
+; Vector Load logical element and zero.
+define <16 x i8> @f27(ptr %ptr) {
+; CHECK-LABEL: f27:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vllezb %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load atomic i8, ptr %ptr seq_cst, align 1
+ %ret = insertelement <16 x i8> zeroinitializer, i8 %val, i32 7
+ ret <16 x i8> %ret
+}
+
+define <8 x i16> @f28(ptr %ptr) {
+; CHECK-LABEL: f28:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vllezh %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load atomic i16, ptr %ptr seq_cst, align 2
+ %ret = insertelement <8 x i16> zeroinitializer, i16 %val, i32 3
+ ret <8 x i16> %ret
+}
+
+define <4 x i32> @f29(ptr %ptr) {
+; CHECK-LABEL: f29:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vllezf %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load atomic i32, ptr %ptr seq_cst, align 4
+ %ret = insertelement <4 x i32> zeroinitializer, i32 %val, i32 1
+ ret <4 x i32> %ret
+}
+
+define <2 x i64> @f30(ptr %ptr) {
+; CHECK-LABEL: f30:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vllezg %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load atomic i64, ptr %ptr seq_cst, align 8
+ %ret = insertelement <2 x i64> zeroinitializer, i64 %val, i32 0
+ ret <2 x i64> %ret
+}
+
+define <4 x i32> @f31(ptr %ptr) {
+; CHECK-LABEL: f31:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vllezlf %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load atomic i32, ptr %ptr seq_cst, align 4
+ %ret = insertelement <4 x i32> zeroinitializer, i32 %val, i32 0
+ ret <4 x i32> %ret
+}
+
+define <4 x float> @f32(ptr %ptr) {
+; CHECK-LABEL: f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vllezlf %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load atomic float, ptr %ptr seq_cst, align 4
+ %ret = insertelement <4 x float> zeroinitializer, float %val, i32 0
+ ret <4 x float> %ret
+}
+
+; Vector Load element.
+define <16 x i8> @f33(<16 x i8> %val, ptr %ptr) {
+; CHECK-LABEL: f33:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vleb %v24, 0(%r2), 0
+; CHECK-NEXT: br %r14
+ %element = load atomic i8, ptr %ptr seq_cst, align 1
+ %ret = insertelement <16 x i8> %val, i8 %element, i32 0
+ ret <16 x i8> %ret
+}
+
+define <8 x i16> @f34(<8 x i16> %val, ptr %ptr) {
+; CHECK-LABEL: f34:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vleh %v24, 0(%r2), 0
+; CHECK-NEXT: br %r14
+ %element = load atomic i16, ptr %ptr seq_cst, align 2
+ %ret = insertelement <8 x i16> %val, i16 %element, i32 0
+ ret <8 x i16> %ret
+}
+
+define <4 x i32> @f35(<4 x i32> %val, ptr %ptr) {
+; CHECK-LABEL: f35:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vlef %v24, 0(%r2), 0
+; CHECK-NEXT: br %r14
+ %element = load atomic i32, ptr %ptr seq_cst, align 4
+ %ret = insertelement <4 x i32> %val, i32 %element, i32 0
+ ret <4 x i32> %ret
+}
+
+define <2 x i64> @f36(<2 x i64> %val, ptr %ptr) {
+; CHECK-LABEL: f36:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vleg %v24, 0(%r2), 0
+; CHECK-NEXT: br %r14
+ %element = load atomic i64, ptr %ptr seq_cst, align 8
+ %ret = insertelement <2 x i64> %val, i64 %element, i32 0
+ ret <2 x i64> %ret
+}
+
+; Test operation on memory involving atomic load and store.
+define void @f39(ptr %ptr) {
+; CHECK-LABEL: f39:
+; CHECK: # %bb.0:
+; CHECK-NEXT: oi 0(%r2), 1
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %val = load atomic i8, ptr %ptr seq_cst, align 1
+ %or = or i8 %val, -255
+ store atomic i8 %or, ptr %ptr seq_cst, align 1
+ ret void
+}
+
+; Some atomic stores of immediates.
+define void @f40(ptr %ptr) {
+; CHECK-LABEL: f40:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mvi 0(%r2), 128
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ store atomic i8 128, ptr %ptr seq_cst, align 1
+ ret void
+}
+
+define void @f41(ptr %ptr) {
+; CHECK-LABEL: f41:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mvhi 0(%r2), -1
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ store atomic i32 4294967295, ptr %ptr seq_cst, align 4
+ ret void
+}
+
+define void @f42(ptr %ptr) {
+; CHECK-LABEL: f42:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mvhi 0(%r2), -1
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ store atomic i32 4294967295, ptr %ptr seq_cst, align 4
+ ret void
+}
+
+define void @f43(ptr %ptr) {
+; CHECK-LABEL: f43:
+; CHECK: # %bb.0:
+; CHECK-NEXT: llihl %r0, 255
+; CHECK-NEXT: oilf %r0, 4294967295
+; CHECK-NEXT: stg %r0, 0(%r2)
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ store atomic i64 1099511627775, ptr %ptr seq_cst, align 8
+ ret void
+}
+
+define void @f44(ptr %ptr) {
+; CHECK-LABEL: f44:
+; CHECK: # %bb.0:
+; CHECK-NEXT: larl %r1, .LCPI49_0
+; CHECK-NEXT: ld %f0, 0(%r1)
+; CHECK-NEXT: std %f0, 0(%r2)
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ store atomic double 0x3ff0000020000000, ptr %ptr seq_cst, align 8
+ ret void
+}
+
+; Vector Store Element.
+define void @f45(<16 x i8> %val, ptr %ptr) {
+; CHECK-LABEL: f45:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsteb %v24, 0(%r2), 0
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %element = extractelement <16 x i8> %val, i32 0
+ store atomic i8 %element, ptr %ptr seq_cst, align 1
+ ret void
+}
+
+define void @f46(<8 x i16> %val, ptr %base) {
+; CHECK-LABEL: f46:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsteh %v24, 4094(%r2), 5
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %ptr = getelementptr i16, ptr %base, i32 2047
+ %element = extractelement <8 x i16> %val, i32 5
+ store atomic i16 %element, ptr %ptr seq_cst, align 2
+ ret void
+}
+
+define void @f47(<4 x i32> %val, ptr %ptr) {
+; CHECK-LABEL: f47:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vstef %v24, 0(%r2), 3
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %element = extractelement <4 x i32> %val, i32 3
+ store atomic i32 %element, ptr %ptr seq_cst, align 4
+ ret void
+}
+
+define void @f48(<2 x i64> %val, ptr %ptr) {
+; CHECK-LABEL: f48:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsteg %v24, 0(%r2), 1
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %element = extractelement <2 x i64> %val, i32 1
+ store atomic i64 %element, ptr %ptr seq_cst, align 8
+ ret void
+}
+
+define void @f49(<4 x float> %val, ptr %ptr) {
+; CHECK-LABEL: f49:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vstef %v24, 0(%r2), 0
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %element = extractelement <4 x float> %val, i32 0
+ store atomic float %element, ptr %ptr seq_cst, align 4
+ ret void
+}
+
+define void @f50(<2 x double> %val, ptr %ptr) {
+; CHECK-LABEL: f50:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsteg %v24, 0(%r2), 1
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %element = extractelement <2 x double> %val, i32 1
+ store atomic double %element, ptr %ptr seq_cst, align 8
+ ret void
+}
+
+define void @f51(ptr %src, ptr %dst) {
+; CHECK-LABEL: f51:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lpq %r0, 0(%r2)
+; CHECK-NEXT: vlvgp %v0, %r0, %r1
+; CHECK-NEXT: vgmf %v1, 2, 8
+; CHECK-NEXT: aebr %f0, %f1
+; CHECK-NEXT: ste %f0, 0(%r3)
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %atomic-load = load atomic i128, ptr %src seq_cst, align 16
+ %b0 = bitcast i128 %atomic-load to <4 x float>
+ %vecext = extractelement <4 x float> %b0, i64 0
+ %add = fadd float %vecext, 1.000000e+00
+ store atomic float %add, ptr %dst seq_cst, align 4
+ ret void
+}
+
+define void @f52(ptr %src, ptr %dst) {
+; CHECK-LABEL: f52:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lpq %r0, 0(%r2)
+; CHECK-NEXT: vlvgp %v0, %r0, %r1
+; CHECK-NEXT: vgmg %v1, 2, 11
+; CHECK-NEXT: adbr %f0, %f1
+; CHECK-NEXT: std %f0, 0(%r3)
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %atomic-load = load atomic i128, ptr %src seq_cst, align 16
+ %b0 = bitcast i128 %atomic-load to <2 x double>
+ %vecext = extractelement <2 x double> %b0, i64 0
+ %add = fadd double %vecext, 1.000000e+00
+ store atomic double %add, ptr %dst seq_cst, align 8
+ ret void
+}
+
+define void @fun58(ptr %ptr, i64 %arg) {
+; CHECK-LABEL: fun58:
+; CHECK: # %bb.0:
+; CHECK-NEXT: st %r3, 0(%r2)
+; CHECK-NEXT: bcr 14, %r0
+; CHECK-NEXT: br %r14
+ %res = trunc i64 %arg to i32
+ store atomic i32 %res, ptr %ptr seq_cst, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/SystemZ/atomic-store-06.ll b/llvm/test/CodeGen/SystemZ/atomic-store-06.ll
index b748bfc..91e324b 100644
--- a/llvm/test/CodeGen/SystemZ/atomic-store-06.ll
+++ b/llvm/test/CodeGen/SystemZ/atomic-store-06.ll
@@ -6,10 +6,7 @@
define void @f1(ptr %src, float %val) {
; CHECK-LABEL: f1:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f0s killed $f0s def $f0d
-; CHECK-NEXT: lgdr %r0, %f0
-; CHECK-NEXT: srlg %r0, %r0, 32
-; CHECK-NEXT: st %r0, 0(%r2)
+; CHECK-NEXT: ste %f0, 0(%r2)
; CHECK-NEXT: bcr 15, %r0
; CHECK-NEXT: br %r14
store atomic float %val, ptr %src seq_cst, align 4
diff --git a/llvm/test/CodeGen/SystemZ/call-zos-01.ll b/llvm/test/CodeGen/SystemZ/call-zos-01.ll
index 7777686..fc7a85c 100644
--- a/llvm/test/CodeGen/SystemZ/call-zos-01.ll
+++ b/llvm/test/CodeGen/SystemZ/call-zos-01.ll
@@ -104,7 +104,7 @@ entry:
}
; CHECK-LABEL: call_double:
-; CHECK: larl [[GENREG:[0-9]+]], @{{CPI[0-9]+_[0-9]+}}
+; CHECK: larl [[GENREG:[0-9]+]], L#{{CPI[0-9]+_[0-9]+}}
; CHECK-NEXT: ld 0, 0([[GENREG]])
define double @call_double() {
entry:
@@ -113,7 +113,7 @@ entry:
}
; CHECK-LABEL: call_longdouble:
-; CHECK: larl [[GENREG:[0-9]+]], @{{CPI[0-9]+_[0-9]+}}
+; CHECK: larl [[GENREG:[0-9]+]], L#{{CPI[0-9]+_[0-9]+}}
; CHECK-NEXT: ld 0, 0([[GENREG]])
; CHECK-NEXT: ld 2, 8([[GENREG]])
define fp128 @call_longdouble() {
@@ -123,7 +123,7 @@ entry:
}
; CHECK-LABEL: call_floats0
-; CHECK: larl [[GENREG:[0-9]+]], @{{CPI[0-9]+_[0-9]+}}
+; CHECK: larl [[GENREG:[0-9]+]], L#{{CPI[0-9]+_[0-9]+}}
; CHECK-NEXT: ld 1, 0([[GENREG]])
; CHECK-NEXT: ld 3, 8([[GENREG]])
; CHECK: lxr 5, 0
@@ -146,7 +146,7 @@ entry:
}
; CHECK-LABEL: pass_float:
-; CHECK: larl 1, @{{CPI[0-9]+_[0-9]+}}
+; CHECK: larl 1, L#{{CPI[0-9]+_[0-9]+}}
; CHECK: aeb 0, 0(1)
define float @pass_float(float %arg) {
entry:
@@ -155,7 +155,7 @@ entry:
}
; CHECK-LABEL: pass_double:
-; CHECK: larl 1, @{{CPI[0-9]+_[0-9]+}}
+; CHECK: larl 1, L#{{CPI[0-9]+_[0-9]+}}
; CHECK: adb 0, 0(1)
define double @pass_double(double %arg) {
entry:
@@ -164,7 +164,7 @@ entry:
}
; CHECK-LABEL: pass_longdouble
-; CHECK: larl 1, @{{CPI[0-9]+_[0-9]+}}
+; CHECK: larl 1, L#{{CPI[0-9]+_[0-9]+}}
; CHECK: lxdb 1, 0(1)
; CHECK: axbr 0, 1
define fp128 @pass_longdouble(fp128 %arg) {
@@ -174,7 +174,7 @@ entry:
}
; CHECK-LABEL: pass_floats0
-; CHECK: larl 1, @{{CPI[0-9]+_[0-9]+}}
+; CHECK: larl 1, L#{{CPI[0-9]+_[0-9]+}}
; CHECK: axbr 0, 4
; CHECK: axbr 1, 0
; CHECK: cxbr 1, 5
diff --git a/llvm/test/CodeGen/SystemZ/call-zos-i128.ll b/llvm/test/CodeGen/SystemZ/call-zos-i128.ll
index ccdac16..7754833 100644
--- a/llvm/test/CodeGen/SystemZ/call-zos-i128.ll
+++ b/llvm/test/CodeGen/SystemZ/call-zos-i128.ll
@@ -3,10 +3,10 @@
; RUN: llc < %s -mtriple=s390x-ibm-zos -mcpu=z13 | FileCheck %s
; CHECK-LABEL: call_i128:
-; CHECK-DAG: larl 1, @CPI0_0
+; CHECK-DAG: larl 1, L#CPI0_0
; CHECK-DAG: vl 0, 0(1), 3
; CHECK-DAG: vst 0, 2256(4), 3
-; CHECK-DAG: larl 1, @CPI0_1
+; CHECK-DAG: larl 1, L#CPI0_1
; CHECK-DAG: vl 0, 0(1), 3
; CHECK-DAG: vst 0, 2272(4), 3
; CHECK-DAG: la 1, 2288(4)
diff --git a/llvm/test/CodeGen/SystemZ/call-zos-vararg.ll b/llvm/test/CodeGen/SystemZ/call-zos-vararg.ll
index bde59a6..81aedc1 100644
--- a/llvm/test/CodeGen/SystemZ/call-zos-vararg.ll
+++ b/llvm/test/CodeGen/SystemZ/call-zos-vararg.ll
@@ -88,13 +88,15 @@ entry:
ret i64 %retval
}
+;; TODO: The extra COPY after LGDR is unnecessary (machine-scheduler introduces the overlap).
; CHECK-LABEL: call_vararg_both0:
; CHECK: stmg 6, 7, 1872(4)
; CHECK-NEXT: aghi 4, -192
; CHECK-NEXT: lg 6, 40(5)
; CHECK-NEXT: lg 5, 32(5)
+; CHECK-NEXT: lgdr 0, 0
; CHECK-NEXT: lgr 2, 1
-; CHECK-NEXT: lgdr 1, 0
+; CHECK-NEXT: lgr 1, 0
; CHECK-NEXT: basr 7, 6
; CHECK-NEXT: bcr 0, 0
; CHECK-NEXT: lg 7, 2072(4)
@@ -108,7 +110,7 @@ define i64 @call_vararg_both0(i64 %arg0, double %arg1) {
; CHECK-LABEL: call_vararg_long_double0:
; CHECK: stmg 6, 7, 1872(4)
; CHECK-NEXT: aghi 4, -192
-; CHECK-NEXT: larl 1, @CPI5_0
+; CHECK-NEXT: larl 1, L#CPI5_0
; CHECK-NEXT: ld 0, 0(1)
; CHECK-NEXT: ld 2, 8(1)
; CHECK-NEXT: lg 6, 8(5)
@@ -202,7 +204,7 @@ define void @call_vec_vararg_test0(<2 x double> %v) {
}
; ARCH12-LABEL: call_vec_vararg_test1
-; ARCH12: larl 1, @CPI10_0
+; ARCH12: larl 1, L#CPI10_0
; ARCH12: vl 0, 0(1), 3
; ARCH12: vlgvg 3, 24, 0
; ARCH12: vrepg 2, 0, 1
@@ -294,7 +296,7 @@ entry:
; CHECK-NEXT: aghi 4, -192
; CHECK-NEXT: lg 6, 72(5)
; CHECK-NEXT: lg 5, 64(5)
-; CHECK-NEXT: larl 1, @CPI17_0
+; CHECK-NEXT: larl 1, L#CPI17_0
; CHECK-NEXT: le 0, 0(1)
; CHECK-NEXT: llihf 0, 1073692672
; CHECK-NEXT: llihh 2, 16384
diff --git a/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir b/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
index 7ff7d9b..197c3d8 100644
--- a/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
+++ b/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
@@ -157,6 +157,7 @@ registers:
- { id: 129, class: grx32bit }
- { id: 130, class: fp64bit }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0:
diff --git a/llvm/test/CodeGen/SystemZ/cond-move-04.mir b/llvm/test/CodeGen/SystemZ/cond-move-04.mir
index 97aa00f..ab4a14c 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-04.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-04.mir
@@ -53,6 +53,7 @@ registers:
- { id: 10, class: gr64bit }
- { id: 11, class: gr32bit }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0 (%ir-block.1):
@@ -64,12 +65,10 @@ body: |
CHIMux %3, 0, implicit-def $cc
%0 = LOCRMux undef %0, %5, 14, 6, implicit $cc
%0 = LOCRMux %0, %2, 14, 6, implicit killed $cc
- ADJCALLSTACKDOWN 0, 0
%7 = LGFR %0
$r3d = LGHI 0
$r4d = COPY %7
CallBRASL @foo, undef $r2d, killed $r3d, killed $r4d, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc, implicit-def dead $r2d
- ADJCALLSTACKUP 0, 0
J %bb.1
...
diff --git a/llvm/test/CodeGen/SystemZ/cond-move-08.mir b/llvm/test/CodeGen/SystemZ/cond-move-08.mir
index 93aa5626..2ea67dc 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-08.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-08.mir
@@ -116,6 +116,7 @@ registers:
- { id: 27, class: grx32bit }
- { id: 28, class: addr64bit }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.bb5:
@@ -154,9 +155,7 @@ body: |
J %bb.4
bb.4.bb33:
- ADJCALLSTACKDOWN 0, 0
CallBRASL @fun, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc
- ADJCALLSTACKUP 0, 0
STRL %4, @globvar :: (store (s32) into @globvar)
CLFIMux undef %23:grx32bit, 1, implicit-def $cc
%25:grx32bit = LHIMux 0
diff --git a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir
index 37e2980..8a7929c 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir
@@ -30,6 +30,7 @@ registers:
- { id: 11, class: gr32bit }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
@@ -44,11 +45,9 @@ body: |
%11:gr32bit = SELRMux %8, %9:grx32bit, 14, 6, implicit killed $cc
CHIMux %6, 2, implicit-def $cc
%0:gr32bit = SELRMux %11, %5, 14, 8, implicit killed $cc
- ADJCALLSTACKDOWN 0, 0
%10:gr64bit = LGFR %0
$r2d = COPY %10
CallBRASL @foo, killed $r2d, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc, implicit $fpc
- ADJCALLSTACKUP 0, 0
J %bb.1
...
diff --git a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir
index e7e1eaf..009fd6c 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir
@@ -192,6 +192,7 @@ liveins:
- { reg: '$r2d', virtual-reg: '%31' }
- { reg: '$r3d', virtual-reg: '%32' }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.bb:
@@ -199,18 +200,12 @@ body: |
%32:gr64bit = COPY $r3d
%0:gr64bit = COPY $r2d
- ADJCALLSTACKDOWN 0, 0
CallBRASL @sre_malloc, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc, implicit-def $r2d
%1:addr64bit = COPY $r2d
- ADJCALLSTACKUP 0, 0
- ADJCALLSTACKDOWN 0, 0
CallBRASL @sre_malloc, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc, implicit-def $r2d
%2:addr64bit = COPY $r2d
- ADJCALLSTACKUP 0, 0
%3:gr32bit = AHIMuxK %0.subreg_l32, -1, implicit-def dead $cc
- ADJCALLSTACKDOWN 0, 0
CallBRASL @malloc, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc
- ADJCALLSTACKUP 0, 0
%55:gr32bit = AHIMuxK %0.subreg_l32, 3, implicit-def dead $cc
%56:addr64bit = LGHI 0
%57:gr64bit = COPY %0
diff --git a/llvm/test/CodeGen/SystemZ/frame-28.mir b/llvm/test/CodeGen/SystemZ/frame-28.mir
index dd5933a..254b8a2c 100644
--- a/llvm/test/CodeGen/SystemZ/frame-28.mir
+++ b/llvm/test/CodeGen/SystemZ/frame-28.mir
@@ -162,6 +162,8 @@ body: |
---
name: fun4
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, size: 5000 }
- { id: 1, size: 2500 }
@@ -177,9 +179,7 @@ body: |
VST64 renamable $f16d, %stack.0, 0, $noreg
VST64 renamable $f16d, %stack.0, 0, $noreg
VST64 renamable $f16d, %stack.1, 0, $noreg
- ADJCALLSTACKDOWN 0, 0
CallBRASL @foo, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc, implicit $fpc, implicit-def $r2l
- ADJCALLSTACKUP 0, 0
$f17d = IMPLICIT_DEF
VST64 renamable $f17d, %stack.1, 0, $noreg
Return
diff --git a/llvm/test/CodeGen/SystemZ/frame-adjstack.ll b/llvm/test/CodeGen/SystemZ/frame-adjstack.ll
new file mode 100644
index 0000000..7edacaa
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/frame-adjstack.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -verify-machineinstrs | FileCheck %s
+;
+; Test that inserting a new MBB near a call during finalize isel custom
+; insertion does not cause all frame instructions to be missed. That would
+; result in failing to set the AdjustsStack flag.
+
+; CHECK-LABEL: fun
+define void @fun(i1 %cc) {
+ %sel = select i1 %cc, i32 5, i32 0
+ tail call void @input_report_abs(i32 %sel)
+ %sel2 = select i1 %cc, i32 6, i32 1
+ tail call void @input_report_abs(i32 %sel2)
+ ret void
+}
+
+declare void @input_report_abs(i32)
diff --git a/llvm/test/CodeGen/SystemZ/int-cmp-56.mir b/llvm/test/CodeGen/SystemZ/int-cmp-56.mir
index e52fd44..3e00b60 100644
--- a/llvm/test/CodeGen/SystemZ/int-cmp-56.mir
+++ b/llvm/test/CodeGen/SystemZ/int-cmp-56.mir
@@ -48,6 +48,7 @@ liveins:
- { reg: '$r2d', virtual-reg: '%0' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
@@ -125,6 +126,7 @@ liveins:
- { reg: '$r2d', virtual-reg: '%0' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
@@ -202,6 +204,7 @@ liveins:
- { reg: '$r2d', virtual-reg: '%0' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
@@ -279,6 +282,7 @@ liveins:
- { reg: '$r2d', virtual-reg: '%0' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/CodeGen/SystemZ/int-usub-12.ll b/llvm/test/CodeGen/SystemZ/int-usub-12.ll
index c39a6da..147fbfd 100644
--- a/llvm/test/CodeGen/SystemZ/int-usub-12.ll
+++ b/llvm/test/CodeGen/SystemZ/int-usub-12.ll
@@ -11,6 +11,7 @@ define zeroext i1 @f1(i128 %a, i128 %b, ptr %res) {
; CHECK-NEXT: vscbiq %v2, %v1, %v0
; CHECK-NEXT: vlgvg %r2, %v2, 1
; CHECK-NEXT: vsq %v0, %v1, %v0
+; CHECK-NEXT: xilf %r2, 1
; CHECK-NEXT: vst %v0, 0(%r4), 3
; CHECK-NEXT: br %r14
%t = call {i128, i1} @llvm.usub.with.overflow.i128(i128 %a, i128 %b)
@@ -27,6 +28,7 @@ define zeroext i1 @f2(i128 %a, i128 %b) {
; CHECK-NEXT: vl %v1, 0(%r2), 3
; CHECK-NEXT: vscbiq %v0, %v1, %v0
; CHECK-NEXT: vlgvg %r2, %v0, 1
+; CHECK-NEXT: xilf %r2, 1
; CHECK-NEXT: br %r14
%t = call {i128, i1} @llvm.usub.with.overflow.i128(i128 %a, i128 %b)
%obit = extractvalue {i128, i1} %t, 1
@@ -46,5 +48,25 @@ define i128 @f3(i128 %a, i128 %b) {
ret i128 %val
}
+define i128 @f4(i128 %a, i128 %b) {
+; CHECK-LABEL: f4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vl %v0, 0(%r4), 3
+; CHECK-NEXT: vl %v1, 0(%r3), 3
+; CHECK-NEXT: vscbiq %v2, %v1, %v0
+; CHECK-NEXT: vlgvf %r0, %v2, 3
+; CHECK-NEXT: vgbm %v2, 0
+; CHECK-NEXT: xilf %r0, 1
+; CHECK-NEXT: jl .LBB3_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: vsq %v2, %v1, %v0
+; CHECK-NEXT: .LBB3_2:
+; CHECK-NEXT: vst %v2, 0(%r2), 3
+; CHECK-NEXT: br %r14
+ %val = call i128 @llvm.usub.sat.i128(i128 %a, i128 %b)
+ ret i128 %val
+}
+
declare {i128, i1} @llvm.usub.with.overflow.i128(i128, i128) nounwind readnone
+declare i128 @llvm.usub.sat.i128(i128, i128) nounwind readnone
diff --git a/llvm/test/CodeGen/SystemZ/int-usub-13.ll b/llvm/test/CodeGen/SystemZ/int-usub-13.ll
index 637e1a8..794af3b 100644
--- a/llvm/test/CodeGen/SystemZ/int-usub-13.ll
+++ b/llvm/test/CodeGen/SystemZ/int-usub-13.ll
@@ -15,6 +15,7 @@ define zeroext i1 @f1(i256 %a, i256 %b, ptr %res) {
; CHECK-NEXT: vlgvg %r2, %v5, 1
; CHECK-NEXT: vsbiq %v0, %v1, %v0, %v4
; CHECK-NEXT: vsq %v1, %v3, %v2
+; CHECK-NEXT: xilf %r2, 1
; CHECK-NEXT: vst %v1, 16(%r4), 3
; CHECK-NEXT: vst %v0, 0(%r4), 3
; CHECK-NEXT: br %r14
@@ -35,6 +36,7 @@ define zeroext i1 @f2(i256 %a, i256 %b) {
; CHECK-NEXT: vscbiq %v2, %v3, %v2
; CHECK-NEXT: vsbcbiq %v0, %v1, %v0, %v2
; CHECK-NEXT: vlgvg %r2, %v0, 1
+; CHECK-NEXT: xilf %r2, 1
; CHECK-NEXT: br %r14
%t = call {i256, i1} @llvm.usub.with.overflow.i256(i256 %a, i256 %b)
%obit = extractvalue {i256, i1} %t, 1
diff --git a/llvm/test/CodeGen/SystemZ/readcyclecounter.ll b/llvm/test/CodeGen/SystemZ/readcyclecounter.ll
new file mode 100644
index 0000000..34b6d34
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/readcyclecounter.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=s390x-ibm-linux | FileCheck %s
+
+; Verify that we correctly lower ISD::READCYCLECOUNTER.
+
+define i64 @test_builtin_readcyclecounter1() {
+; CHECK-LABEL: test_builtin_readcyclecounter1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: aghi %r15, -168
+; CHECK-NEXT: .cfi_def_cfa_offset 328
+; CHECK-NEXT: stckf 160(%r15)
+; CHECK-NEXT: lg %r2, 160(%r15)
+; CHECK-NEXT: aghi %r15, 168
+; CHECK-NEXT: br %r14
+ %1 = tail call i64 @llvm.readcyclecounter()
+ ret i64 %1
+}
+
+define void @test_builtin_readcyclecounter2(ptr %ptr) {
+; CHECK-LABEL: test_builtin_readcyclecounter2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stckf 0(%r2)
+; CHECK-NEXT: br %r14
+ %1 = tail call i64 @llvm.readcyclecounter()
+ store i64 %1, ptr %ptr
+ ret void
+}
diff --git a/llvm/test/CodeGen/SystemZ/regcoal-subranges-update.mir b/llvm/test/CodeGen/SystemZ/regcoal-subranges-update.mir
index f709b70..bf58550 100644
--- a/llvm/test/CodeGen/SystemZ/regcoal-subranges-update.mir
+++ b/llvm/test/CodeGen/SystemZ/regcoal-subranges-update.mir
@@ -49,6 +49,8 @@ body: |
---
name: segfault
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
liveins: []
body: |
; CHECK-LABEL: name: segfault
diff --git a/llvm/test/CodeGen/SystemZ/swifterror.ll b/llvm/test/CodeGen/SystemZ/swifterror.ll
index 3ea29f1..1b18287 100644
--- a/llvm/test/CodeGen/SystemZ/swifterror.ll
+++ b/llvm/test/CodeGen/SystemZ/swifterror.ll
@@ -30,8 +30,8 @@ entry:
define float @caller(ptr %error_ref) {
; CHECK-LABEL: caller:
; Make a copy of error_ref because r2 is getting clobbered
-; CHECK: lgr %r[[REG1:[0-9]+]], %r2
-; CHECK: lghi %r9, 0
+; CHECK-DAG: lgr %r[[REG1:[0-9]+]], %r2
+; CHECK-DAG: lghi %r9, 0
; CHECK: brasl %r14, foo
; CHECK: %r2, %r9
; CHECK: jlh
@@ -197,7 +197,7 @@ define void @foo_sret(ptr sret(%struct.S) %agg.result, i32 %val1, ptr swifterror
; CHECK-LABEL: foo_sret:
; CHECK-DAG: lgr %r[[REG1:[0-9]+]], %r2
; CHECK-DAG: lr %r[[REG2:[0-9]+]], %r3
-; CHECK: lghi %r2, 16
+; CHECK-DAG: lghi %r2, 16
; CHECK: brasl %r14, malloc
; CHECK: mvi 8(%r2), 1
; CHECK: st %r[[REG2]], 4(%r[[REG1]])
@@ -280,7 +280,7 @@ define float @caller_with_multiple_swifterror_values(ptr %error_ref, ptr %error_
; CHECK-DAG: lgr %r[[REG1:[0-9]+]], %r2
; CHECK-DAG: lgr %r[[REG2:[0-9]+]], %r3
; The first swifterror value:
-; CHECK: lghi %r9, 0
+; CHECK-DAG: lghi %r9, 0
; CHECK: brasl %r14, foo
; CHECK: ltgr %r2, %r9
; CHECK: jlh
diff --git a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
index 69e1c2f..9d77744 100644
--- a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
@@ -1649,8 +1649,8 @@ define <2 x double> @constrained_vector_powi_v2f64() #0 {
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI36_1
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: ldr %f2, %f8
@@ -1707,14 +1707,14 @@ define <3 x float> @constrained_vector_powi_v3f32() #0 {
; S390X-NEXT: brasl %r14, __powisf2@PLT
; S390X-NEXT: larl %r1, .LCPI37_1
; S390X-NEXT: le %f1, 0(%r1)
-; S390X-NEXT: ler %f8, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ler %f8, %f0
; S390X-NEXT: ler %f0, %f1
; S390X-NEXT: brasl %r14, __powisf2@PLT
; S390X-NEXT: larl %r1, .LCPI37_2
; S390X-NEXT: le %f1, 0(%r1)
-; S390X-NEXT: ler %f9, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ler %f9, %f0
; S390X-NEXT: ler %f0, %f1
; S390X-NEXT: brasl %r14, __powisf2@PLT
; S390X-NEXT: ler %f2, %f9
@@ -1784,14 +1784,14 @@ define void @constrained_vector_powi_v3f64(ptr %a) #0 {
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI38_1
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI38_2
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f9, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f9, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: std %f0, 16(%r13)
@@ -1865,20 +1865,20 @@ define <4 x double> @constrained_vector_powi_v4f64() #0 {
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI39_1
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI39_2
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f9, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f9, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI39_3
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f10, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f10, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: ldr %f2, %f10
diff --git a/llvm/test/CodeGen/SystemZ/zos-ada-relocations.ll b/llvm/test/CodeGen/SystemZ/zos-ada-relocations.ll
index e252469..db67ac5 100644
--- a/llvm/test/CodeGen/SystemZ/zos-ada-relocations.ll
+++ b/llvm/test/CodeGen/SystemZ/zos-ada-relocations.ll
@@ -56,9 +56,9 @@ entry:
declare signext i32 @callout(i32 signext)
; CHECK: .section ".ada"
-; CHECK: .set @@DoFunc@indirect0, DoFunc
-; CHECK: .indirect_symbol @@DoFunc@indirect0
-; CHECK: .quad V(@@DoFunc@indirect0) * Offset 0 pointer to function descriptor DoFunc
+; CHECK: .set L#DoFunc@indirect0, DoFunc
+; CHECK: .indirect_symbol L#DoFunc@indirect0
+; CHECK: .quad V(L#DoFunc@indirect0) * Offset 0 pointer to function descriptor DoFunc
; CHECK: .quad R(Caller) * Offset 8 function descriptor of Caller
; CHECK: .quad V(Caller)
; CHECK: .quad A(i2) * Offset 24 pointer to data symbol i2
diff --git a/llvm/test/CodeGen/SystemZ/zos-landingpad.ll b/llvm/test/CodeGen/SystemZ/zos-landingpad.ll
index 7f3214d..9db1011 100644
--- a/llvm/test/CodeGen/SystemZ/zos-landingpad.ll
+++ b/llvm/test/CodeGen/SystemZ/zos-landingpad.ll
@@ -19,7 +19,7 @@ done:
lpad:
%0 = landingpad { ptr, i32 } cleanup
; The Exception Pointer is %r1; the Exception Selector, %r2.
-; CHECK: @BB{{[^%]*}} %lpad
+; CHECK: L#BB{{[^%]*}} %lpad
; CHECK-DAG: stg 1, {{.*}}
; CHECK-DAG: st 2, {{.*}}
%1 = extractvalue { ptr, i32 } %0, 0
diff --git a/llvm/test/CodeGen/SystemZ/zos-ppa2.ll b/llvm/test/CodeGen/SystemZ/zos-ppa2.ll
index 60580ae..189b5a3 100644
--- a/llvm/test/CodeGen/SystemZ/zos-ppa2.ll
+++ b/llvm/test/CodeGen/SystemZ/zos-ppa2.ll
@@ -2,24 +2,24 @@
; REQUIRES: systemz-registered-target
; CHECK: .section ".ppa2"
-; CHECK: @@PPA2:
+; CHECK: L#PPA2:
; CHECK: .byte 3
; CHECK: .byte 231
; CHECK: .byte 34
; CHECK: .byte 4
-; CHECK: .long CELQSTRT-@@PPA2
+; CHECK: .long CELQSTRT-L#PPA2
; CHECK: .long 0
-; CHECK: .long @@DVS-@@PPA2
+; CHECK: .long L#DVS-L#PPA2
; CHECK: .long 0
; CHECK: .byte 129
; CHECK: .byte 0
; CHECK: .short 0
-; CHECK: @@DVS:
+; CHECK: L#DVS:
; CHECK: .ascii "\361\371\367\360\360\361\360\361\360\360\360\360\360\360"
; CHECK: .short 0
-; CHECK: .quad @@PPA2-CELQSTRT * A(PPA2-CELQSTRT)
-; CHECK: @@PPA1_void_test_0:
-; CHECK: .long @@PPA2-@@PPA1_void_test_0 * Offset to PPA2
+; CHECK: .quad L#PPA2-CELQSTRT * A(PPA2-CELQSTRT)
+; CHECK: L#PPA1_void_test_0:
+; CHECK: .long L#PPA2-L#PPA1_void_test_0 * Offset to PPA2
; CHECK: .section "B_IDRL"
; CHECK: .byte 0
; CHECK: .byte 3
diff --git a/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll b/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll
index 8c04116..d3e5823 100644
--- a/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll
+++ b/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll
@@ -15,7 +15,7 @@
; CHECK64: aghi 4, 192
; CHECK64: b 2(7)
-; CHECK64: @@PPA1_func0_0:
+; CHECK64: L#PPA1_func0_0:
; CHECK64: .short 0 * Length/4 of Parms
define void @func0() {
call i64 (i64) @fun(i64 10)
@@ -31,7 +31,7 @@ define void @func0() {
; CHECK64: aghi 4, 160
; CHECK64: b 2(7)
-; CHECK64: @@PPA1_func1_0:
+; CHECK64: L#PPA1_func1_0:
; CHECK64: .short 2 * Length/4 of Parms
define void @func1(ptr %ptr) {
%l01 = load volatile i64, ptr %ptr
@@ -336,16 +336,16 @@ define void @large_stack0() {
; CHECK64: lgr 0, 3
; CHECK64: llgt 3, 1208
; CHECK64: cg 4, 64(3)
-; CHECK64: jhe @BB7_2
+; CHECK64: jhe L#BB7_2
; CHECK64: %bb.1:
; CHECK64: lg 3, 72(3)
; CHECK64: basr 3, 3
; CHECK64: bcr 0, 7
-; CHECK64: @BB7_2:
+; CHECK64: L#BB7_2:
; CHECK64: stmg 6, 7, 2064(4)
; CHECK64: lgr 3, 0
-; CHECK64: @@PPA1_large_stack1_0:
+; CHECK64: L#PPA1_large_stack1_0:
; CHECK64: .short 6 * Length/4 of Parms
define void @large_stack1(i64 %n1, i64 %n2, i64 %n3) {
%arr = alloca [131072 x i64], align 8
@@ -361,12 +361,12 @@ define void @large_stack1(i64 %n1, i64 %n2, i64 %n3) {
; CHECK64: agfi 4, -1048768
; CHECK64: llgt 3, 1208
; CHECK64: cg 4, 64(3)
-; CHECK64: jhe @BB8_2
+; CHECK64: jhe L#BB8_2
; CHECK64: %bb.1:
; CHECK64: lg 3, 72(3)
; CHECK64: basr 3, 3
; CHECK64: bcr 0, 7
-; CHECK64: @BB8_2:
+; CHECK64: L#BB8_2:
; CHECK64: lgr 3, 0
; CHECK64: lg 3, 2192(3)
; CHECK64: stmg 4, 12, 2048(4)
diff --git a/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll b/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll
index 767b702..a0f8374 100644
--- a/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll
+++ b/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll
@@ -42,9 +42,8 @@ define i64 @loopif(ptr nocapture readonly %x, i32 %y, i32 %n) {
; CHECK-NEXT: cmp r2, #1
; CHECK-NEXT: blt .LBB1_4
; CHECK-NEXT: @ %bb.1: @ %for.body.lr.ph
-; CHECK-NEXT: mov lr, r2
-; CHECK-NEXT: mov r12, r0
; CHECK-NEXT: dls lr, r2
+; CHECK-NEXT: mov r12, r0
; CHECK-NEXT: movs r0, #0
; CHECK-NEXT: movs r3, #0
; CHECK-NEXT: .p2align 2
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
index 4ab5697..93cab25 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
@@ -542,9 +542,7 @@ define arm_aapcs_vfpcc void @gather_inc_v8i16_simple(ptr noalias nocapture reado
; CHECK-NEXT: .pad #28
; CHECK-NEXT: sub sp, #28
; CHECK-NEXT: cmp r2, #1
-; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill
-; CHECK-NEXT: mov r1, r2
-; CHECK-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT: strd r1, r2, [sp, #4] @ 8-byte Folded Spill
; CHECK-NEXT: blt .LBB11_5
; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader
; CHECK-NEXT: ldr r1, [sp, #8] @ 4-byte Reload
@@ -661,9 +659,7 @@ define arm_aapcs_vfpcc void @gather_inc_v8i16_complex(ptr noalias nocapture read
; CHECK-NEXT: .pad #136
; CHECK-NEXT: sub sp, #136
; CHECK-NEXT: cmp r2, #1
-; CHECK-NEXT: str r1, [sp, #64] @ 4-byte Spill
-; CHECK-NEXT: mov r1, r2
-; CHECK-NEXT: str r2, [sp, #68] @ 4-byte Spill
+; CHECK-NEXT: strd r1, r2, [sp, #64] @ 8-byte Folded Spill
; CHECK-NEXT: blt.w .LBB12_5
; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader
; CHECK-NEXT: ldr r1, [sp, #68] @ 4-byte Reload
@@ -952,11 +948,9 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_complex(ptr noalias nocapture read
; CHECK-NEXT: vstrw.32 q1, [sp, #152] @ 16-byte Spill
; CHECK-NEXT: vldrw.u32 q1, [sp, #296] @ 16-byte Reload
; CHECK-NEXT: vstrw.32 q0, [sp, #168] @ 16-byte Spill
-; CHECK-NEXT: vmov q0, q2
-; CHECK-NEXT: vmov q3, q5
-; CHECK-NEXT: vadd.i32 q1, q1, r0
; CHECK-NEXT: vldrw.u32 q0, [sp, #248] @ 16-byte Reload
; CHECK-NEXT: vldrw.u32 q3, [sp, #216] @ 16-byte Reload
+; CHECK-NEXT: vadd.i32 q1, q1, r0
; CHECK-NEXT: vstrw.32 q5, [sp, #120] @ 16-byte Spill
; CHECK-NEXT: vadd.i32 q0, q0, r0
; CHECK-NEXT: subs.w r11, r11, #16
@@ -1243,9 +1237,7 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(ptr noalias nocapture reado
; CHECK-NEXT: .pad #64
; CHECK-NEXT: sub sp, #64
; CHECK-NEXT: cmp r2, #1
-; CHECK-NEXT: str r1, [sp, #56] @ 4-byte Spill
-; CHECK-NEXT: mov r1, r2
-; CHECK-NEXT: str r2, [sp, #60] @ 4-byte Spill
+; CHECK-NEXT: strd r1, r2, [sp, #56] @ 8-byte Folded Spill
; CHECK-NEXT: blt.w .LBB14_5
; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader
; CHECK-NEXT: adr r5, .LCPI14_3
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
index 18c8a8a..7b8b884 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
@@ -609,7 +609,6 @@ define dso_local void @arm_mat_mult_q15(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: strd r0, r2, [sp, #24] @ 8-byte Folded Spill
; CHECK-NEXT: cmp r3, #0
; CHECK-NEXT: str r3, [sp, #8] @ 4-byte Spill
-; CHECK-NEXT: mov r0, r3
; CHECK-NEXT: itt ne
; CHECK-NEXT: ldrne r0, [sp, #136]
; CHECK-NEXT: cmpne r0, #0
diff --git a/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-reduct.ll b/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-reduct.ll
index 9987ff9..77980be 100644
--- a/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-reduct.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-reduct.ll
@@ -108,9 +108,7 @@ define void @correlate(ptr nocapture noundef readonly %ID, ptr nocapture noundef
; CHECK-NEXT: .pad #12
; CHECK-NEXT: sub sp, #12
; CHECK-NEXT: cmp r3, #1
-; CHECK-NEXT: strd r0, r1, [sp] @ 8-byte Folded Spill
-; CHECK-NEXT: mov r1, r3
-; CHECK-NEXT: str r3, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT: stm.w sp, {r0, r1, r3} @ 12-byte Folded Spill
; CHECK-NEXT: blt .LBB4_12
; CHECK-NEXT: @ %bb.1: @ %for.body.lr.ph
; CHECK-NEXT: ldr r1, [sp, #48]
diff --git a/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll b/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
index 82a186b..c03339b 100644
--- a/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
@@ -1062,9 +1062,8 @@ define arm_aapcs_vfpcc void @_Z37_arm_radix4_butterfly_inverse_f32_mvePK21arm_cf
; CHECK-NEXT: .pad #40
; CHECK-NEXT: sub sp, #40
; CHECK-NEXT: cmp r2, #8
-; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill
; CHECK-NEXT: vstr s0, [sp] @ 4-byte Spill
-; CHECK-NEXT: mov r1, r2
+; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill
; CHECK-NEXT: str r2, [sp, #4] @ 4-byte Spill
; CHECK-NEXT: blo .LBB7_9
; CHECK-NEXT: @ %bb.1:
diff --git a/llvm/test/CodeGen/Thumb2/mve-vldst4.ll b/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
index 219541c..2e51e9e 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
@@ -95,14 +95,13 @@ define void @vldst4(ptr nocapture readonly %pIn, ptr nocapture %pOut, i32 %numRo
; CHECK-NEXT: vmovx.f16 s8, s27
; CHECK-NEXT: vins.f16 s12, s24
; CHECK-NEXT: vins.f16 s13, s25
+; CHECK-NEXT: vins.f16 s2, s10
; CHECK-NEXT: vins.f16 s3, s11
; CHECK-NEXT: vins.f16 s1, s9
-; CHECK-NEXT: vins.f16 s2, s10
; CHECK-NEXT: vins.f16 s22, s8
; CHECK-NEXT: vmov q2, q3
-; CHECK-NEXT: vmov.f32 s17, s0
-; CHECK-NEXT: vmov.f32 s10, s4
; CHECK-NEXT: vmov q6, q0
+; CHECK-NEXT: vmov.f32 s10, s4
; CHECK-NEXT: vmov.f32 s11, s7
; CHECK-NEXT: vmov.f32 s9, s0
; CHECK-NEXT: vmov.f32 s17, s2
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-optimisations.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-optimisations.mir
index f28311e..f9b175e 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-optimisations.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-optimisations.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode -run-pass arm-mve-vpt-opts %s -o - | FileCheck %s
+# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode -run-pass arm-mve-vpt-opts -verify-machineinstrs %s -o - | FileCheck %s
---
name: vcmp_with_opposite_cond
@@ -1021,3 +1021,26 @@ body: |
%16:mqpr = MVE_VORR %15, %15, 1, %10, $noreg, undef %16
%17:mqpr = MVE_VORR %16, %16, 1, %11, $noreg, undef %17
...
+---
+name: reuse_kill_flags
+alignment: 4
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: reuse_kill_flags
+ ; CHECK: [[t2MOVi:%[0-9]+]]:tgpreven = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vccr = COPY [[t2MOVi]]
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:mqpr = IMPLICIT_DEF
+ ; CHECK-NEXT: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR [[DEF]], [[DEF]], 1, [[COPY]], $noreg, undef [[MVE_VORR]]
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:mqpr = IMPLICIT_DEF
+ ; CHECK-NEXT: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR [[DEF1]], [[DEF1]], 1, killed [[COPY]], $noreg, undef [[MVE_VORR1]]
+ ; CHECK-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit [[DEF1]]
+ %0:tgpreven = t2MOVi 0, 14, $noreg, $noreg
+ %1:vccr = COPY %0:tgpreven
+ %2:mqpr = IMPLICIT_DEF
+ %3:mqpr = MVE_VORR %2:mqpr, %2:mqpr, 1, killed %1, $noreg, undef %3
+ %4:vccr = COPY %0:tgpreven
+ %5:mqpr = IMPLICIT_DEF
+ %6:mqpr = MVE_VORR %5:mqpr, %5:mqpr, 1, killed %4, $noreg, undef %6
+ tBX_RET 14 /* CC::al */, $noreg, implicit %5:mqpr
+
+...
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
index aa4d877..4a63c81 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
@@ -59,12 +59,12 @@ entry:
%call = call i32 @setjmp(ptr %buf) #0
call void @longjmp(ptr %buf, i32 1) #1
unreachable
-; SJLJ: call saveSetjmp
+; SJLJ: call __wasm_setjmp
; SJLJ: i32.const emscripten_longjmp
; SJLJ-NOT: i32.const emscripten_longjmp_jmpbuf
; SJLJ: call invoke_vii
; SJLJ-NOT: call "__invoke_void_ptr_i32"
-; SJLJ: call testSetjmp
+; SJLJ: call __wasm_setjmp_test
; NONE: call setjmp
; NONE: call longjmp
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj.ll b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj.ll
index 7cf05cc..32942cd 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj.ll
@@ -49,7 +49,7 @@ try.cont: ; preds = %lpad, %entry
; longjmp checking part
; CHECK: if.then1:
-; CHECK: call i32 @testSetjmp
+; CHECK: call i32 @__wasm_setjmp_test
}
; @foo can either throw an exception or longjmp. Because this function doesn't
@@ -117,7 +117,6 @@ if.end: ; preds = %entry
; CHECK: rethrow.exn:
; CHECK-NEXT: %exn = call ptr @__cxa_find_matching_catch_2()
-; CHECK-NEXT: call void @free(ptr %setjmpTable{{.*}})
; CHECK-NEXT: call void @__resumeException(ptr %exn)
; CHECK-NEXT: unreachable
@@ -147,7 +146,6 @@ throw: ; preds = %if.end, %entry
unreachable
; CHECK: throw:
-; CHECK-NEXT: call void @free(ptr %setjmpTable{{.*}})
; CHECK-NEXT: call void @__cxa_throw(ptr null, ptr null, ptr null)
; CHECK-NEXT: unreachable
}
@@ -208,7 +206,6 @@ return: ; preds = %entry, %if.end
; CHECK: rethrow.exn:
; CHECK-NEXT: %exn = call ptr @__cxa_find_matching_catch_2()
-; CHECK-NEXT: tail call void @free(ptr %setjmpTable{{.*}})
; CHECK-NEXT: call void @__resumeException(ptr %exn)
; CHECK-NEXT: unreachable
}
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll
index 1a85a63..79ae161 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll
@@ -12,7 +12,7 @@ target triple = "wasm32-unknown-emscripten"
; CHECK-LABEL: @malloc_test
define void @malloc_test() {
entry:
- ; CHECK: call ptr @malloc
+ ; CHECK: alloca i32
%retval = alloca i32, align 4
%jmp = alloca [1 x %struct.__jmp_buf_tag], align 16
store i32 0, ptr %retval, align 4
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-debuginfo.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-debuginfo.ll
index 4f69415..fec9836 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-debuginfo.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-debuginfo.ll
@@ -16,25 +16,22 @@ entry:
call void @foo(), !dbg !7
ret void, !dbg !8
; CHECK: entry:
- ; CHECK-NEXT: call ptr @malloc(i32 40), !dbg ![[DL0:.*]]
+ ; CHECK-NEXT: %functionInvocationId = alloca i32, align 4, !dbg ![[DL0:.*]]
; CHECK: entry.split:
; CHECK: alloca {{.*}}, !dbg ![[DL0]]
- ; CHECK: call ptr @saveSetjmp{{.*}}, !dbg ![[DL1:.*]]
- ; CHECK-NEXT: call i32 @getTempRet0{{.*}}, !dbg ![[DL1]]
+ ; CHECK: call void @__wasm_setjmp{{.*}}, !dbg ![[DL1:.*]]
; CHECK-NEXT: br {{.*}}, !dbg ![[DL2:.*]]
; CHECK: entry.split.split:
; CHECK: call {{.*}} void @__invoke_void{{.*}}, !dbg ![[DL2]]
; CHECK: entry.split.split.split:
- ; CHECK-NEXT: call void @free{{.*}}, !dbg ![[DL3:.*]]
; CHECK: if.then1:
- ; CHECK: call i32 @testSetjmp{{.*}}, !dbg ![[DL2]]
+ ; CHECK: call i32 @__wasm_setjmp_test{{.*}}, !dbg ![[DL2]]
; CHECK: if.end:
- ; CHECK: call i32 @getTempRet0{{.*}}, !dbg ![[DL2]]
; CHECK: call.em.longjmp:
; CHECK: call void @emscripten_longjmp{{.*}}, !dbg ![[DL2]]
@@ -43,26 +40,6 @@ entry:
; CHECK: call void @setTempRet0{{.*}}, !dbg ![[DL2]]
}
-; No instruction has debug info but the current function (setjmp_debug_info2)
-; and the called function (malloc / free) have DISubprograms, so the newly
-; generated calls should have debug info attached. We don't have an instruction
-; to take debug info from, so we create dummy debug info.
-define void @setjmp_debug_info1() !dbg !9 {
-; CHECK-LABEL: @setjmp_debug_info1
-entry:
- %buf = alloca [1 x %struct.__jmp_buf_tag], align 16
- %arraydecay = getelementptr inbounds [1 x %struct.__jmp_buf_tag], ptr %buf, i32 0, i32 0
- %call = call i32 @setjmp(ptr %arraydecay) #0
- call void @foo()
- ret void
- ; CHECK: call ptr @malloc(i32 40), !dbg ![[DL_DUMMY:.*]]
- ; CHECK: call void @free{{.*}}, !dbg ![[DL_DUMMY]]
-}
-
-; Note that these functions have DISubprograms.
-declare !dbg !10 ptr @malloc(i32)
-declare !dbg !11 void @free(ptr)
-
declare void @foo()
; Function Attrs: returns_twice
declare i32 @setjmp(ptr) #0
@@ -79,9 +56,3 @@ declare i32 @setjmp(ptr) #0
!6 = !DILocation(line:4, scope: !3)
!7 = !DILocation(line:5, scope: !3)
!8 = !DILocation(line:6, scope: !3)
-!9 = distinct !DISubprogram(name: "setjmp_debug_info1", unit:!2, file: !1, line: 50)
-!10 = !DISubprogram(name: "malloc", file: !1, line: 10, isDefinition: false)
-!11 = !DISubprogram(name: "free", file: !1, line: 20, isDefinition: false)
-
-; Dummy debug info generated
-; CHECK: ![[DL_DUMMY]] = !DILocation(line: 50, column: 1, scope: !9)
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj.ll
index 7115b01..27ec95a 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj.ll
@@ -22,15 +22,12 @@ entry:
call void @longjmp(ptr %buf, i32 1) #1
unreachable
; CHECK: entry:
-; CHECK-NEXT: %[[MALLOCCALL:.*]] = tail call ptr @malloc([[PTR]] 40)
-; CHECK-NEXT: store i32 0, ptr %[[MALLOCCALL]]
-; CHECK-NEXT: %[[SETJMP_TABLE_SIZE:.*]] = add i32 4, 0
+; CHECK-NEXT: %functionInvocationId = alloca i32, align 4
; CHECK-NEXT: br label %entry.split
; CHECK: entry.split
; CHECK-NEXT: %[[BUF:.*]] = alloca [1 x %struct.__jmp_buf_tag]
-; CHECK-NEXT: %[[SETJMP_TABLE1:.*]] = call ptr @saveSetjmp(ptr %[[BUF]], i32 1, ptr %[[MALLOCCALL]], i32 %[[SETJMP_TABLE_SIZE]])
-; CHECK-NEXT: %[[SETJMP_TABLE_SIZE1:.*]] = call i32 @getTempRet0()
+; CHECK-NEXT: call void @__wasm_setjmp(ptr %[[BUF]], i32 1, ptr %functionInvocationId)
; CHECK-NEXT: br label %entry.split.split
; CHECK: entry.split.split:
@@ -51,8 +48,7 @@ entry:
; CHECK: if.then1:
; CHECK-NEXT: %[[__THREW__VAL_P:.*]] = inttoptr [[PTR]] %[[__THREW__VAL]] to ptr
-; CHECK-NEXT: %[[__THREW__VAL_P_LOADED:.*]] = load [[PTR]], ptr %[[__THREW__VAL_P]]
-; CHECK-NEXT: %[[LABEL:.*]] = call i32 @testSetjmp([[PTR]] %[[__THREW__VAL_P_LOADED]], ptr %[[SETJMP_TABLE1]], i32 %[[SETJMP_TABLE_SIZE1]])
+; CHECK-NEXT: %[[LABEL:.*]] = call i32 @__wasm_setjmp_test(ptr %[[__THREW__VAL_P]], ptr %functionInvocationId)
; CHECK-NEXT: %[[CMP:.*]] = icmp eq i32 %[[LABEL]], 0
; CHECK-NEXT: br i1 %[[CMP]], label %call.em.longjmp, label %if.end2
@@ -69,7 +65,6 @@ entry:
; CHECK: call.em.longjmp:
; CHECK-NEXT: %threw.phi = phi [[PTR]] [ %[[__THREW__VAL]], %if.then1 ]
; CHECK-NEXT: %threwvalue.phi = phi i32 [ %[[THREWVALUE_VAL]], %if.then1 ]
-; CHECK-NEXT: tail call void @free(ptr %[[SETJMP_TABLE1]])
; CHECK-NEXT: call void @emscripten_longjmp([[PTR]] %threw.phi, i32 %threwvalue.phi)
; CHECK-NEXT: unreachable
@@ -87,13 +82,12 @@ entry:
call void @foo()
ret void
; CHECK: entry:
-; CHECK: %[[SETJMP_TABLE:.*]] = call ptr @saveSetjmp(
+; CHECK: call void @__wasm_setjmp(
; CHECK: entry.split.split:
; CHECK: @__invoke_void(ptr @foo)
; CHECK: entry.split.split.split:
-; CHECK-NEXT: tail call void @free(ptr %[[SETJMP_TABLE]])
; CHECK-NEXT: ret void
}
@@ -110,9 +104,8 @@ entry:
call void @foo()
ret void
; CHECK: call.em.longjmp:
-; CHECK-NEXT: %threw.phi = phi [[PTR]] [ %__THREW__.val, %if.then1 ], [ %__THREW__.val4, %if.then15 ]
-; CHECK-NEXT: %threwvalue.phi = phi i32 [ %__threwValue.val, %if.then1 ], [ %__threwValue.val8, %if.then15 ]
-; CHECK-NEXT: tail call void @free(ptr %[[SETJMP_TABLE1]])
+; CHECK-NEXT: %threw.phi = phi [[PTR]] [ %__THREW__.val, %if.then1 ], [ %__THREW__.val2, %if.then13 ]
+; CHECK-NEXT: %threwvalue.phi = phi i32 [ %__threwValue.val, %if.then1 ], [ %__threwValue.val6, %if.then13 ]
; CHECK-NEXT: call void @emscripten_longjmp([[PTR]] %threw.phi, i32 %threwvalue.phi)
; CHECK-NEXT: unreachable
}
@@ -145,7 +138,6 @@ entry:
%cmp = icmp sgt i32 %n, 5
br i1 %cmp, label %if.then, label %if.end
; CHECK: entry:
-; CHECK: %[[SETJMP_TABLE_SIZE0:.*]] = add i32 4, 0
if.then: ; preds = %entry
%0 = load i32, ptr @global_var, align 4
@@ -154,13 +146,10 @@ if.then: ; preds = %entry
br label %if.end
; CHECK: if.then:
; CHECK: %[[VAR0:.*]] = load i32, ptr @global_var, align 4
-; CHECK: %[[SETJMP_TABLE1:.*]] = call ptr @saveSetjmp(
-; CHECK-NEXT: %[[SETJMP_TABLE_SIZE1:.*]] = call i32 @getTempRet0()
+; CHECK: call void @__wasm_setjmp(
; CHECK: if.then.split:
-; CHECK: %[[VAR1:.*]] = phi i32 [ %[[VAR2:.*]], %if.end3 ], [ %[[VAR0]], %if.then ]
-; CHECK: %[[SETJMP_TABLE_SIZE2:.*]] = phi i32 [ %[[SETJMP_TABLE_SIZE1]], %if.then ], [ %[[SETJMP_TABLE_SIZE3:.*]], %if.end3 ]
-; CHECK: %[[SETJMP_TABLE2:.*]] = phi ptr [ %[[SETJMP_TABLE1]], %if.then ], [ %[[SETJMP_TABLE3:.*]], %if.end3 ]
+; CHECK: %[[VAR1:.*]] = phi i32 [ %[[VAR2:.*]], %if.end1 ], [ %[[VAR0]], %if.then ]
; CHECK: store i32 %[[VAR1]], ptr @global_var, align 4
if.end: ; preds = %if.then, %entry
@@ -168,8 +157,6 @@ if.end: ; preds = %if.then, %entry
unreachable
; CHECK: if.end:
; CHECK: %[[VAR2]] = phi i32 [ %[[VAR1]], %if.then.split ], [ undef, %entry.split ]
-; CHECK: %[[SETJMP_TABLE_SIZE3]] = phi i32 [ %[[SETJMP_TABLE_SIZE2]], %if.then.split ], [ %[[SETJMP_TABLE_SIZE0]], %entry.split ]
-; CHECK: %[[SETJMP_TABLE3]] = phi ptr [ %[[SETJMP_TABLE2]], %if.then.split ], [ %setjmpTable, %entry.split ]
}
; Test a case when a function only calls other functions that are neither setjmp nor longjmp
@@ -296,8 +283,8 @@ declare void @free(ptr)
; JS glue functions and invoke wrappers declaration
; CHECK-DAG: declare i32 @getTempRet0()
; CHECK-DAG: declare void @setTempRet0(i32)
-; CHECK-DAG: declare ptr @saveSetjmp(ptr, i32, ptr, i32)
-; CHECK-DAG: declare i32 @testSetjmp([[PTR]], ptr, i32)
+; CHECK-DAG: declare void @__wasm_setjmp(ptr, i32, ptr)
+; CHECK-DAG: declare i32 @__wasm_setjmp_test(ptr, ptr)
; CHECK-DAG: declare void @emscripten_longjmp([[PTR]], i32)
; CHECK-DAG: declare void @__invoke_void(ptr)
@@ -308,8 +295,8 @@ attributes #3 = { allocsize(0) }
; CHECK-DAG: attributes #{{[0-9]+}} = { nounwind "wasm-import-module"="env" "wasm-import-name"="getTempRet0" }
; CHECK-DAG: attributes #{{[0-9]+}} = { nounwind "wasm-import-module"="env" "wasm-import-name"="setTempRet0" }
; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="__invoke_void" }
-; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="saveSetjmp" }
-; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="testSetjmp" }
+; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="__wasm_setjmp" }
+; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="__wasm_setjmp_test" }
; CHECK-DAG: attributes #{{[0-9]+}} = { noreturn "wasm-import-module"="env" "wasm-import-name"="emscripten_longjmp" }
; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="__invoke_ptr_i32_ptr" }
; CHECK-DAG: attributes #[[ALLOCSIZE_ATTR]] = { allocsize(1) }
diff --git a/llvm/test/CodeGen/WebAssembly/lower-wasm-ehsjlj.ll b/llvm/test/CodeGen/WebAssembly/lower-wasm-ehsjlj.ll
index 25471eb..bd8db83 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-wasm-ehsjlj.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-wasm-ehsjlj.ll
@@ -108,8 +108,8 @@ catch: ; preds = %catch.start
call void @__cxa_end_catch() [ "funclet"(token %2) ]
catchret from %2 to label %catchret.dest
; CHECK: catch: ; preds = %catch.start
-; CHECK-NEXT: %exn = load ptr, ptr %exn.slot15, align 4
-; CHECK-NEXT: %5 = call ptr @__cxa_begin_catch(ptr %exn) #7 [ "funclet"(token %2) ]
+; CHECK-NEXT: %exn = load ptr, ptr %exn.slot6, align 4
+; CHECK-NEXT: %5 = call ptr @__cxa_begin_catch(ptr %exn) #6 [ "funclet"(token %2) ]
; CHECK-NEXT: invoke void @__cxa_end_catch() [ "funclet"(token %2) ]
; CHECK-NEXT: to label %.noexc unwind label %catch.dispatch.longjmp
@@ -265,7 +265,7 @@ ehcleanup: ; preds = %entry
; (cleanuppad), whose parent is 'none', so we should unwind directly to
; %catch.dispatch.longjmp.
%call2 = call noundef ptr @_ZN4TempD2Ev(ptr noundef %t) #2 [ "funclet"(token %0) ]
-; CHECK: %call13 = invoke {{.*}} ptr @_ZN4TempD2Ev(ptr
+; CHECK: %call11 = invoke {{.*}} ptr @_ZN4TempD2Ev(ptr
; CHECK-NEXT: to label {{.*}} unwind label %catch.dispatch.longjmp
cleanupret from %0 unwind to caller
}
diff --git a/llvm/test/CodeGen/WebAssembly/lower-wasm-sjlj.ll b/llvm/test/CodeGen/WebAssembly/lower-wasm-sjlj.ll
index b8d2230..82c04e2 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-wasm-sjlj.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-wasm-sjlj.ll
@@ -25,16 +25,12 @@ entry:
unreachable
; CHECK: entry:
-; CHECK-NEXT: %setjmpTable = tail call ptr @malloc([[PTR]] 40)
-; CHECK-NEXT: store i32 0, ptr %setjmpTable, align 4
-; CHECK-NEXT: %setjmpTableSize = add i32 4, 0
+; CHECK-NEXT: %functionInvocationId = alloca i32, align 4
; CHECK-NEXT: br label %setjmp.dispatch
; CHECK: setjmp.dispatch:
; CHECK-NEXT: %[[VAL2:.*]] = phi i32 [ %val, %if.end ], [ undef, %entry ]
; CHECK-NEXT: %[[BUF:.*]] = phi ptr [ %[[BUF2:.*]], %if.end ], [ undef, %entry ]
-; CHECK-NEXT: %[[SETJMPTABLESIZE2:.*]] = phi i32 [ %[[SETJMPTABLESIZE3:.*]], %if.end ], [ %setjmpTableSize, %entry ]
-; CHECK-NEXT: %[[SETJMPTABLE2:.*]] = phi ptr [ %[[SETJMPTABLE3:.*]], %if.end ], [ %setjmpTable, %entry ]
; CHECK-NEXT: %label.phi = phi i32 [ %label, %if.end ], [ -1, %entry ]
; CHECK-NEXT: switch i32 %label.phi, label %entry.split [
; CHECK-NEXT: i32 1, label %entry.split.split
@@ -42,14 +38,11 @@ entry:
; CHECK: entry.split:
; CHECK-NEXT: %buf = alloca [1 x %struct.__jmp_buf_tag], align 16
-; CHECK-NEXT: %[[SETJMPTABLE4:.*]] = call ptr @saveSetjmp(ptr %buf, i32 1, ptr %[[SETJMPTABLE2]], i32 %[[SETJMPTABLESIZE2]])
-; CHECK-NEXT: %[[SETJMPTABLESIZE4:.*]] = call i32 @getTempRet0()
+; CHECK-NEXT: call void @__wasm_setjmp(ptr %buf, i32 1, ptr %functionInvocationId)
; CHECK-NEXT: br label %entry.split.split
; CHECK: entry.split.split:
; CHECK-NEXT: %[[BUF2]] = phi ptr [ %[[BUF]], %setjmp.dispatch ], [ %buf, %entry.split ]
-; CHECK-NEXT: %[[SETJMPTABLESIZE3]] = phi i32 [ %[[SETJMPTABLESIZE4]], %entry.split ], [ %[[SETJMPTABLESIZE2]], %setjmp.dispatch ]
-; CHECK-NEXT: %[[SETJMPTABLE3]] = phi ptr [ %[[SETJMPTABLE4]], %entry.split ], [ %[[SETJMPTABLE2]], %setjmp.dispatch ]
; CHECK-NEXT: %setjmp.ret = phi i32 [ 0, %entry.split ], [ %[[VAL2]], %setjmp.dispatch ]
; CHECK-NEXT: invoke void @__wasm_longjmp(ptr %[[BUF2]], i32 1)
; CHECK-NEXT: to label %.noexc unwind label %catch.dispatch.longjmp
@@ -67,13 +60,11 @@ entry:
; CHECK-NEXT: %val_gep = getelementptr { ptr, i32 }, ptr %thrown, i32 0, i32 1
; CHECK-NEXT: %env = load ptr, ptr %env_gep, align {{.*}}
; CHECK-NEXT: %val = load i32, ptr %val_gep, align 4
-; CHECK-NEXT: %setjmp.id = load [[PTR]], ptr %env, align {{.*}}
-; CHECK-NEXT: %label = call i32 @testSetjmp([[PTR]] %setjmp.id, ptr %[[SETJMPTABLE3]], i32 %[[SETJMPTABLESIZE3]]) [ "funclet"(token %1) ]
+; CHECK-NEXT: %label = call i32 @__wasm_setjmp_test(ptr %env, ptr %functionInvocationId) [ "funclet"(token %1) ]
; CHECK-NEXT: %2 = icmp eq i32 %label, 0
; CHECK-NEXT: br i1 %2, label %if.then, label %if.end
; CHECK: if.then:
-; CHECK-NEXT: tail call void @free(ptr %[[SETJMPTABLE3]]) [ "funclet"(token %1) ]
; CHECK-NEXT: call void @__wasm_longjmp(ptr %env, i32 %val) [ "funclet"(token %1) ]
; CHECK-NEXT: unreachable
@@ -142,10 +133,9 @@ declare ptr @__cxa_begin_catch(ptr)
declare void @__cxa_end_catch()
declare void @free(ptr)
-; JS glue function declarations
-; CHECK-DAG: declare i32 @getTempRet0()
-; CHECK-DAG: declare ptr @saveSetjmp(ptr, i32, ptr, i32)
-; CHECK-DAG: declare i32 @testSetjmp([[PTR]], ptr, i32)
+; Runtime glue function declarations
+; CHECK-DAG: declare void @__wasm_setjmp(ptr, i32, ptr)
+; CHECK-DAG: declare i32 @__wasm_setjmp_test(ptr, ptr)
; CHECK-DAG: declare void @__wasm_longjmp(ptr, i32)
attributes #0 = { returns_twice }
diff --git a/llvm/test/CodeGen/WebAssembly/pr63817.ll b/llvm/test/CodeGen/WebAssembly/pr63817.ll
new file mode 100644
index 0000000..252768d
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/pr63817.ll
@@ -0,0 +1,15 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=wasm32 -mattr=+simd128 | FileCheck %s
+
+;; Regression test for a bug in which BUILD_VECTOR nodes with large unsigned
+;; lane constants were not properly selected.
+define <4 x i8> @test(<4 x i8> %0) {
+; CHECK-LABEL: test:
+; CHECK: .functype test (v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: v128.const 255, 17, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK-NEXT: # fallthrough-return
+ %V1 = or <4 x i8> <i8 255, i8 255, i8 255, i8 255>, %0
+ %V2 = insertelement <4 x i8> %V1, i8 17, i32 1
+ ret <4 x i8> %V2
+}
diff --git a/llvm/test/CodeGen/WinCFGuard/cfguard-mingw.ll b/llvm/test/CodeGen/WinCFGuard/cfguard-mingw.ll
index 085cde8..7a5baa0 100644
--- a/llvm/test/CodeGen/WinCFGuard/cfguard-mingw.ll
+++ b/llvm/test/CodeGen/WinCFGuard/cfguard-mingw.ll
@@ -97,7 +97,7 @@ $_ZTI7Derived = comdat any
; Function Attrs: nounwind uwtable
define weak_odr dso_local dllexport void @_ZN4BaseC2Ev(ptr noundef nonnull align 8 dereferenceable(12) %0) unnamed_addr #0 comdat align 2 {
- store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, inrange i32 0, i64 2), ptr %0, align 8, !tbaa !5
+ store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, i32 0, i64 2), ptr %0, align 8, !tbaa !5
%2 = getelementptr inbounds %class.Base, ptr %0, i64 0, i32 1
store i32 0, ptr %2, align 8, !tbaa !8
ret void
@@ -105,7 +105,7 @@ define weak_odr dso_local dllexport void @_ZN4BaseC2Ev(ptr noundef nonnull align
; Function Attrs: nounwind uwtable
define weak_odr dso_local dllexport void @_ZN4BaseC1Ev(ptr noundef nonnull align 8 dereferenceable(12) %0) unnamed_addr #0 comdat align 2 {
- store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, inrange i32 0, i64 2), ptr %0, align 8, !tbaa !5
+ store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, i32 0, i64 2), ptr %0, align 8, !tbaa !5
%2 = getelementptr inbounds %class.Base, ptr %0, i64 0, i32 1
store i32 0, ptr %2, align 8, !tbaa !8
ret void
@@ -140,10 +140,10 @@ declare dso_local void @_ZdlPv(ptr noundef) local_unnamed_addr #2
; Function Attrs: nounwind uwtable
define weak_odr dso_local dllexport void @_ZN7DerivedC2Ev(ptr noundef nonnull align 8 dereferenceable(16) %0) unnamed_addr #0 comdat align 2 {
- store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, inrange i32 0, i64 2), ptr %0, align 8, !tbaa !5
+ store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, i32 0, i64 2), ptr %0, align 8, !tbaa !5
%2 = getelementptr inbounds %class.Base, ptr %0, i64 0, i32 1
store i32 0, ptr %2, align 8, !tbaa !8
- store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV7Derived, i64 0, inrange i32 0, i64 2), ptr %0, align 8, !tbaa !5
+ store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV7Derived, i64 0, i32 0, i64 2), ptr %0, align 8, !tbaa !5
%3 = getelementptr inbounds %class.Derived, ptr %0, i64 0, i32 1
store i32 0, ptr %3, align 4, !tbaa !12
ret void
@@ -151,10 +151,10 @@ define weak_odr dso_local dllexport void @_ZN7DerivedC2Ev(ptr noundef nonnull al
; Function Attrs: nounwind uwtable
define weak_odr dso_local dllexport void @_ZN7DerivedC1Ev(ptr noundef nonnull align 8 dereferenceable(16) %0) unnamed_addr #0 comdat align 2 {
- store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, inrange i32 0, i64 2), ptr %0, align 8, !tbaa !5
+ store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV4Base, i64 0, i32 0, i64 2), ptr %0, align 8, !tbaa !5
%2 = getelementptr inbounds %class.Base, ptr %0, i64 0, i32 1
store i32 0, ptr %2, align 8, !tbaa !8
- store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV7Derived, i64 0, inrange i32 0, i64 2), ptr %0, align 8, !tbaa !5
+ store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV7Derived, i64 0, i32 0, i64 2), ptr %0, align 8, !tbaa !5
%3 = getelementptr inbounds %class.Derived, ptr %0, i64 0, i32 1
store i32 0, ptr %3, align 4, !tbaa !12
ret void
diff --git a/llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll b/llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll
index 535450a..695a2d0 100644
--- a/llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll
+++ b/llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll
@@ -9,11 +9,11 @@ define <2 x i64> @_mm_insert_epi16(<2 x i64> %a, i32 %b, i32 %imm) nounwind read
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-16, %esp
; X86-NEXT: subl $32, %esp
-; X86-NEXT: movzwl 8(%ebp), %eax
-; X86-NEXT: movl 12(%ebp), %ecx
-; X86-NEXT: andl $7, %ecx
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movzwl 8(%ebp), %ecx
+; X86-NEXT: andl $7, %eax
; X86-NEXT: movaps %xmm0, (%esp)
-; X86-NEXT: movw %ax, (%esp,%ecx,2)
+; X86-NEXT: movw %cx, (%esp,%eax,2)
; X86-NEXT: movaps (%esp), %xmm0
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-icmp-vec.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-icmp-vec.mir
new file mode 100644
index 0000000..e0fb0fc
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-icmp-vec.mir
@@ -0,0 +1,25 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*' %s -o - 2>%t | FileCheck %s
+# RUN: FileCheck -check-prefix=ILLEGAL %s < %t
+
+# ILLEGAL: remark: <unknown>:0:0: unable to legalize instruction: %2:_(<4 x s1>) = G_ICMP intpred(sle), %0:_(<4 x s64>), %1:_ (in function: test_icmp_v4i64)
+
+# PR86203
+---
+name: test_icmp_v4i64
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: test_icmp_v4i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<4 x s1>) = G_ICMP intpred(sle), [[DEF]](<4 x s64>), [[DEF1]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[ICMP]](<4 x s1>)
+ ; CHECK-NEXT: $xmm0 = COPY [[ANYEXT]](<4 x s32>)
+ ; CHECK-NEXT: RET 0, implicit $xmm0
+ %0:_(<4 x s64>) = G_IMPLICIT_DEF
+ %1:_(<4 x s64>) = G_IMPLICIT_DEF
+ %3:_(<4 x s1>) = G_ICMP intpred(sle), %0(<4 x s64>), %1
+ %4:_(<4 x s32>) = G_ANYEXT %3(<4 x s1>)
+ $xmm0 = COPY %4(<4 x s32>)
+ RET 0, implicit $xmm0
+...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-trap.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-trap.mir
index ea548c2..20b8b67 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-trap.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-trap.mir
@@ -23,6 +23,6 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: trap
; CHECK: TRAP
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
...
diff --git a/llvm/test/CodeGen/X86/addcarry.ll b/llvm/test/CodeGen/X86/addcarry.ll
index 3fc4ed9..f8d32fc 100644
--- a/llvm/test/CodeGen/X86/addcarry.ll
+++ b/llvm/test/CodeGen/X86/addcarry.ll
@@ -1490,3 +1490,26 @@ define { i64, i64 } @addcarry_commutative_2(i64 %x0, i64 %x1, i64 %y0, i64 %y1)
%r1 = insertvalue { i64, i64 } %r0, i64 %b1s, 1
ret { i64, i64 } %r1
}
+
+define i1 @pr84831(i64 %arg) {
+; CHECK-LABEL: pr84831:
+; CHECK: # %bb.0:
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: addb $-1, %al
+; CHECK-NEXT: adcq $1, %rcx
+; CHECK-NEXT: setb %al
+; CHECK-NEXT: retq
+ %a = icmp ult i64 0, %arg
+ %add1 = add i64 0, 1
+ %carryout1 = icmp ult i64 %add1, 0
+ %b = zext i1 %a to i64
+ %add2 = add i64 %add1, %b
+ %carryout2 = icmp ult i64 %add2, %add1
+ %zc1 = zext i1 %carryout1 to i63
+ %zc2 = zext i1 %carryout2 to i63
+ %or = or i63 %zc1, %zc2
+ %trunc = trunc i63 %or to i1
+ ret i1 %trunc
+}
diff --git a/llvm/test/CodeGen/X86/allow-check.ll b/llvm/test/CodeGen/X86/allow-check.ll
new file mode 100644
index 0000000..602e5a9
--- /dev/null
+++ b/llvm/test/CodeGen/X86/allow-check.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=x86_64 -global-isel=0 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64 -global-isel=1 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64 -global-isel=0 -fast-isel=1 | FileCheck %s
+
+define i1 @test_runtime() local_unnamed_addr {
+; CHECK-LABEL: test_runtime:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movb $1, %al
+; CHECK-NEXT: retq
+entry:
+ %allow = call i1 @llvm.allow.runtime.check(metadata !"test_check")
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.runtime.check(metadata) nounwind
+
+define i1 @test_ubsan() local_unnamed_addr {
+; CHECK-LABEL: test_ubsan:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movb $1, %al
+; CHECK-NEXT: retq
+entry:
+ %allow = call i1 @llvm.allow.ubsan.check(i8 7)
+ ret i1 %allow
+}
+
+declare i1 @llvm.allow.ubsan.check(i8) nounwind
diff --git a/llvm/test/CodeGen/X86/apx/add.ll b/llvm/test/CodeGen/X86/apx/add.ll
index cdb29a7..d3301ec 100644
--- a/llvm/test/CodeGen/X86/apx/add.ll
+++ b/llvm/test/CodeGen/X86/apx/add.ll
@@ -298,9 +298,9 @@ define i8 @addflag8rr(i8 noundef %a, i8 noundef %b) {
; CHECK-LABEL: addflag8rr:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addb %sil, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x00,0xf7]
-; CHECK-NEXT: movzbl %al, %ecx # encoding: [0x0f,0xb6,0xc8]
-; CHECK-NEXT: movl $255, %eax # encoding: [0xb8,0xff,0x00,0x00,0x00]
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: movzbl %al, %eax # encoding: [0x0f,0xb6,0xc0]
+; CHECK-NEXT: movl $255, %ecx # encoding: [0xb9,0xff,0x00,0x00,0x00]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -311,10 +311,10 @@ entry:
define i16 @addflag16rr(i16 noundef %a, i16 noundef %b) {
; CHECK-LABEL: addflag16rr:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addw %si, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x01,0xf7]
-; CHECK-NEXT: movl $65535, %eax # encoding: [0xb8,0xff,0xff,0x00,0x00]
+; CHECK-NEXT: addw %si, %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x01,0xf7]
+; CHECK-NEXT: movl $65535, %ecx # encoding: [0xb9,0xff,0xff,0x00,0x00]
; CHECK-NEXT: # imm = 0xFFFF
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -325,9 +325,9 @@ entry:
define i32 @addflag32rr(i32 noundef %a, i32 noundef %b) {
; CHECK-LABEL: addflag32rr:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addl %esi, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x01,0xf7]
-; CHECK-NEXT: movl $-1, %eax # encoding: [0xb8,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: addl %esi, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x01,0xf7]
+; CHECK-NEXT: movl $-1, %ecx # encoding: [0xb9,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%add = call i32 @llvm.uadd.sat.i32(i32 %a, i32 %b)
@@ -337,9 +337,9 @@ entry:
define i64 @addflag64rr(i64 noundef %a, i64 noundef %b) {
; CHECK-LABEL: addflag64rr:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addq %rsi, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x01,0xf7]
-; CHECK-NEXT: movq $-1, %rax # encoding: [0x48,0xc7,0xc0,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: addq %rsi, %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x01,0xf7]
+; CHECK-NEXT: movq $-1, %rcx # encoding: [0x48,0xc7,0xc1,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%add = call i64 @llvm.uadd.sat.i64(i64 %a, i64 %b)
@@ -350,9 +350,9 @@ define i8 @addflag8rm(i8 noundef %a, ptr %b) {
; CHECK-LABEL: addflag8rm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addb (%rsi), %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x02,0x3e]
-; CHECK-NEXT: movzbl %al, %ecx # encoding: [0x0f,0xb6,0xc8]
-; CHECK-NEXT: movl $255, %eax # encoding: [0xb8,0xff,0x00,0x00,0x00]
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: movzbl %al, %eax # encoding: [0x0f,0xb6,0xc0]
+; CHECK-NEXT: movl $255, %ecx # encoding: [0xb9,0xff,0x00,0x00,0x00]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -364,10 +364,10 @@ entry:
define i16 @addflag16rm(i16 noundef %a, ptr %b) {
; CHECK-LABEL: addflag16rm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addw (%rsi), %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x03,0x3e]
-; CHECK-NEXT: movl $65535, %eax # encoding: [0xb8,0xff,0xff,0x00,0x00]
+; CHECK-NEXT: addw (%rsi), %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x03,0x3e]
+; CHECK-NEXT: movl $65535, %ecx # encoding: [0xb9,0xff,0xff,0x00,0x00]
; CHECK-NEXT: # imm = 0xFFFF
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -379,9 +379,9 @@ entry:
define i32 @addflag32rm(i32 noundef %a, ptr %b) {
; CHECK-LABEL: addflag32rm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addl (%rsi), %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x03,0x3e]
-; CHECK-NEXT: movl $-1, %eax # encoding: [0xb8,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: addl (%rsi), %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x03,0x3e]
+; CHECK-NEXT: movl $-1, %ecx # encoding: [0xb9,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%t = load i32, ptr %b
@@ -392,9 +392,9 @@ entry:
define i64 @addflag64rm(i64 noundef %a, ptr %b) {
; CHECK-LABEL: addflag64rm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addq (%rsi), %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x03,0x3e]
-; CHECK-NEXT: movq $-1, %rax # encoding: [0x48,0xc7,0xc0,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: addq (%rsi), %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x03,0x3e]
+; CHECK-NEXT: movq $-1, %rcx # encoding: [0x48,0xc7,0xc1,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%t = load i64, ptr %b
@@ -405,10 +405,10 @@ entry:
define i16 @addflag16ri8(i16 noundef %a) {
; CHECK-LABEL: addflag16ri8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addw $123, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x83,0xc7,0x7b]
-; CHECK-NEXT: movl $65535, %eax # encoding: [0xb8,0xff,0xff,0x00,0x00]
+; CHECK-NEXT: addw $123, %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x83,0xc7,0x7b]
+; CHECK-NEXT: movl $65535, %ecx # encoding: [0xb9,0xff,0xff,0x00,0x00]
; CHECK-NEXT: # imm = 0xFFFF
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -419,9 +419,9 @@ entry:
define i32 @addflag32ri8(i32 noundef %a) {
; CHECK-LABEL: addflag32ri8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addl $123, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x83,0xc7,0x7b]
-; CHECK-NEXT: movl $-1, %eax # encoding: [0xb8,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: addl $123, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x83,0xc7,0x7b]
+; CHECK-NEXT: movl $-1, %ecx # encoding: [0xb9,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%add = call i32 @llvm.uadd.sat.i32(i32 %a, i32 123)
@@ -431,9 +431,9 @@ entry:
define i64 @addflag64ri8(i64 noundef %a) {
; CHECK-LABEL: addflag64ri8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addq $123, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x83,0xc7,0x7b]
-; CHECK-NEXT: movq $-1, %rax # encoding: [0x48,0xc7,0xc0,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: addq $123, %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x83,0xc7,0x7b]
+; CHECK-NEXT: movq $-1, %rcx # encoding: [0x48,0xc7,0xc1,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%add = call i64 @llvm.uadd.sat.i64(i64 %a, i64 123)
@@ -444,9 +444,9 @@ define i8 @addflag8ri(i8 noundef %a) {
; CHECK-LABEL: addflag8ri:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addb $123, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x80,0xc7,0x7b]
-; CHECK-NEXT: movzbl %al, %ecx # encoding: [0x0f,0xb6,0xc8]
-; CHECK-NEXT: movl $255, %eax # encoding: [0xb8,0xff,0x00,0x00,0x00]
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: movzbl %al, %eax # encoding: [0x0f,0xb6,0xc0]
+; CHECK-NEXT: movl $255, %ecx # encoding: [0xb9,0xff,0x00,0x00,0x00]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -457,11 +457,11 @@ entry:
define i16 @addflag16ri(i16 noundef %a) {
; CHECK-LABEL: addflag16ri:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addw $1234, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x81,0xc7,0xd2,0x04]
+; CHECK-NEXT: addw $1234, %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x81,0xc7,0xd2,0x04]
; CHECK-NEXT: # imm = 0x4D2
-; CHECK-NEXT: movl $65535, %eax # encoding: [0xb8,0xff,0xff,0x00,0x00]
+; CHECK-NEXT: movl $65535, %ecx # encoding: [0xb9,0xff,0xff,0x00,0x00]
; CHECK-NEXT: # imm = 0xFFFF
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -472,10 +472,10 @@ entry:
define i32 @addflag32ri(i32 noundef %a) {
; CHECK-LABEL: addflag32ri:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addl $123456, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x81,0xc7,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: addl $123456, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x81,0xc7,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
-; CHECK-NEXT: movl $-1, %eax # encoding: [0xb8,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovael %ecx, %eax # encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: movl $-1, %ecx # encoding: [0xb9,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%add = call i32 @llvm.uadd.sat.i32(i32 %a, i32 123456)
@@ -485,10 +485,10 @@ entry:
define i64 @addflag64ri(i64 noundef %a) {
; CHECK-LABEL: addflag64ri:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addq $123456, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x81,0xc7,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: addq $123456, %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x81,0xc7,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
-; CHECK-NEXT: movq $-1, %rax # encoding: [0x48,0xc7,0xc0,0xff,0xff,0xff,0xff]
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: movq $-1, %rcx # encoding: [0x48,0xc7,0xc1,0xff,0xff,0xff,0xff]
+; CHECK-NEXT: cmovbq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x42,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%add = call i64 @llvm.uadd.sat.i64(i64 %a, i64 123456)
diff --git a/llvm/test/CodeGen/X86/apx/cfcmov.ll b/llvm/test/CodeGen/X86/apx/cfcmov.ll
new file mode 100644
index 0000000..f643120
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/cfcmov.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+cf -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+cf -x86-cmov-converter=false -verify-machineinstrs | FileCheck %s
+
+define i8 @cfcmov8rr(i8 %0) {
+; CHECK-LABEL: cfcmov8rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cmpb $1, %dil
+; CHECK-NEXT: cfcmovel %edi, %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: retq
+ %2 = icmp eq i8 %0, 1
+ %3 = select i1 %2, i8 %0, i8 0
+ ret i8 %3
+}
+
+define i16 @cfcmov16rr(i16 %0) {
+; CHECK-LABEL: cfcmov16rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cmpw $1, %di
+; CHECK-NEXT: cfcmovnel %edi, %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
+; CHECK-NEXT: retq
+ %2 = icmp ne i16 %0, 1
+ %3 = select i1 %2, i16 %0, i16 0
+ ret i16 %3
+}
+
+define i32 @cfcmov32rr(i32 %0) {
+; CHECK-LABEL: cfcmov32rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cmpl $2, %edi
+; CHECK-NEXT: cfcmovael %edi, %eax
+; CHECK-NEXT: retq
+ %2 = icmp ugt i32 %0, 1
+ %3 = select i1 %2, i32 %0, i32 0
+ ret i32 %3
+}
+
+define i64 @cfcmov64rr(i64 %0) {
+; CHECK-LABEL: cfcmov64rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: cfcmoveq %rdi, %rax
+; CHECK-NEXT: retq
+ %2 = icmp ult i64 %0, 1
+ %3 = select i1 %2, i64 %0, i64 0
+ ret i64 %3
+}
+
+define i8 @cfcmov8rr_inv(i8 %0) {
+; CHECK-LABEL: cfcmov8rr_inv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cmpb $1, %dil
+; CHECK-NEXT: cfcmovnel %edi, %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: retq
+ %2 = icmp eq i8 %0, 1
+ %3 = select i1 %2, i8 0, i8 %0
+ ret i8 %3
+}
+
+define i16 @cfcmov16rr_inv(i16 %0) {
+; CHECK-LABEL: cfcmov16rr_inv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cmpw $1, %di
+; CHECK-NEXT: cfcmovel %edi, %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
+; CHECK-NEXT: retq
+ %2 = icmp ne i16 %0, 1
+ %3 = select i1 %2, i16 0, i16 %0
+ ret i16 %3
+}
+
+define i32 @cfcmov32rr_inv(i32 %0) {
+; CHECK-LABEL: cfcmov32rr_inv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cmpl $2, %edi
+; CHECK-NEXT: cfcmovbl %edi, %eax
+; CHECK-NEXT: retq
+ %2 = icmp ugt i32 %0, 1
+ %3 = select i1 %2, i32 0, i32 %0
+ ret i32 %3
+}
+
+define i64 @cfcmov64rr_inv(i64 %0) {
+; CHECK-LABEL: cfcmov64rr_inv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cmpq $2, %rdi
+; CHECK-NEXT: cfcmovaeq %rdi, %rax
+; CHECK-NEXT: retq
+ %2 = icmp ule i64 %0, 1
+ %3 = select i1 %2, i64 0, i64 %0
+ ret i64 %3
+}
diff --git a/llvm/test/CodeGen/X86/apx/domain-reassignment.mir b/llvm/test/CodeGen/X86/apx/domain-reassignment.mir
new file mode 100644
index 0000000..7352aa2
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/domain-reassignment.mir
@@ -0,0 +1,929 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -run-pass x86-domain-reassignment -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+ndd -o - %s | FileCheck %s
+--- |
+ ; ModuleID = '../test/CodeGen/X86/gpr-to-mask.ll'
+ source_filename = "../test/CodeGen/X86/gpr-to-mask.ll"
+ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64-unknown-unknown"
+
+ define void @test_fcmp_storefloat(i1 %cond, ptr %fptr, float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) #0 {
+ entry:
+ br i1 %cond, label %if, label %else
+
+ if: ; preds = %entry
+ %cmp1 = fcmp oeq float %f3, %f4
+ br label %exit
+
+ else: ; preds = %entry
+ %cmp2 = fcmp oeq float %f5, %f6
+ br label %exit
+
+ exit: ; preds = %else, %if
+ %val = phi i1 [ %cmp1, %if ], [ %cmp2, %else ]
+ %selected = select i1 %val, float %f1, float %f2
+ store float %selected, ptr %fptr
+ ret void
+ }
+
+ define void @test_8bitops() #0 {
+ ret void
+ }
+ define void @test_16bitops() #0 {
+ ret void
+ }
+ define void @test_32bitops() #0 {
+ ret void
+ }
+ define void @test_64bitops() #0 {
+ ret void
+ }
+ define void @test_16bitext() #0 {
+ ret void
+ }
+ define void @test_32bitext() #0 {
+ ret void
+ }
+ define void @test_64bitext() #0 {
+ ret void
+ }
+  ; Note that this function needs to be compiled with -global-isel
+ ; to obtain testable MIR
+ define void @test_unused(i64 %0) #0 {
+ %unused = lshr i64 %0, 7
+ ret void
+ }
+...
+---
+name: test_fcmp_storefloat
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr8, preferred-register: '' }
+ - { id: 1, class: gr8, preferred-register: '' }
+ - { id: 2, class: gr8, preferred-register: '' }
+ - { id: 3, class: gr32, preferred-register: '' }
+ - { id: 4, class: gr64, preferred-register: '' }
+ - { id: 5, class: vr128x, preferred-register: '' }
+ - { id: 6, class: fr32x, preferred-register: '' }
+ - { id: 7, class: fr32x, preferred-register: '' }
+ - { id: 8, class: fr32x, preferred-register: '' }
+ - { id: 9, class: fr32x, preferred-register: '' }
+ - { id: 10, class: fr32x, preferred-register: '' }
+ - { id: 11, class: gr8, preferred-register: '' }
+ - { id: 12, class: vk1, preferred-register: '' }
+ - { id: 13, class: gr32, preferred-register: '' }
+ - { id: 14, class: vk1, preferred-register: '' }
+ - { id: 15, class: gr32, preferred-register: '' }
+ - { id: 16, class: gr32, preferred-register: '' }
+ - { id: 17, class: gr32, preferred-register: '' }
+ - { id: 18, class: vk1wm, preferred-register: '' }
+ - { id: 19, class: vr128x, preferred-register: '' }
+ - { id: 20, class: vr128, preferred-register: '' }
+ - { id: 21, class: vr128, preferred-register: '' }
+ - { id: 22, class: fr32x, preferred-register: '' }
+liveins:
+ - { reg: '$edi', virtual-reg: '%3' }
+ - { reg: '$rsi', virtual-reg: '%4' }
+ - { reg: '$xmm0', virtual-reg: '%5' }
+ - { reg: '$xmm1', virtual-reg: '%6' }
+ - { reg: '$xmm2', virtual-reg: '%7' }
+ - { reg: '$xmm3', virtual-reg: '%8' }
+ - { reg: '$xmm4', virtual-reg: '%9' }
+ - { reg: '$xmm5', virtual-reg: '%10' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ ; CHECK-LABEL: name: test_fcmp_storefloat
+ ; CHECK: bb.0.entry:
+ ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK: liveins: $edi, $rsi, $xmm0, $xmm1, $xmm2, $xmm3, $xmm4, $xmm5
+ ; CHECK: [[COPY:%[0-9]+]]:fr32x = COPY $xmm5
+ ; CHECK: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm4
+ ; CHECK: [[COPY2:%[0-9]+]]:fr32x = COPY $xmm3
+ ; CHECK: [[COPY3:%[0-9]+]]:fr32x = COPY $xmm2
+ ; CHECK: [[COPY4:%[0-9]+]]:fr32x = COPY $xmm1
+ ; CHECK: [[COPY5:%[0-9]+]]:vr128x = COPY $xmm0
+ ; CHECK: [[COPY6:%[0-9]+]]:gr64 = COPY $rsi
+ ; CHECK: [[COPY7:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY8:%[0-9]+]]:gr8 = COPY [[COPY7]].sub_8bit
+ ; CHECK: TEST8ri killed [[COPY8]], 1, implicit-def $eflags
+ ; CHECK: JCC_1 %bb.2, 4, implicit $eflags
+ ; CHECK: JMP_1 %bb.1
+ ; CHECK: bb.1.if:
+ ; CHECK: successors: %bb.3(0x80000000)
+ ; CHECK: [[VCMPSSZrri:%[0-9]+]]:vk1 = VCMPSSZrri [[COPY3]], [[COPY2]], 0
+ ; CHECK: [[COPY9:%[0-9]+]]:vk32 = COPY [[VCMPSSZrri]]
+ ; CHECK: [[COPY10:%[0-9]+]]:vk8 = COPY [[COPY9]]
+ ; CHECK: JMP_1 %bb.3
+ ; CHECK: bb.2.else:
+ ; CHECK: successors: %bb.3(0x80000000)
+ ; CHECK: [[VCMPSSZrri1:%[0-9]+]]:vk1 = VCMPSSZrri [[COPY1]], [[COPY]], 0
+ ; CHECK: [[COPY11:%[0-9]+]]:vk32 = COPY [[VCMPSSZrri1]]
+ ; CHECK: [[COPY12:%[0-9]+]]:vk8 = COPY [[COPY11]]
+ ; CHECK: bb.3.exit:
+ ; CHECK: [[PHI:%[0-9]+]]:vk8 = PHI [[COPY12]], %bb.2, [[COPY10]], %bb.1
+ ; CHECK: [[DEF:%[0-9]+]]:vk32 = IMPLICIT_DEF
+ ; CHECK: [[COPY13:%[0-9]+]]:vk32 = COPY [[PHI]]
+ ; CHECK: [[COPY14:%[0-9]+]]:vk1wm = COPY [[COPY13]]
+ ; CHECK: [[COPY15:%[0-9]+]]:vr128x = COPY [[COPY4]]
+ ; CHECK: [[DEF1:%[0-9]+]]:vr128 = IMPLICIT_DEF
+ ; CHECK: [[VMOVSSZrrk:%[0-9]+]]:vr128 = VMOVSSZrrk [[COPY15]], killed [[COPY14]], killed [[DEF1]], [[COPY5]]
+ ; CHECK: [[COPY16:%[0-9]+]]:fr32x = COPY [[VMOVSSZrrk]]
+ ; CHECK: VMOVSSZmr [[COPY6]], 1, $noreg, 0, $noreg, killed [[COPY16]] :: (store (s32) into %ir.fptr)
+ ; CHECK: RET 0
+ bb.0.entry:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ liveins: $edi, $rsi, $xmm0, $xmm1, $xmm2, $xmm3, $xmm4, $xmm5
+
+ %10 = COPY $xmm5
+ %9 = COPY $xmm4
+ %8 = COPY $xmm3
+ %7 = COPY $xmm2
+ %6 = COPY $xmm1
+ %5 = COPY $xmm0
+ %4 = COPY $rsi
+ %3 = COPY $edi
+ %11 = COPY %3.sub_8bit
+ TEST8ri killed %11, 1, implicit-def $eflags
+ JCC_1 %bb.2, 4, implicit $eflags
+ JMP_1 %bb.1
+
+ bb.1.if:
+ successors: %bb.3(0x80000000)
+
+ %14 = VCMPSSZrri %7, %8, 0, implicit $mxcsr
+
+ ; check that cross domain copies are replaced with same domain copies.
+
+ %15 = COPY %14
+ %0 = COPY %15.sub_8bit
+ JMP_1 %bb.3
+
+ bb.2.else:
+ successors: %bb.3(0x80000000)
+ %12 = VCMPSSZrri %9, %10, 0, implicit $mxcsr
+
+ ; check that cross domain copies are replaced with same domain copies.
+
+ %13 = COPY %12
+ %1 = COPY %13.sub_8bit
+
+ bb.3.exit:
+
+ ; check PHI, IMPLICIT_DEF, and INSERT_SUBREG replacers.
+
+ %2 = PHI %1, %bb.2, %0, %bb.1
+ %17 = IMPLICIT_DEF
+ %16 = INSERT_SUBREG %17, %2, %subreg.sub_8bit_hi
+ %18 = COPY %16
+ %19 = COPY %6
+ %21 = IMPLICIT_DEF
+ %20 = VMOVSSZrrk %19, killed %18, killed %21, %5
+ %22 = COPY %20
+ VMOVSSZmr %4, 1, $noreg, 0, $noreg, killed %22 :: (store (s32) into %ir.fptr)
+ RET 0
+
+...
+---
+name: test_8bitops
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64, preferred-register: '' }
+ - { id: 1, class: vr512, preferred-register: '' }
+ - { id: 2, class: vr512, preferred-register: '' }
+ - { id: 3, class: vr512, preferred-register: '' }
+ - { id: 4, class: vr512, preferred-register: '' }
+ - { id: 5, class: vk8, preferred-register: '' }
+ - { id: 6, class: gr32, preferred-register: '' }
+ - { id: 7, class: gr8, preferred-register: '' }
+ - { id: 8, class: gr32, preferred-register: '' }
+ - { id: 9, class: gr32, preferred-register: '' }
+ - { id: 10, class: vk8wm, preferred-register: '' }
+ - { id: 11, class: vr512, preferred-register: '' }
+ - { id: 12, class: gr8, preferred-register: '' }
+ - { id: 13, class: gr8, preferred-register: '' }
+ - { id: 14, class: gr8, preferred-register: '' }
+ - { id: 15, class: gr8, preferred-register: '' }
+ - { id: 16, class: gr8, preferred-register: '' }
+ - { id: 17, class: gr8, preferred-register: '' }
+ - { id: 18, class: gr8, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+ - { reg: '$zmm2', virtual-reg: '%3' }
+ - { reg: '$zmm3', virtual-reg: '%4' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ ; CHECK-LABEL: name: test_8bitops
+ ; CHECK: bb.0:
+ ; CHECK: successors: %bb.1(0x80000000)
+ ; CHECK: liveins: $rdi, $zmm0, $zmm1, $zmm2, $zmm3
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+ ; CHECK: [[COPY3:%[0-9]+]]:vr512 = COPY $zmm2
+ ; CHECK: [[COPY4:%[0-9]+]]:vr512 = COPY $zmm3
+ ; CHECK: [[VCMPPDZrri:%[0-9]+]]:vk8 = VCMPPDZrri [[COPY3]], [[COPY4]], 0
+ ; CHECK: [[COPY5:%[0-9]+]]:vk32 = COPY [[VCMPPDZrri]]
+ ; CHECK: [[COPY6:%[0-9]+]]:vk8 = COPY [[COPY5]]
+ ; CHECK: [[KSHIFTRBri:%[0-9]+]]:vk8 = KSHIFTRBri [[COPY6]], 2
+ ; CHECK: [[KSHIFTLBri:%[0-9]+]]:vk8 = KSHIFTLBri [[KSHIFTRBri]], 1
+ ; CHECK: [[KNOTBrr:%[0-9]+]]:vk8 = KNOTBrr [[KSHIFTLBri]]
+ ; CHECK: [[KORBrr:%[0-9]+]]:vk8 = KORBrr [[KNOTBrr]], [[KSHIFTRBri]]
+ ; CHECK: [[KANDBrr:%[0-9]+]]:vk8 = KANDBrr [[KORBrr]], [[KSHIFTLBri]]
+ ; CHECK: [[KXORBrr:%[0-9]+]]:vk8 = KXORBrr [[KANDBrr]], [[KSHIFTRBri]]
+ ; CHECK: [[KADDBrr:%[0-9]+]]:vk8 = KADDBrr [[KXORBrr]], [[KNOTBrr]]
+ ; CHECK: [[DEF:%[0-9]+]]:vk32 = IMPLICIT_DEF
+ ; CHECK: [[COPY7:%[0-9]+]]:vk32 = COPY [[KADDBrr]]
+ ; CHECK: [[COPY8:%[0-9]+]]:vk8wm = COPY [[COPY7]]
+ ; CHECK: [[VMOVAPDZrrk:%[0-9]+]]:vr512 = VMOVAPDZrrk [[COPY2]], killed [[COPY8]], [[COPY1]]
+ ; CHECK: VMOVAPDZmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVAPDZrrk]]
+ ; CHECK: bb.1:
+ ; CHECK: successors: %bb.2(0x80000000)
+ ; CHECK: bb.2:
+ ; CHECK: RET 0
+ bb.0:
+ liveins: $rdi, $zmm0, $zmm1, $zmm2, $zmm3
+
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+ %3 = COPY $zmm2
+ %4 = COPY $zmm3
+
+ %5 = VCMPPDZrri %3, %4, 0, implicit $mxcsr
+ %6 = COPY %5
+ %7 = COPY %6.sub_8bit
+
+ %12 = SHR8ri_ND %7, 2, implicit-def dead $eflags
+ %13 = SHL8ri_ND %12, 1, implicit-def dead $eflags
+ %14 = NOT8r_ND %13
+ %15 = OR8rr_ND %14, %12, implicit-def dead $eflags
+ %16 = AND8rr_ND %15, %13, implicit-def dead $eflags
+ %17 = XOR8rr_ND %16, %12, implicit-def dead $eflags
+ %18 = ADD8rr_ND %17, %14, implicit-def dead $eflags
+
+ %8 = IMPLICIT_DEF
+ %9 = INSERT_SUBREG %8, %18, %subreg.sub_8bit_hi
+ %10 = COPY %9
+ %11 = VMOVAPDZrrk %2, killed %10, %1
+ VMOVAPDZmr %0, 1, $noreg, 0, $noreg, killed %11
+
+ ; FIXME We can't replace TEST with KTEST due to flag differences
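+    ; (TEST sets SF/ZF/PF from the AND of its operands and clears CF/OF, whereas
+    ; KTEST only sets ZF and CF, so the JCC condition code cannot be reused as is.)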
+ ; TEST8rr %18, %18, implicit-def $eflags
+ ; JCC_1 %bb.1, 4, implicit $eflags
+ ; JMP_1 %bb.2
+
+ bb.1:
+
+ bb.2:
+ RET 0
+
+...
+---
+name: test_16bitops
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64, preferred-register: '' }
+ - { id: 1, class: vr512, preferred-register: '' }
+ - { id: 2, class: vr512, preferred-register: '' }
+ - { id: 3, class: vr512, preferred-register: '' }
+ - { id: 4, class: vr512, preferred-register: '' }
+ - { id: 5, class: vk16, preferred-register: '' }
+ - { id: 6, class: gr32, preferred-register: '' }
+ - { id: 7, class: gr16, preferred-register: '' }
+ - { id: 8, class: gr32, preferred-register: '' }
+ - { id: 9, class: gr32, preferred-register: '' }
+ - { id: 10, class: vk16wm, preferred-register: '' }
+ - { id: 11, class: vr512, preferred-register: '' }
+ - { id: 12, class: gr16, preferred-register: '' }
+ - { id: 13, class: gr16, preferred-register: '' }
+ - { id: 14, class: gr16, preferred-register: '' }
+ - { id: 15, class: gr16, preferred-register: '' }
+ - { id: 16, class: gr16, preferred-register: '' }
+ - { id: 17, class: gr16, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+ - { reg: '$zmm2', virtual-reg: '%3' }
+ - { reg: '$zmm3', virtual-reg: '%4' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ ; CHECK-LABEL: name: test_16bitops
+ ; CHECK: bb.0:
+ ; CHECK: successors: %bb.1(0x80000000)
+ ; CHECK: liveins: $rdi, $zmm0, $zmm1, $zmm2, $zmm3
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+ ; CHECK: [[COPY3:%[0-9]+]]:vr512 = COPY $zmm2
+ ; CHECK: [[COPY4:%[0-9]+]]:vr512 = COPY $zmm3
+ ; CHECK: [[VCMPPSZrri:%[0-9]+]]:vk16 = VCMPPSZrri [[COPY3]], [[COPY4]], 0
+ ; CHECK: [[COPY5:%[0-9]+]]:vk32 = COPY [[VCMPPSZrri]]
+ ; CHECK: [[COPY6:%[0-9]+]]:vk16 = COPY [[COPY5]]
+ ; CHECK: [[KSHIFTRWri:%[0-9]+]]:vk16 = KSHIFTRWri [[COPY6]], 2
+ ; CHECK: [[KSHIFTLWri:%[0-9]+]]:vk16 = KSHIFTLWri [[KSHIFTRWri]], 1
+ ; CHECK: [[KNOTWrr:%[0-9]+]]:vk16 = KNOTWrr [[KSHIFTLWri]]
+ ; CHECK: [[KORWrr:%[0-9]+]]:vk16 = KORWrr [[KNOTWrr]], [[KSHIFTRWri]]
+ ; CHECK: [[KANDWrr:%[0-9]+]]:vk16 = KANDWrr [[KORWrr]], [[KSHIFTLWri]]
+ ; CHECK: [[KXORWrr:%[0-9]+]]:vk16 = KXORWrr [[KANDWrr]], [[KSHIFTRWri]]
+ ; CHECK: [[DEF:%[0-9]+]]:vk32 = IMPLICIT_DEF
+ ; CHECK: [[COPY7:%[0-9]+]]:vk32 = COPY [[KXORWrr]]
+ ; CHECK: [[COPY8:%[0-9]+]]:vk16wm = COPY [[COPY7]]
+ ; CHECK: [[VMOVAPSZrrk:%[0-9]+]]:vr512 = VMOVAPSZrrk [[COPY2]], killed [[COPY8]], [[COPY1]]
+ ; CHECK: VMOVAPSZmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVAPSZrrk]]
+ ; CHECK: bb.1:
+ ; CHECK: successors: %bb.2(0x80000000)
+ ; CHECK: bb.2:
+ ; CHECK: RET 0
+ bb.0:
+ liveins: $rdi, $zmm0, $zmm1, $zmm2, $zmm3
+
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+ %3 = COPY $zmm2
+ %4 = COPY $zmm3
+
+ %5 = VCMPPSZrri %3, %4, 0, implicit $mxcsr
+ %6 = COPY %5
+ %7 = COPY %6.sub_16bit
+
+ %12 = SHR16ri_ND %7, 2, implicit-def dead $eflags
+ %13 = SHL16ri_ND %12, 1, implicit-def dead $eflags
+ %14 = NOT16r_ND %13
+ %15 = OR16rr_ND %14, %12, implicit-def dead $eflags
+ %16 = AND16rr_ND %15, %13, implicit-def dead $eflags
+ %17 = XOR16rr_ND %16, %12, implicit-def dead $eflags
+
+ %8 = IMPLICIT_DEF
+ %9 = INSERT_SUBREG %8, %17, %subreg.sub_16bit
+ %10 = COPY %9
+ %11 = VMOVAPSZrrk %2, killed %10, %1
+ VMOVAPSZmr %0, 1, $noreg, 0, $noreg, killed %11
+
+ ; FIXME We can't replace TEST with KTEST due to flag differences
+ ; FIXME TEST16rr %17, %17, implicit-def $eflags
+ ; FIXME JCC_1 %bb.1, 4, implicit $eflags
+ ; FIXME JMP_1 %bb.2
+
+ bb.1:
+
+ bb.2:
+ RET 0
+
+...
+---
+name: test_32bitops
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64, preferred-register: '' }
+ - { id: 1, class: vr512, preferred-register: '' }
+ - { id: 2, class: vr512, preferred-register: '' }
+ - { id: 3, class: vk32wm, preferred-register: '' }
+ - { id: 4, class: vr512, preferred-register: '' }
+ - { id: 5, class: gr32, preferred-register: '' }
+ - { id: 6, class: gr32, preferred-register: '' }
+ - { id: 7, class: gr32, preferred-register: '' }
+ - { id: 8, class: gr32, preferred-register: '' }
+ - { id: 9, class: gr32, preferred-register: '' }
+ - { id: 10, class: gr32, preferred-register: '' }
+ - { id: 11, class: gr32, preferred-register: '' }
+ - { id: 12, class: gr32, preferred-register: '' }
+ - { id: 13, class: gr32, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ ; CHECK-LABEL: name: test_32bitops
+ ; CHECK: bb.0:
+ ; CHECK: successors: %bb.1(0x80000000)
+ ; CHECK: liveins: $rdi, $zmm0, $zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+ ; CHECK: [[KMOVDkm:%[0-9]+]]:vk32 = KMOVDkm [[COPY]], 1, $noreg, 0, $noreg
+ ; CHECK: [[KSHIFTRDri:%[0-9]+]]:vk32 = KSHIFTRDri [[KMOVDkm]], 2
+ ; CHECK: [[KSHIFTLDri:%[0-9]+]]:vk32 = KSHIFTLDri [[KSHIFTRDri]], 1
+ ; CHECK: [[KNOTDrr:%[0-9]+]]:vk32 = KNOTDrr [[KSHIFTLDri]]
+ ; CHECK: [[KORDrr:%[0-9]+]]:vk32 = KORDrr [[KNOTDrr]], [[KSHIFTRDri]]
+ ; CHECK: [[KANDDrr:%[0-9]+]]:vk32 = KANDDrr [[KORDrr]], [[KSHIFTLDri]]
+ ; CHECK: [[KXORDrr:%[0-9]+]]:vk32 = KXORDrr [[KANDDrr]], [[KSHIFTRDri]]
+ ; CHECK: [[KANDNDrr:%[0-9]+]]:vk32 = KANDNDrr [[KXORDrr]], [[KORDrr]]
+ ; CHECK: [[KADDDrr:%[0-9]+]]:vk32 = KADDDrr [[KANDNDrr]], [[KXORDrr]]
+ ; CHECK: [[COPY3:%[0-9]+]]:vk32wm = COPY [[KADDDrr]]
+ ; CHECK: [[VMOVDQU16Zrrk:%[0-9]+]]:vr512 = VMOVDQU16Zrrk [[COPY2]], killed [[COPY3]], [[COPY1]]
+ ; CHECK: VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU16Zrrk]]
+ ; CHECK: bb.1:
+ ; CHECK: successors: %bb.2(0x80000000)
+ ; CHECK: bb.2:
+ ; CHECK: RET 0
+ bb.0:
+ liveins: $rdi, $zmm0, $zmm1
+
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+
+ %5 = MOV32rm %0, 1, $noreg, 0, $noreg
+ %6 = SHR32ri_ND %5, 2, implicit-def dead $eflags
+ %7 = SHL32ri_ND %6, 1, implicit-def dead $eflags
+ %8 = NOT32r_ND %7
+ %9 = OR32rr_ND %8, %6, implicit-def dead $eflags
+ %10 = AND32rr_ND %9, %7, implicit-def dead $eflags
+ %11 = XOR32rr_ND %10, %6, implicit-def dead $eflags
+ %12 = ANDN32rr %11, %9, implicit-def dead $eflags
+ %13 = ADD32rr_ND %12, %11, implicit-def dead $eflags
+
+ %3 = COPY %13
+ %4 = VMOVDQU16Zrrk %2, killed %3, %1
+ VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
+
+ ; FIXME We can't replace TEST with KTEST due to flag differences
+ ; FIXME TEST32rr %13, %13, implicit-def $eflags
+ ; FIXME JCC_1 %bb.1, 4, implicit $eflags
+ ; FIXME JMP_1 %bb.2
+
+ bb.1:
+
+ bb.2:
+ RET 0
+
+...
+---
+name: test_64bitops
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64, preferred-register: '' }
+ - { id: 1, class: vr512, preferred-register: '' }
+ - { id: 2, class: vr512, preferred-register: '' }
+ - { id: 3, class: vk64wm, preferred-register: '' }
+ - { id: 4, class: vr512, preferred-register: '' }
+ - { id: 5, class: gr64, preferred-register: '' }
+ - { id: 6, class: gr64, preferred-register: '' }
+ - { id: 7, class: gr64, preferred-register: '' }
+ - { id: 8, class: gr64, preferred-register: '' }
+ - { id: 9, class: gr64, preferred-register: '' }
+ - { id: 10, class: gr64, preferred-register: '' }
+ - { id: 11, class: gr64, preferred-register: '' }
+ - { id: 12, class: gr64, preferred-register: '' }
+ - { id: 13, class: gr64, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ ; CHECK-LABEL: name: test_64bitops
+ ; CHECK: bb.0:
+ ; CHECK: successors: %bb.1(0x80000000)
+ ; CHECK: liveins: $rdi, $zmm0, $zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+ ; CHECK: [[KMOVQkm:%[0-9]+]]:vk64 = KMOVQkm [[COPY]], 1, $noreg, 0, $noreg
+ ; CHECK: [[KSHIFTRQri:%[0-9]+]]:vk64 = KSHIFTRQri [[KMOVQkm]], 2
+ ; CHECK: [[KSHIFTLQri:%[0-9]+]]:vk64 = KSHIFTLQri [[KSHIFTRQri]], 1
+ ; CHECK: [[KNOTQrr:%[0-9]+]]:vk64 = KNOTQrr [[KSHIFTLQri]]
+ ; CHECK: [[KORQrr:%[0-9]+]]:vk64 = KORQrr [[KNOTQrr]], [[KSHIFTRQri]]
+ ; CHECK: [[KANDQrr:%[0-9]+]]:vk64 = KANDQrr [[KORQrr]], [[KSHIFTLQri]]
+ ; CHECK: [[KXORQrr:%[0-9]+]]:vk64 = KXORQrr [[KANDQrr]], [[KSHIFTRQri]]
+ ; CHECK: [[KANDNQrr:%[0-9]+]]:vk64 = KANDNQrr [[KXORQrr]], [[KORQrr]]
+ ; CHECK: [[KADDQrr:%[0-9]+]]:vk64 = KADDQrr [[KANDNQrr]], [[KXORQrr]]
+ ; CHECK: [[COPY3:%[0-9]+]]:vk64wm = COPY [[KADDQrr]]
+ ; CHECK: [[VMOVDQU8Zrrk:%[0-9]+]]:vr512 = VMOVDQU8Zrrk [[COPY2]], killed [[COPY3]], [[COPY1]]
+ ; CHECK: VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU8Zrrk]]
+ ; CHECK: bb.1:
+ ; CHECK: successors: %bb.2(0x80000000)
+ ; CHECK: bb.2:
+ ; CHECK: RET 0
+ bb.0:
+ liveins: $rdi, $zmm0, $zmm1
+
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+
+ %5 = MOV64rm %0, 1, $noreg, 0, $noreg
+ %6 = SHR64ri_ND %5, 2, implicit-def dead $eflags
+ %7 = SHL64ri_ND %6, 1, implicit-def dead $eflags
+ %8 = NOT64r_ND %7
+ %9 = OR64rr_ND %8, %6, implicit-def dead $eflags
+ %10 = AND64rr_ND %9, %7, implicit-def dead $eflags
+ %11 = XOR64rr_ND %10, %6, implicit-def dead $eflags
+ %12 = ANDN64rr %11, %9, implicit-def dead $eflags
+ %13 = ADD64rr_ND %12, %11, implicit-def dead $eflags
+
+ %3 = COPY %13
+ %4 = VMOVDQU8Zrrk %2, killed %3, %1
+ VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
+
+ ; FIXME We can't replace TEST with KTEST due to flag differences
+ ; FIXME TEST64rr %13, %13, implicit-def $eflags
+ ; FIXME JCC_1 %bb.1, 4, implicit $eflags
+ ; FIXME JMP_1 %bb.2
+
+ bb.1:
+
+ bb.2:
+ RET 0
+
+...
+---
+name: test_16bitext
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64, preferred-register: '' }
+ - { id: 1, class: vr512, preferred-register: '' }
+ - { id: 2, class: vr512, preferred-register: '' }
+ - { id: 3, class: vk16wm, preferred-register: '' }
+ - { id: 4, class: vr512, preferred-register: '' }
+ - { id: 5, class: gr16, preferred-register: '' }
+ - { id: 6, class: gr16, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0:
+ liveins: $rdi, $zmm0, $zmm1
+
+ ; CHECK-LABEL: name: test_16bitext
+ ; CHECK: liveins: $rdi, $zmm0, $zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+ ; CHECK: [[KMOVBkm:%[0-9]+]]:vk8 = KMOVBkm [[COPY]], 1, $noreg, 0, $noreg
+ ; CHECK: [[COPY3:%[0-9]+]]:vk16 = COPY [[KMOVBkm]]
+ ; CHECK: [[KNOTWrr:%[0-9]+]]:vk16 = KNOTWrr [[COPY3]]
+ ; CHECK: [[COPY4:%[0-9]+]]:vk16wm = COPY [[KNOTWrr]]
+ ; CHECK: [[VMOVAPSZrrk:%[0-9]+]]:vr512 = VMOVAPSZrrk [[COPY2]], killed [[COPY4]], [[COPY1]]
+ ; CHECK: VMOVAPSZmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVAPSZrrk]]
+ ; CHECK: RET 0
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+
+ %5 = MOVZX16rm8 %0, 1, $noreg, 0, $noreg
+ %6 = NOT16r_ND %5
+
+ %3 = COPY %6
+ %4 = VMOVAPSZrrk %2, killed %3, %1
+ VMOVAPSZmr %0, 1, $noreg, 0, $noreg, killed %4
+ RET 0
+
+...
+---
+name: test_32bitext
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64, preferred-register: '' }
+ - { id: 1, class: vr512, preferred-register: '' }
+ - { id: 2, class: vr512, preferred-register: '' }
+ - { id: 3, class: vk64wm, preferred-register: '' }
+ - { id: 4, class: vr512, preferred-register: '' }
+ - { id: 5, class: gr32, preferred-register: '' }
+ - { id: 6, class: gr32, preferred-register: '' }
+ - { id: 7, class: gr32, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0:
+ liveins: $rdi, $zmm0, $zmm1
+
+ ; CHECK-LABEL: name: test_32bitext
+ ; CHECK: liveins: $rdi, $zmm0, $zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+ ; CHECK: [[KMOVBkm:%[0-9]+]]:vk8 = KMOVBkm [[COPY]], 1, $noreg, 0, $noreg
+ ; CHECK: [[COPY3:%[0-9]+]]:vk32 = COPY [[KMOVBkm]]
+ ; CHECK: [[KMOVWkm:%[0-9]+]]:vk16 = KMOVWkm [[COPY]], 1, $noreg, 0, $noreg
+ ; CHECK: [[COPY4:%[0-9]+]]:vk32 = COPY [[KMOVWkm]]
+ ; CHECK: [[KADDDrr:%[0-9]+]]:vk32 = KADDDrr [[COPY3]], [[COPY4]]
+ ; CHECK: [[COPY5:%[0-9]+]]:vk64wm = COPY [[KADDDrr]]
+ ; CHECK: [[VMOVDQU16Zrrk:%[0-9]+]]:vr512 = VMOVDQU16Zrrk [[COPY2]], killed [[COPY5]], [[COPY1]]
+ ; CHECK: VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU16Zrrk]]
+ ; CHECK: RET 0
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+
+ %5 = MOVZX32rm8 %0, 1, $noreg, 0, $noreg
+ %6 = MOVZX32rm16 %0, 1, $noreg, 0, $noreg
+ %7 = ADD32rr_ND %5, %6, implicit-def dead $eflags
+
+ %3 = COPY %7
+ %4 = VMOVDQU16Zrrk %2, killed %3, %1
+ VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
+ RET 0
+
+...
+---
+name: test_64bitext
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64, preferred-register: '' }
+ - { id: 1, class: vr512, preferred-register: '' }
+ - { id: 2, class: vr512, preferred-register: '' }
+ - { id: 3, class: vk64wm, preferred-register: '' }
+ - { id: 4, class: vr512, preferred-register: '' }
+ - { id: 5, class: gr64, preferred-register: '' }
+ - { id: 6, class: gr64, preferred-register: '' }
+ - { id: 7, class: gr64, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0:
+ liveins: $rdi, $zmm0, $zmm1
+
+ ; CHECK-LABEL: name: test_64bitext
+ ; CHECK: liveins: $rdi, $zmm0, $zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+ ; CHECK: [[KMOVBkm:%[0-9]+]]:vk8 = KMOVBkm [[COPY]], 1, $noreg, 0, $noreg
+ ; CHECK: [[COPY3:%[0-9]+]]:vk64 = COPY [[KMOVBkm]]
+ ; CHECK: [[KMOVWkm:%[0-9]+]]:vk16 = KMOVWkm [[COPY]], 1, $noreg, 0, $noreg
+ ; CHECK: [[COPY4:%[0-9]+]]:vk64 = COPY [[KMOVWkm]]
+ ; CHECK: [[KADDQrr:%[0-9]+]]:vk64 = KADDQrr [[COPY3]], [[COPY4]]
+ ; CHECK: [[COPY5:%[0-9]+]]:vk64wm = COPY [[KADDQrr]]
+ ; CHECK: [[VMOVDQU8Zrrk:%[0-9]+]]:vr512 = VMOVDQU8Zrrk [[COPY2]], killed [[COPY5]], [[COPY1]]
+ ; CHECK: VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU8Zrrk]]
+ ; CHECK: RET 0
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+
+ %5 = MOVZX64rm8 %0, 1, $noreg, 0, $noreg
+ %6 = MOVZX64rm16 %0, 1, $noreg, 0, $noreg
+ %7 = ADD64rr_ND %5, %6, implicit-def dead $eflags
+
+ %3 = COPY %7
+ %4 = VMOVDQU8Zrrk %2, killed %3, %1
+ VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
+ RET 0
+
+...
+---
+name: test_unused
+alignment: 16
+exposesReturnsTwice: false
+legalized: true
+regBankSelected: true
+selected: true
+failedISel: false
+tracksRegLiveness: true
+hasWinCFI: false
+callsEHReturn: false
+callsUnwindInit: false
+hasEHCatchret: false
+hasEHScopes: false
+hasEHFunclets: false
+isOutlined: false
+debugInstrRef: false
+failsVerification: false
+tracksDebugUserValues: false
+registers:
+# Note that this test is supposed to have registers without classes
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+ - { reg: '$rdi', virtual-reg: '' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 1
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ functionContext: ''
+ maxCallFrameSize: 4294967295
+ cvBytesOfCalleeSavedRegisters: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ hasTailCall: false
+ localFrameSize: 0
+ savePoint: ''
+ restorePoint: ''
+fixedStack: []
+stack: []
+entry_values: []
+callSites: []
+debugValueSubstitutions: []
+constants: []
+machineFunctionInfo: {}
+body: |
+ bb.1 (%ir-block.1):
+ liveins: $rdi
+
+ RET 0
+
+...
diff --git a/llvm/test/CodeGen/X86/apx/flags-copy-lowering.mir b/llvm/test/CodeGen/X86/apx/flags-copy-lowering.mir
index d6a9cda..e81a448 100644
--- a/llvm/test/CodeGen/X86/apx/flags-copy-lowering.mir
+++ b/llvm/test/CodeGen/X86/apx/flags-copy-lowering.mir
@@ -29,6 +29,18 @@
call void @foo()
ret void
}
+
+ define void @test_cmov(i64 %a, i64 %b) {
+ entry:
+ call void @foo()
+ ret void
+ }
+
+ define void @test_cfcmov(i64 %a, i64 %b) {
+ entry:
+ call void @foo()
+ ret void
+ }
...
---
name: test_adc
@@ -166,3 +178,93 @@ body: |
RET 0
...
+---
+name: test_cmov
+# CHECK-LABEL: name: test_cmov
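+# The $eflags copy that is live across the call has to be rewritten: each needed
+# condition is materialized into a GR8 via SETCC before the call and re-tested
+# with TEST8rr in front of the corresponding CMOV afterwards.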
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$rsi', virtual-reg: '%1' }
+body: |
+ bb.0:
+ liveins: $rdi, $rsi
+
+ %0:gr64 = COPY $rdi
+ %1:gr64 = COPY $rsi
+ CMP64rr %0, %1, implicit-def $eflags
+ %2:gr64 = COPY $eflags
+ ; CHECK-NOT: COPY{{( killed)?}} $eflags
+ ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
+ ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
+ ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
+ ; CHECK-NOT: COPY{{( killed)?}} $eflags
+
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ CALL64pcrel32 @foo, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def $eax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+
+ $eflags = COPY %2
+ %3:gr64 = CMOV64rr_ND %0, %1, 7, implicit $eflags
+ %4:gr64 = CMOV64rr_ND %0, %1, 2, implicit $eflags
+ %5:gr64 = CMOV64rr_ND %0, %1, 4, implicit $eflags
+ %6:gr64 = CMOV64rr_ND %0, %1, 5, implicit killed $eflags
+ ; CHECK-NOT: $eflags =
+ ; CHECK: TEST8rr %[[A_REG]], %[[A_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %3:gr64 = CMOV64rr_ND %0, %1, 5, implicit killed $eflags
+ ; CHECK-NEXT: TEST8rr %[[B_REG]], %[[B_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %4:gr64 = CMOV64rr_ND %0, %1, 5, implicit killed $eflags
+ ; CHECK-NEXT: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %5:gr64 = CMOV64rr_ND %0, %1, 5, implicit killed $eflags
+ ; CHECK-NEXT: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %6:gr64 = CMOV64rr_ND %0, %1, 4, implicit killed $eflags
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %3
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %4
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %5
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %6
+
+ RET 0
+...
+---
+name: test_cfcmov
+# CHECK-LABEL: name: test_cfcmov
+liveins:
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$rsi', virtual-reg: '%1' }
+body: |
+ bb.0:
+ liveins: $rdi, $rsi
+
+ %0:gr64 = COPY $rdi
+ %1:gr64 = COPY $rsi
+ CMP64rr %0, %1, implicit-def $eflags
+ %2:gr64 = COPY $eflags
+ ; CHECK-NOT: COPY{{( killed)?}} $eflags
+ ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
+ ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
+ ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
+ ; CHECK-NOT: COPY{{( killed)?}} $eflags
+
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ CALL64pcrel32 @foo, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def $eax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+
+ $eflags = COPY %2
+ %3:gr64 = CFCMOV64rr %1, 7, implicit $eflags
+ %4:gr64 = CFCMOV64rr %1, 2, implicit $eflags
+ %5:gr64 = CFCMOV64rr_ND %0, %1, 4, implicit $eflags
+ %6:gr64 = CFCMOV64rr_ND %0, %1, 5, implicit killed $eflags
+ ; CHECK-NOT: $eflags =
+ ; CHECK: TEST8rr %[[A_REG]], %[[A_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %3:gr64 = CFCMOV64rr %1, 5, implicit killed $eflags
+ ; CHECK-NEXT: TEST8rr %[[B_REG]], %[[B_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %4:gr64 = CFCMOV64rr %1, 5, implicit killed $eflags
+ ; CHECK-NEXT: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %5:gr64 = CFCMOV64rr_ND %0, %1, 5, implicit killed $eflags
+ ; CHECK-NEXT: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def $eflags
+ ; CHECK-NEXT: %6:gr64 = CFCMOV64rr_ND %0, %1, 4, implicit killed $eflags
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %3
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %4
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %5
+ MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %6
+
+ RET 0
+...
diff --git a/llvm/test/CodeGen/X86/apx/foldimmediate.mir b/llvm/test/CodeGen/X86/apx/foldimmediate.mir
new file mode 100644
index 0000000..310fc64
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/foldimmediate.mir
@@ -0,0 +1,70 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+# RUN: llc -mtriple=x86_64-- -run-pass=peephole-opt %s -o - | FileCheck %s
+--- |
+ define void @foldImmediate() { ret void }
+...
+---
+# Check that immediates can be folded into CCMP/CTEST instructions.
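+# For example, CTEST32rr %0, %1 with %0 = MOV32ri 81 should be rewritten to
+# CTEST32ri %1, 81 (see the CHECK lines below).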
+name: foldImmediate
+registers:
+ - { id: 0, class: gr32 }
+ - { id: 1, class: gr32 }
+ - { id: 2, class: gr32 }
+ - { id: 3, class: gr32 }
+ - { id: 4, class: gr32 }
+ - { id: 5, class: gr32 }
+ - { id: 6, class: gr32 }
+ - { id: 7, class: gr64 }
+ - { id: 8, class: gr64 }
+ - { id: 9, class: gr64 }
+ - { id: 10, class: gr64 }
+ - { id: 11, class: gr64 }
+ - { id: 12, class: gr64 }
+ - { id: 13, class: gr64 }
+ - { id: 14, class: gr64 }
+ - { id: 15, class: gr64 }
+ - { id: 16, class: gr32 }
+ - { id: 17, class: gr64 }
+ - { id: 18, class: gr32 }
+
+body: |
+ bb.0:
+ liveins: $rdi, $rsi
+
+ ; CHECK-LABEL: name: foldImmediate
+ ; CHECK: liveins: $rdi, $rsi
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 81
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK-NEXT: CTEST32ri [[COPY]], 81, 2, 10, implicit-def $eflags, implicit $eflags
+ ; CHECK-NEXT: NOOP implicit $eflags
+ ; CHECK-NEXT: CCMP32ri [[COPY]], 81, 2, 10, implicit-def $eflags, implicit $eflags
+ ; CHECK-NEXT: NOOP implicit $eflags
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, killed [[MOV32ri]], %subreg.sub_32bit
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; CHECK-NEXT: CTEST64ri32 [[COPY1]], 81, 2, 10, implicit-def $eflags, implicit $eflags
+ ; CHECK-NEXT: NOOP implicit $eflags
+ ; CHECK-NEXT: CCMP64ri32 [[COPY1]], 81, 2, 10, implicit-def $eflags, implicit $eflags
+ ; CHECK-NEXT: NOOP implicit $eflags
+ ; CHECK-NEXT: CCMP64rr [[SUBREG_TO_REG]], [[COPY1]], 2, 10, implicit-def $eflags, implicit $eflags
+ ; CHECK-NEXT: NOOP implicit $eflags
+ %0 = MOV32ri 81
+ %1 = COPY $edi
+
+ CTEST32rr %0, %1, 2, 10, implicit-def $eflags, implicit $eflags
+ NOOP implicit $eflags
+
+ CCMP32rr %1, %0, 2, 10, implicit-def $eflags, implicit $eflags
+ NOOP implicit $eflags
+
+ %7 = SUBREG_TO_REG 0, killed %0:gr32, %subreg.sub_32bit
+ %8 = COPY $rsi
+
+ CTEST64rr %8, %7, 2, 10, implicit-def $eflags, implicit $eflags
+ NOOP implicit $eflags
+
+ CCMP64rr %8, %7, 2, 10, implicit-def $eflags, implicit $eflags
+ NOOP implicit $eflags
+ CCMP64rr %7, %8, 2, 10, implicit-def $eflags, implicit $eflags
+ NOOP implicit $eflags
+...
diff --git a/llvm/test/CodeGen/X86/apx/inc.ll b/llvm/test/CodeGen/X86/apx/inc.ll
index 613f786..a9c6d74 100644
--- a/llvm/test/CodeGen/X86/apx/inc.ll
+++ b/llvm/test/CodeGen/X86/apx/inc.ll
@@ -92,9 +92,9 @@ define i8 @uinc8r(i8 noundef %a) {
; CHECK-LABEL: uinc8r:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: incb %dil, %al
-; CHECK-NEXT: movzbl %al, %ecx
-; CHECK-NEXT: movl $255, %eax
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: movl $255, %ecx
+; CHECK-NEXT: cmovel %ecx, %eax
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
entry:
@@ -105,9 +105,9 @@ entry:
define i16 @uinc16r(i16 noundef %a) {
; CHECK-LABEL: uinc16r:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: incw %di, %cx
-; CHECK-NEXT: movl $65535, %eax # imm = 0xFFFF
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: incw %di, %ax
+; CHECK-NEXT: movl $65535, %ecx # imm = 0xFFFF
+; CHECK-NEXT: cmovel %ecx, %eax
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
entry:
@@ -118,9 +118,9 @@ entry:
define i32 @uinc32r(i32 noundef %a) {
; CHECK-LABEL: uinc32r:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: incl %edi, %ecx
-; CHECK-NEXT: movl $-1, %eax
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: incl %edi, %eax
+; CHECK-NEXT: movl $-1, %ecx
+; CHECK-NEXT: cmovel %ecx, %eax
; CHECK-NEXT: retq
entry:
%inc = call i32 @llvm.uadd.sat.i32(i32 %a, i32 1)
@@ -130,9 +130,9 @@ entry:
define i64 @uinc64r(i64 noundef %a) {
; CHECK-LABEL: uinc64r:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: incq %rdi, %rcx
-; CHECK-NEXT: movq $-1, %rax
-; CHECK-NEXT: cmovneq %rcx, %rax
+; CHECK-NEXT: incq %rdi, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: cmoveq %rcx, %rax
; CHECK-NEXT: retq
entry:
%inc = call i64 @llvm.uadd.sat.i64(i64 %a, i64 1)
diff --git a/llvm/test/CodeGen/X86/apx/shift-eflags.ll b/llvm/test/CodeGen/X86/apx/shift-eflags.ll
index f34dc6c..932cdc1 100644
--- a/llvm/test/CodeGen/X86/apx/shift-eflags.ll
+++ b/llvm/test/CodeGen/X86/apx/shift-eflags.ll
@@ -7,9 +7,8 @@
define i32 @ashr_const(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: ashr_const:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edx, %eax
-; CHECK-NEXT: sarl $14, %edi, %edx
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: sarl $14, %edi, %eax
+; CHECK-NEXT: cmovel %edx, %ecx, %eax
; CHECK-NEXT: retq
%s = ashr i32 %a0, 14
%c = icmp eq i32 %s, 0
@@ -21,9 +20,8 @@ define i32 @ashr_const(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
define i32 @lshr_const(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: lshr_const:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: testl $-16384, %edi # imm = 0xC000
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: cmovel %edx, %ecx, %eax
; CHECK-NEXT: retq
%s = lshr i32 %a0, 14
%c = icmp eq i32 %s, 0
@@ -35,9 +33,8 @@ define i32 @lshr_const(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
define i32 @shl_const(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: shl_const:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: testl $262143, %edi # imm = 0x3FFFF
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: cmovel %edx, %ecx, %eax
; CHECK-NEXT: retq
%s = shl i32 %a0, 14
%c = icmp eq i32 %s, 0
@@ -88,9 +85,8 @@ define i32 @shl_const_self_select(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
define i32 @ashr_const1(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: ashr_const1:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edx, %eax
-; CHECK-NEXT: sarl %edi, %edx
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: sarl %edi, %eax
+; CHECK-NEXT: cmovel %edx, %ecx, %eax
; CHECK-NEXT: retq
%s = ashr i32 %a0, 1
%c = icmp eq i32 %s, 0
@@ -102,9 +98,8 @@ define i32 @ashr_const1(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
define i32 @lshr_const1(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: lshr_const1:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: testl $-2, %edi
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: cmovel %edx, %ecx, %eax
; CHECK-NEXT: retq
%s = lshr i32 %a0, 1
%c = icmp eq i32 %s, 0
@@ -116,9 +111,8 @@ define i32 @lshr_const1(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
define i32 @shl_const1(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: shl_const1:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: testl $2147483647, %edi # imm = 0x7FFFFFFF
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: cmovel %edx, %ecx, %eax
; CHECK-NEXT: retq
%s = shl i32 %a0, 1
%c = icmp eq i32 %s, 0
diff --git a/llvm/test/CodeGen/X86/apx/sub.ll b/llvm/test/CodeGen/X86/apx/sub.ll
index 4b0bd14..be0914c 100644
--- a/llvm/test/CodeGen/X86/apx/sub.ll
+++ b/llvm/test/CodeGen/X86/apx/sub.ll
@@ -299,10 +299,10 @@ declare i64 @llvm.usub.sat.i64(i64, i64)
define i8 @subflag8rr(i8 noundef %a, i8 noundef %b) {
; CHECK-LABEL: subflag8rr:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subb %sil, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x28,0xf7]
-; CHECK-NEXT: movzbl %al, %eax # encoding: [0x0f,0xb6,0xc0]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subb %sil, %dil, %cl # encoding: [0x62,0xf4,0x74,0x18,0x28,0xf7]
+; CHECK-NEXT: movzbl %cl, %ecx # encoding: [0x0f,0xb6,0xc9]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -313,9 +313,9 @@ entry:
define i16 @subflag16rr(i16 noundef %a, i16 noundef %b) {
; CHECK-LABEL: subflag16rr:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subw %si, %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x29,0xf7]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subw %si, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x29,0xf7]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -326,9 +326,9 @@ entry:
define i32 @subflag32rr(i32 noundef %a, i32 noundef %b) {
; CHECK-LABEL: subflag32rr:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subl %esi, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x29,0xf7]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subl %esi, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x29,0xf7]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i32 @llvm.usub.sat.i32(i32 %a, i32 %b)
@@ -340,7 +340,7 @@ define i64 @subflag64rr(i64 noundef %a, i64 noundef %b) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: subq %rsi, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x29,0xf7]
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i64 @llvm.usub.sat.i64(i64 %a, i64 %b)
@@ -350,10 +350,10 @@ entry:
define i8 @subflag8rm(i8 noundef %a, ptr %b) {
; CHECK-LABEL: subflag8rm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subb (%rsi), %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x2a,0x3e]
-; CHECK-NEXT: movzbl %al, %eax # encoding: [0x0f,0xb6,0xc0]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subb (%rsi), %dil, %cl # encoding: [0x62,0xf4,0x74,0x18,0x2a,0x3e]
+; CHECK-NEXT: movzbl %cl, %ecx # encoding: [0x0f,0xb6,0xc9]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -365,9 +365,9 @@ entry:
define i16 @subflag16rm(i16 noundef %a, ptr %b) {
; CHECK-LABEL: subflag16rm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subw (%rsi), %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x2b,0x3e]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subw (%rsi), %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x2b,0x3e]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -379,9 +379,9 @@ entry:
define i32 @subflag32rm(i32 noundef %a, ptr %b) {
; CHECK-LABEL: subflag32rm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subl (%rsi), %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x2b,0x3e]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subl (%rsi), %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x2b,0x3e]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%t = load i32, ptr %b
@@ -394,7 +394,7 @@ define i64 @subflag64rm(i64 noundef %a, ptr %b) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: subq (%rsi), %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x2b,0x3e]
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%t = load i64, ptr %b
@@ -405,9 +405,9 @@ entry:
define i16 @subflag16ri8(i16 noundef %a) {
; CHECK-LABEL: subflag16ri8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subw $123, %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x83,0xef,0x7b]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subw $123, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x83,0xef,0x7b]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -418,9 +418,9 @@ entry:
define i32 @subflag32ri8(i32 noundef %a) {
; CHECK-LABEL: subflag32ri8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subl $123, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x83,0xef,0x7b]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subl $123, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x83,0xef,0x7b]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i32 @llvm.usub.sat.i32(i32 %a, i32 123)
@@ -432,7 +432,7 @@ define i64 @subflag64ri8(i64 noundef %a) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: subq $123, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x83,0xef,0x7b]
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i64 @llvm.usub.sat.i64(i64 %a, i64 123)
@@ -442,10 +442,10 @@ entry:
define i8 @subflag8ri(i8 noundef %a) {
; CHECK-LABEL: subflag8ri:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subb $123, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x80,0xef,0x7b]
-; CHECK-NEXT: movzbl %al, %eax # encoding: [0x0f,0xb6,0xc0]
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subb $123, %dil, %cl # encoding: [0x62,0xf4,0x74,0x18,0x80,0xef,0x7b]
+; CHECK-NEXT: movzbl %cl, %ecx # encoding: [0x0f,0xb6,0xc9]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -456,10 +456,10 @@ entry:
define i16 @subflag16ri(i16 noundef %a) {
; CHECK-LABEL: subflag16ri:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subw $1234, %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x81,0xef,0xd2,0x04]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subw $1234, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x81,0xef,0xd2,0x04]
; CHECK-NEXT: # imm = 0x4D2
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -470,10 +470,10 @@ entry:
define i32 @subflag32ri(i32 noundef %a) {
; CHECK-LABEL: subflag32ri:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %ecx, %ecx # encoding: [0x31,0xc9]
-; CHECK-NEXT: subl $123456, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x81,0xef,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: subl $123456, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x81,0xef,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
-; CHECK-NEXT: cmovbl %ecx, %eax # encoding: [0x0f,0x42,0xc1]
+; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i32 @llvm.usub.sat.i32(i32 %a, i32 123456)
@@ -486,7 +486,7 @@ define i64 @subflag64ri(i64 noundef %a) {
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: subq $123456, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x81,0xef,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
-; CHECK-NEXT: cmovaeq %rcx, %rax # encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i64 @llvm.usub.sat.i64(i64 %a, i64 123456)
diff --git a/llvm/test/CodeGen/X86/asm-dialect-module.ll b/llvm/test/CodeGen/X86/asm-dialect-module.ll
new file mode 100644
index 0000000..2c00a44
--- /dev/null
+++ b/llvm/test/CodeGen/X86/asm-dialect-module.ll
@@ -0,0 +1,10 @@
+;; Test that we respect the assembler dialect when parsing module-level inline asm.
+; RUN: not llc < %s -mtriple=x86_64 2>&1 | FileCheck %s --check-prefix=ERR
+; RUN: llc < %s -mtriple=x86_64 -x86-asm-syntax=intel | FileCheck %s
+
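+;; With the default AT&T dialect the module-level asm fails to parse (first RUN
+;; line); with -x86-asm-syntax=intel it is accepted and re-emitted under
+;; .intel_syntax noprefix (second RUN line).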
+; ERR: <inline asm>:1:1: error: unknown use of instruction mnemonic without a size suffix
+
+; CHECK: .intel_syntax noprefix
+; CHECK: mov eax, eax
+
+module asm "mov eax, eax"
diff --git a/llvm/test/CodeGen/X86/avgceils.ll b/llvm/test/CodeGen/X86/avgceils.ll
new file mode 100644
index 0000000..4529ea2
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avgceils.ll
@@ -0,0 +1,3821 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE4
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
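+; Each function below computes a signed ceiling average of two vectors: the
+; "fixed" variants use the identity avg_ceil(a, b) = (a | b) - ((a ^ b) >> 1)
+; (arithmetic shift), while the "ext" variants sign-extend, add, add 1, shift
+; right by 1 and truncate back.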
+;
+; 128-bit vectors
+;
+
+define <16 x i8> @test_fixed_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: test_fixed_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psubb %xmm0, %xmm2
+; SSE-NEXT: paddb %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsubb %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512-NEXT: vpsubb %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %or = or <16 x i8> %a0, %a1
+ %xor = xor <16 x i8> %a0, %a1
+ %shift = ashr <16 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = sub <16 x i8> %or, %shift
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_ext_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE2-LABEL: test_ext_v16i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
+; SSE2-NEXT: psraw $8, %xmm4
+; SSE2-NEXT: paddw %xmm2, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: paddw %xmm3, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: psubw %xmm1, %xmm4
+; SSE2-NEXT: psubw %xmm1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: pand %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm3
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm0
+; SSE4-NEXT: paddw %xmm2, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm1
+; SSE4-NEXT: paddw %xmm3, %xmm1
+; SSE4-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE4-NEXT: psubw %xmm2, %xmm0
+; SSE4-NEXT: psubw %xmm2, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm0
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE4-NEXT: pand %xmm2, %xmm0
+; SSE4-NEXT: pand %xmm2, %xmm1
+; SSE4-NEXT: packuswb %xmm1, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX512-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX512-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovwb %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = sext <16 x i8> %a0 to <16 x i16>
+ %x1 = sext <16 x i8> %a1 to <16 x i16>
+ %sum = add <16 x i16> %x0, %x1
+ %inc = add <16 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shift = ashr <16 x i16> %inc, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <16 x i16> %shift to <16 x i8>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @test_fixed_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: test_fixed_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm0, %xmm1
+; SSE-NEXT: psraw $1, %xmm1
+; SSE-NEXT: psubw %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX-NEXT: vpsubw %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %or = or <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a1, %a0
+ %shift = ashr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <8 x i16> %or, %shift
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_ext_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE2-LABEL: test_ext_v8i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: paddd %xmm3, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT: psubd %xmm2, %xmm0
+; SSE2-NEXT: psubd %xmm2, %xmm1
+; SSE2-NEXT: pslld $15, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm3
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm0
+; SSE4-NEXT: paddd %xmm2, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm1
+; SSE4-NEXT: paddd %xmm3, %xmm1
+; SSE4-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE4-NEXT: psubd %xmm2, %xmm0
+; SSE4-NEXT: psubd %xmm2, %xmm1
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: psrld $1, %xmm0
+; SSE4-NEXT: pxor %xmm2, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
+; SSE4-NEXT: packusdw %xmm1, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
+; AVX1-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX512-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = sext <8 x i16> %a0 to <8 x i32>
+ %x1 = sext <8 x i16> %a1 to <8 x i32>
+ %sum = add <8 x i32> %x0, %x1
+ %inc = add <8 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shift = ashr <8 x i32> %inc, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <8 x i32> %shift to <8 x i16>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: test_fixed_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm0, %xmm1
+; SSE-NEXT: psrad $1, %xmm1
+; SSE-NEXT: psubd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX-NEXT: vpsubd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %or = or <4 x i32> %a0, %a1
+ %xor = xor <4 x i32> %a1, %a0
+ %shift = ashr <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1>
+ %res = sub <4 x i32> %or, %shift
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @test_ext_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE2-LABEL: test_ext_v4i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: paddq %xmm2, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT: paddq %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: psubq %xmm1, %xmm4
+; SSE2-NEXT: psubq %xmm1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm2
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm4
+; SSE4-NEXT: paddq %xmm2, %xmm4
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm0
+; SSE4-NEXT: paddq %xmm3, %xmm0
+; SSE4-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE4-NEXT: psubq %xmm1, %xmm4
+; SSE4-NEXT: psubq %xmm1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX512-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = sext <4 x i32> %a0 to <4 x i64>
+ %x1 = sext <4 x i32> %a1 to <4 x i64>
+ %sum = add <4 x i64> %x0, %x1
+ %inc = add <4 x i64> %sum, <i64 1, i64 1, i64 1, i64 1>
+ %shift = ashr <4 x i64> %inc, <i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <4 x i64> %shift to <4 x i32>
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE2-LABEL: test_fixed_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: psubq %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_fixed_v2i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm0, %xmm2
+; SSE4-NEXT: por %xmm1, %xmm2
+; SSE4-NEXT: pxor %xmm0, %xmm1
+; SSE4-NEXT: movdqa %xmm1, %xmm0
+; SSE4-NEXT: psrad $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
+; SSE4-NEXT: psubq %xmm1, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX2-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX2-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpsraq $1, %xmm0, %xmm0
+; AVX512-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: retq
+ %or = or <2 x i64> %a0, %a1
+ %xor = xor <2 x i64> %a1, %a0
+ %shift = ashr <2 x i64> %xor, <i64 1, i64 1>
+ %res = sub <2 x i64> %or, %shift
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @test_ext_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE2-LABEL: test_ext_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdx
+; SSE2-NEXT: movq %rdx, %rsi
+; SSE2-NEXT: sarq $63, %rsi
+; SSE2-NEXT: movq %xmm1, %rdi
+; SSE2-NEXT: movq %rdi, %r8
+; SSE2-NEXT: sarq $63, %r8
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r9
+; SSE2-NEXT: movq %r9, %r10
+; SSE2-NEXT: sarq $63, %r10
+; SSE2-NEXT: addq %r9, %rdx
+; SSE2-NEXT: adcq %rsi, %r10
+; SSE2-NEXT: addq %rdi, %rax
+; SSE2-NEXT: adcq %rcx, %r8
+; SSE2-NEXT: addq $1, %rax
+; SSE2-NEXT: adcq $0, %r8
+; SSE2-NEXT: addq $1, %rdx
+; SSE2-NEXT: adcq $0, %r10
+; SSE2-NEXT: shldq $63, %rdx, %r10
+; SSE2-NEXT: shldq $63, %rax, %r8
+; SSE2-NEXT: movq %r8, %xmm0
+; SSE2-NEXT: movq %r10, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v2i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pextrq $1, %xmm0, %rax
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %xmm0, %rdx
+; SSE4-NEXT: movq %rdx, %rsi
+; SSE4-NEXT: sarq $63, %rsi
+; SSE4-NEXT: pextrq $1, %xmm1, %rdi
+; SSE4-NEXT: movq %rdi, %r8
+; SSE4-NEXT: sarq $63, %r8
+; SSE4-NEXT: movq %xmm1, %r9
+; SSE4-NEXT: movq %r9, %r10
+; SSE4-NEXT: sarq $63, %r10
+; SSE4-NEXT: addq %r9, %rdx
+; SSE4-NEXT: adcq %rsi, %r10
+; SSE4-NEXT: addq %rdi, %rax
+; SSE4-NEXT: adcq %rcx, %r8
+; SSE4-NEXT: addq $1, %rax
+; SSE4-NEXT: adcq $0, %r8
+; SSE4-NEXT: addq $1, %rdx
+; SSE4-NEXT: adcq $0, %r10
+; SSE4-NEXT: shldq $63, %rdx, %r10
+; SSE4-NEXT: shldq $63, %rax, %r8
+; SSE4-NEXT: movq %r8, %xmm1
+; SSE4-NEXT: movq %r10, %xmm0
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE4-NEXT: retq
+;
+; AVX-LABEL: test_ext_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vpextrq $1, %xmm0, %rax
+; AVX-NEXT: movq %rax, %rcx
+; AVX-NEXT: sarq $63, %rcx
+; AVX-NEXT: vmovq %xmm0, %rdx
+; AVX-NEXT: movq %rdx, %rsi
+; AVX-NEXT: sarq $63, %rsi
+; AVX-NEXT: vpextrq $1, %xmm1, %rdi
+; AVX-NEXT: movq %rdi, %r8
+; AVX-NEXT: sarq $63, %r8
+; AVX-NEXT: vmovq %xmm1, %r9
+; AVX-NEXT: movq %r9, %r10
+; AVX-NEXT: sarq $63, %r10
+; AVX-NEXT: addq %r9, %rdx
+; AVX-NEXT: adcq %rsi, %r10
+; AVX-NEXT: addq %rdi, %rax
+; AVX-NEXT: adcq %rcx, %r8
+; AVX-NEXT: addq $1, %rax
+; AVX-NEXT: adcq $0, %r8
+; AVX-NEXT: addq $1, %rdx
+; AVX-NEXT: adcq $0, %r10
+; AVX-NEXT: shldq $63, %rdx, %r10
+; AVX-NEXT: shldq $63, %rax, %r8
+; AVX-NEXT: vmovq %r8, %xmm0
+; AVX-NEXT: vmovq %r10, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: retq
+ %x0 = sext <2 x i64> %a0 to <2 x i128>
+ %x1 = sext <2 x i64> %a1 to <2 x i128>
+ %sum = add <2 x i128> %x0, %x1
+ %inc = add <2 x i128> %sum, <i128 1, i128 1>
+ %shift = ashr <2 x i128> %inc, <i128 1, i128 1>
+ %res = trunc <2 x i128> %shift to <2 x i64>
+ ret <2 x i64> %res
+}
+
+;
+; 256-bit vectors
+;
+
+define <32 x i8> @test_fixed_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE-LABEL: test_fixed_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: por %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psubb %xmm1, %xmm4
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm0
+; SSE-NEXT: psubb %xmm0, %xmm5
+; SSE-NEXT: paddb %xmm3, %xmm5
+; SSE-NEXT: paddb %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsubb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpaddb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubb %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} ymm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm0
+; AVX512-NEXT: vpsubb %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %or = or <32 x i8> %a0, %a1
+ %xor = xor <32 x i8> %a0, %a1
+ %shift = ashr <32 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = sub <32 x i8> %or, %shift
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_ext_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE2-LABEL: test_ext_v32i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; SSE2-NEXT: psraw $8, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: psraw $8, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm1[8],xmm7[9],xmm1[9],xmm7[10],xmm1[10],xmm7[11],xmm1[11],xmm7[12],xmm1[12],xmm7[13],xmm1[13],xmm7[14],xmm1[14],xmm7[15],xmm1[15]
+; SSE2-NEXT: psraw $8, %xmm7
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3],xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
+; SSE2-NEXT: psraw $8, %xmm8
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; SSE2-NEXT: psraw $8, %xmm4
+; SSE2-NEXT: paddw %xmm5, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: paddw %xmm6, %xmm0
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: paddw %xmm7, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: psraw $8, %xmm1
+; SSE2-NEXT: paddw %xmm8, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: psubw %xmm3, %xmm4
+; SSE2-NEXT: psubw %xmm3, %xmm0
+; SSE2-NEXT: psubw %xmm3, %xmm2
+; SSE2-NEXT: psubw %xmm3, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm2
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: packuswb %xmm4, %xmm0
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v32i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm4
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm5
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm6
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm7
+; SSE4-NEXT: pmovsxbw %xmm2, %xmm0
+; SSE4-NEXT: paddw %xmm4, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm2
+; SSE4-NEXT: paddw %xmm5, %xmm2
+; SSE4-NEXT: pmovsxbw %xmm3, %xmm1
+; SSE4-NEXT: paddw %xmm6, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm3, %xmm3
+; SSE4-NEXT: paddw %xmm7, %xmm3
+; SSE4-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE4-NEXT: psubw %xmm4, %xmm0
+; SSE4-NEXT: psubw %xmm4, %xmm2
+; SSE4-NEXT: psubw %xmm4, %xmm1
+; SSE4-NEXT: psubw %xmm4, %xmm3
+; SSE4-NEXT: psrlw $1, %xmm3
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm2
+; SSE4-NEXT: psrlw $1, %xmm0
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE4-NEXT: pand %xmm4, %xmm0
+; SSE4-NEXT: pand %xmm4, %xmm2
+; SSE4-NEXT: packuswb %xmm2, %xmm0
+; SSE4-NEXT: pand %xmm4, %xmm1
+; SSE4-NEXT: pand %xmm4, %xmm3
+; SSE4-NEXT: packuswb %xmm3, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpmovsxbw %xmm5, %xmm6
+; AVX1-NEXT: vpaddw %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm5, %xmm5
+; AVX1-NEXT: vpaddw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm5
+; AVX1-NEXT: vpaddw %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubw %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3
+; AVX2-NEXT: vpaddw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubw %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm1
+; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxbw %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxbw %ymm1, %zmm1
+; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = sext <32 x i8> %a0 to <32 x i16>
+ %x1 = sext <32 x i8> %a1 to <32 x i16>
+ %sum = add <32 x i16> %x0, %x1
+ %inc = add <32 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shift = ashr <32 x i16> %inc, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <32 x i16> %shift to <32 x i8>
+ ret <32 x i8> %res
+}
+
+define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE-LABEL: test_fixed_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: por %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm0, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm3
+; SSE-NEXT: psraw $1, %xmm3
+; SSE-NEXT: psubw %xmm3, %xmm4
+; SSE-NEXT: psraw $1, %xmm2
+; SSE-NEXT: psubw %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsubw %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubw %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubw %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %or = or <16 x i16> %a0, %a1
+ %xor = xor <16 x i16> %a1, %a0
+ %shift = ashr <16 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <16 x i16> %or, %shift
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_ext_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE2-LABEL: test_ext_v16i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
+; SSE2-NEXT: psrad $16, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; SSE2-NEXT: psrad $16, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: paddd %xmm5, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: paddd %xmm6, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: paddd %xmm7, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT: psubd %xmm4, %xmm1
+; SSE2-NEXT: psubd %xmm4, %xmm3
+; SSE2-NEXT: psubd %xmm4, %xmm0
+; SSE2-NEXT: psubd %xmm4, %xmm2
+; SSE2-NEXT: pslld $15, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm2, %xmm0
+; SSE2-NEXT: pslld $15, %xmm3
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: pslld $15, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm3, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm4
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm5
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm6
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm7
+; SSE4-NEXT: pmovsxwd %xmm2, %xmm0
+; SSE4-NEXT: paddd %xmm4, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm2
+; SSE4-NEXT: paddd %xmm5, %xmm2
+; SSE4-NEXT: pmovsxwd %xmm3, %xmm1
+; SSE4-NEXT: paddd %xmm6, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm3, %xmm3
+; SSE4-NEXT: paddd %xmm7, %xmm3
+; SSE4-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE4-NEXT: psubd %xmm4, %xmm0
+; SSE4-NEXT: psubd %xmm4, %xmm2
+; SSE4-NEXT: psubd %xmm4, %xmm1
+; SSE4-NEXT: psubd %xmm4, %xmm3
+; SSE4-NEXT: psrld $1, %xmm3
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: psrld $1, %xmm2
+; SSE4-NEXT: psrld $1, %xmm0
+; SSE4-NEXT: pxor %xmm4, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
+; SSE4-NEXT: packusdw %xmm2, %xmm0
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
+; SSE4-NEXT: packusdw %xmm3, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpmovsxwd %xmm5, %xmm6
+; AVX1-NEXT: vpaddd %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm5, %xmm5
+; AVX1-NEXT: vpaddd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm5
+; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
+; AVX1-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
+; AVX2-NEXT: vpaddd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm2, %ymm1
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7],ymm1[8],ymm2[9],ymm1[10],ymm2[11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
+; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = sext <16 x i16> %a0 to <16 x i32>
+ %x1 = sext <16 x i16> %a1 to <16 x i32>
+ %sum = add <16 x i32> %x0, %x1
+ %inc = add <16 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shift = ashr <16 x i32> %inc, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <16 x i32> %shift to <16 x i16>
+ ret <16 x i16> %res
+}
+
+define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE-LABEL: test_fixed_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: por %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm0, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm3
+; SSE-NEXT: psrad $1, %xmm3
+; SSE-NEXT: psubd %xmm3, %xmm4
+; SSE-NEXT: psrad $1, %xmm2
+; SSE-NEXT: psubd %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsubd %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubd %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %or = or <8 x i32> %a0, %a1
+ %xor = xor <8 x i32> %a1, %a0
+ %shift = ashr <8 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = sub <8 x i32> %or, %shift
+ ret <8 x i32> %res
+}
+
+define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE2-LABEL: test_ext_v8i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm0[2,3,2,3]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,3,2,3]
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
+; SSE2-NEXT: paddq %xmm6, %xmm4
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm6
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
+; SSE2-NEXT: paddq %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm6
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
+; SSE2-NEXT: paddq %xmm7, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; SSE2-NEXT: paddq %xmm3, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: psubq %xmm3, %xmm4
+; SSE2-NEXT: psubq %xmm3, %xmm0
+; SSE2-NEXT: psubq %xmm3, %xmm2
+; SSE2-NEXT: psubq %xmm3, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm4, %xmm5
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm6
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm7
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm8
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm4
+; SSE4-NEXT: paddq %xmm5, %xmm4
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm0
+; SSE4-NEXT: paddq %xmm6, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm2
+; SSE4-NEXT: paddq %xmm7, %xmm2
+; SSE4-NEXT: pmovsxdq %xmm3, %xmm1
+; SSE4-NEXT: paddq %xmm8, %xmm1
+; SSE4-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE4-NEXT: psubq %xmm3, %xmm4
+; SSE4-NEXT: psubq %xmm3, %xmm0
+; SSE4-NEXT: psubq %xmm3, %xmm2
+; SSE4-NEXT: psubq %xmm3, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm5
+; AVX1-NEXT: vpaddq %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpmovsxdq %xmm5, %xmm6
+; AVX1-NEXT: vpaddq %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpsubq %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,2],ymm0[0,2],ymm2[4,6],ymm0[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm3
+; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm0[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxdq %ymm1, %zmm1
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = sext <8 x i32> %a0 to <8 x i64>
+ %x1 = sext <8 x i32> %a1 to <8 x i64>
+ %sum = add <8 x i64> %x0, %x1
+ %inc = add <8 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %shift = ashr <8 x i64> %inc, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <8 x i64> %shift to <8 x i32>
+ ret <8 x i32> %res
+}
+
+define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE2-LABEL: test_fixed_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: por %xmm2, %xmm5
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: psubq %xmm1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: psubq %xmm1, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm0
+; SSE2-NEXT: movdqa %xmm4, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_fixed_v4i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm1, %xmm4
+; SSE4-NEXT: por %xmm3, %xmm4
+; SSE4-NEXT: movdqa %xmm0, %xmm5
+; SSE4-NEXT: por %xmm2, %xmm5
+; SSE4-NEXT: pxor %xmm0, %xmm2
+; SSE4-NEXT: pxor %xmm1, %xmm3
+; SSE4-NEXT: movdqa %xmm3, %xmm0
+; SSE4-NEXT: psrad $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7]
+; SSE4-NEXT: psubq %xmm3, %xmm4
+; SSE4-NEXT: movdqa %xmm2, %xmm0
+; SSE4-NEXT: psrad $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
+; SSE4-NEXT: psubq %xmm2, %xmm5
+; SSE4-NEXT: movdqa %xmm5, %xmm0
+; SSE4-NEXT: movdqa %xmm4, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3],xmm3[4,5],xmm1[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsubq %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsraq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubq %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %or = or <4 x i64> %a0, %a1
+ %xor = xor <4 x i64> %a1, %a0
+ %shift = ashr <4 x i64> %xor, <i64 1, i64 1, i64 1, i64 1>
+ %res = sub <4 x i64> %or, %shift
+ ret <4 x i64> %res
+}
+
+define <4 x i64> @test_ext_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE2-LABEL: test_ext_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: .cfi_offset %rbx, -56
+; SSE2-NEXT: .cfi_offset %r12, -48
+; SSE2-NEXT: .cfi_offset %r13, -40
+; SSE2-NEXT: .cfi_offset %r14, -32
+; SSE2-NEXT: .cfi_offset %r15, -24
+; SSE2-NEXT: .cfi_offset %rbp, -16
+; SSE2-NEXT: movq %xmm0, %r11
+; SSE2-NEXT: movq %r11, %r12
+; SSE2-NEXT: sarq $63, %r12
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rcx
+; SSE2-NEXT: movq %rcx, %rbx
+; SSE2-NEXT: sarq $63, %rbx
+; SSE2-NEXT: movq %xmm1, %rdx
+; SSE2-NEXT: movq %rdx, %r14
+; SSE2-NEXT: sarq $63, %r14
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r9
+; SSE2-NEXT: movq %r9, %r15
+; SSE2-NEXT: sarq $63, %r15
+; SSE2-NEXT: movq %xmm2, %rsi
+; SSE2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %rsi
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r13
+; SSE2-NEXT: movq %r13, %r8
+; SSE2-NEXT: sarq $63, %r8
+; SSE2-NEXT: movq %xmm3, %rbp
+; SSE2-NEXT: movq %rbp, %rdi
+; SSE2-NEXT: sarq $63, %rdi
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %rax, %r10
+; SSE2-NEXT: sarq $63, %r10
+; SSE2-NEXT: addq %rax, %r9
+; SSE2-NEXT: adcq %r15, %r10
+; SSE2-NEXT: addq %rbp, %rdx
+; SSE2-NEXT: adcq %r14, %rdi
+; SSE2-NEXT: addq %r13, %rcx
+; SSE2-NEXT: adcq %rbx, %r8
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; SSE2-NEXT: adcq %r12, %rsi
+; SSE2-NEXT: addq $1, %r11
+; SSE2-NEXT: adcq $0, %rsi
+; SSE2-NEXT: addq $1, %rcx
+; SSE2-NEXT: adcq $0, %r8
+; SSE2-NEXT: addq $1, %rdx
+; SSE2-NEXT: adcq $0, %rdi
+; SSE2-NEXT: addq $1, %r9
+; SSE2-NEXT: adcq $0, %r10
+; SSE2-NEXT: shldq $63, %r9, %r10
+; SSE2-NEXT: shldq $63, %rdx, %rdi
+; SSE2-NEXT: shldq $63, %rcx, %r8
+; SSE2-NEXT: shldq $63, %r11, %rsi
+; SSE2-NEXT: movq %rsi, %xmm0
+; SSE2-NEXT: movq %r8, %xmm2
+; SSE2-NEXT: movq %rdi, %xmm1
+; SSE2-NEXT: movq %r10, %xmm3
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pushq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: pushq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: pushq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: pushq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: pushq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: pushq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: .cfi_offset %rbx, -56
+; SSE4-NEXT: .cfi_offset %r12, -48
+; SSE4-NEXT: .cfi_offset %r13, -40
+; SSE4-NEXT: .cfi_offset %r14, -32
+; SSE4-NEXT: .cfi_offset %r15, -24
+; SSE4-NEXT: .cfi_offset %rbp, -16
+; SSE4-NEXT: pextrq $1, %xmm0, %r11
+; SSE4-NEXT: movq %r11, %r12
+; SSE4-NEXT: sarq $63, %r12
+; SSE4-NEXT: movq %xmm0, %rcx
+; SSE4-NEXT: movq %rcx, %rbx
+; SSE4-NEXT: sarq $63, %rbx
+; SSE4-NEXT: pextrq $1, %xmm1, %rdx
+; SSE4-NEXT: movq %rdx, %r14
+; SSE4-NEXT: sarq $63, %r14
+; SSE4-NEXT: movq %xmm1, %r9
+; SSE4-NEXT: movq %r9, %r15
+; SSE4-NEXT: sarq $63, %r15
+; SSE4-NEXT: pextrq $1, %xmm2, %rsi
+; SSE4-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %rsi
+; SSE4-NEXT: movq %xmm2, %r13
+; SSE4-NEXT: movq %r13, %r8
+; SSE4-NEXT: sarq $63, %r8
+; SSE4-NEXT: pextrq $1, %xmm3, %rbp
+; SSE4-NEXT: movq %rbp, %rdi
+; SSE4-NEXT: sarq $63, %rdi
+; SSE4-NEXT: movq %xmm3, %rax
+; SSE4-NEXT: movq %rax, %r10
+; SSE4-NEXT: sarq $63, %r10
+; SSE4-NEXT: addq %rax, %r9
+; SSE4-NEXT: adcq %r15, %r10
+; SSE4-NEXT: addq %rbp, %rdx
+; SSE4-NEXT: adcq %r14, %rdi
+; SSE4-NEXT: addq %r13, %rcx
+; SSE4-NEXT: adcq %rbx, %r8
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; SSE4-NEXT: adcq %r12, %rsi
+; SSE4-NEXT: addq $1, %r11
+; SSE4-NEXT: adcq $0, %rsi
+; SSE4-NEXT: addq $1, %rcx
+; SSE4-NEXT: adcq $0, %r8
+; SSE4-NEXT: addq $1, %rdx
+; SSE4-NEXT: adcq $0, %rdi
+; SSE4-NEXT: addq $1, %r9
+; SSE4-NEXT: adcq $0, %r10
+; SSE4-NEXT: shldq $63, %r9, %r10
+; SSE4-NEXT: shldq $63, %rdx, %rdi
+; SSE4-NEXT: shldq $63, %rcx, %r8
+; SSE4-NEXT: shldq $63, %r11, %rsi
+; SSE4-NEXT: movq %rsi, %xmm2
+; SSE4-NEXT: movq %r8, %xmm0
+; SSE4-NEXT: movq %rdi, %xmm3
+; SSE4-NEXT: movq %r10, %xmm1
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE4-NEXT: popq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: popq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: popq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: popq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: popq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 8
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpextrq $1, %xmm2, %r11
+; AVX1-NEXT: movq %r11, %r12
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vmovq %xmm2, %rcx
+; AVX1-NEXT: movq %rcx, %rbx
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX1-NEXT: movq %rdx, %r14
+; AVX1-NEXT: sarq $63, %r14
+; AVX1-NEXT: vmovq %xmm0, %r8
+; AVX1-NEXT: movq %r8, %r15
+; AVX1-NEXT: sarq $63, %r15
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
+; AVX1-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX1-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %rsi
+; AVX1-NEXT: vmovq %xmm0, %r13
+; AVX1-NEXT: movq %r13, %rdi
+; AVX1-NEXT: sarq $63, %rdi
+; AVX1-NEXT: vpextrq $1, %xmm1, %rbp
+; AVX1-NEXT: movq %rbp, %r9
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: movq %rax, %r10
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: addq %rax, %r8
+; AVX1-NEXT: adcq %r15, %r10
+; AVX1-NEXT: addq %rbp, %rdx
+; AVX1-NEXT: adcq %r14, %r9
+; AVX1-NEXT: addq %r13, %rcx
+; AVX1-NEXT: adcq %rbx, %rdi
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX1-NEXT: adcq %r12, %rsi
+; AVX1-NEXT: addq $1, %r11
+; AVX1-NEXT: adcq $0, %rsi
+; AVX1-NEXT: addq $1, %rcx
+; AVX1-NEXT: adcq $0, %rdi
+; AVX1-NEXT: addq $1, %rdx
+; AVX1-NEXT: adcq $0, %r9
+; AVX1-NEXT: addq $1, %r8
+; AVX1-NEXT: adcq $0, %r10
+; AVX1-NEXT: shldq $63, %r8, %r10
+; AVX1-NEXT: shldq $63, %rdx, %r9
+; AVX1-NEXT: shldq $63, %rcx, %rdi
+; AVX1-NEXT: shldq $63, %r11, %rsi
+; AVX1-NEXT: vmovq %rsi, %xmm0
+; AVX1-NEXT: vmovq %rdi, %xmm1
+; AVX1-NEXT: vmovq %r9, %xmm2
+; AVX1-NEXT: vmovq %r10, %xmm3
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 8
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpextrq $1, %xmm2, %r11
+; AVX2-NEXT: movq %r11, %r12
+; AVX2-NEXT: sarq $63, %r12
+; AVX2-NEXT: vmovq %xmm2, %rcx
+; AVX2-NEXT: movq %rcx, %rbx
+; AVX2-NEXT: sarq $63, %rbx
+; AVX2-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX2-NEXT: movq %rdx, %r14
+; AVX2-NEXT: sarq $63, %r14
+; AVX2-NEXT: vmovq %xmm0, %r8
+; AVX2-NEXT: movq %r8, %r15
+; AVX2-NEXT: sarq $63, %r15
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX2-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %rsi
+; AVX2-NEXT: vmovq %xmm0, %r13
+; AVX2-NEXT: movq %r13, %rdi
+; AVX2-NEXT: sarq $63, %rdi
+; AVX2-NEXT: vpextrq $1, %xmm1, %rbp
+; AVX2-NEXT: movq %rbp, %r9
+; AVX2-NEXT: sarq $63, %r9
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: movq %rax, %r10
+; AVX2-NEXT: sarq $63, %r10
+; AVX2-NEXT: addq %rax, %r8
+; AVX2-NEXT: adcq %r15, %r10
+; AVX2-NEXT: addq %rbp, %rdx
+; AVX2-NEXT: adcq %r14, %r9
+; AVX2-NEXT: addq %r13, %rcx
+; AVX2-NEXT: adcq %rbx, %rdi
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX2-NEXT: adcq %r12, %rsi
+; AVX2-NEXT: addq $1, %r11
+; AVX2-NEXT: adcq $0, %rsi
+; AVX2-NEXT: addq $1, %rcx
+; AVX2-NEXT: adcq $0, %rdi
+; AVX2-NEXT: addq $1, %rdx
+; AVX2-NEXT: adcq $0, %r9
+; AVX2-NEXT: addq $1, %r8
+; AVX2-NEXT: adcq $0, %r10
+; AVX2-NEXT: shldq $63, %r8, %r10
+; AVX2-NEXT: shldq $63, %rdx, %r9
+; AVX2-NEXT: shldq $63, %rcx, %rdi
+; AVX2-NEXT: shldq $63, %r11, %rsi
+; AVX2-NEXT: vmovq %rsi, %xmm0
+; AVX2-NEXT: vmovq %rdi, %xmm1
+; AVX2-NEXT: vmovq %r9, %xmm2
+; AVX2-NEXT: vmovq %r10, %xmm3
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 8
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: .cfi_offset %rbx, -56
+; AVX512-NEXT: .cfi_offset %r12, -48
+; AVX512-NEXT: .cfi_offset %r13, -40
+; AVX512-NEXT: .cfi_offset %r14, -32
+; AVX512-NEXT: .cfi_offset %r15, -24
+; AVX512-NEXT: .cfi_offset %rbp, -16
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512-NEXT: vpextrq $1, %xmm2, %r11
+; AVX512-NEXT: movq %r11, %r12
+; AVX512-NEXT: sarq $63, %r12
+; AVX512-NEXT: vmovq %xmm2, %rcx
+; AVX512-NEXT: movq %rcx, %rbx
+; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512-NEXT: sarq $63, %rbx
+; AVX512-NEXT: movq %rdx, %r14
+; AVX512-NEXT: sarq $63, %r14
+; AVX512-NEXT: vmovq %xmm0, %rdi
+; AVX512-NEXT: movq %rdi, %r15
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX512-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r15
+; AVX512-NEXT: sarq $63, %rsi
+; AVX512-NEXT: vmovq %xmm0, %r13
+; AVX512-NEXT: movq %r13, %r8
+; AVX512-NEXT: sarq $63, %r8
+; AVX512-NEXT: vpextrq $1, %xmm1, %rbp
+; AVX512-NEXT: movq %rbp, %r9
+; AVX512-NEXT: sarq $63, %r9
+; AVX512-NEXT: vmovq %xmm1, %rax
+; AVX512-NEXT: movq %rax, %r10
+; AVX512-NEXT: sarq $63, %r10
+; AVX512-NEXT: addq %rax, %rdi
+; AVX512-NEXT: adcq %r15, %r10
+; AVX512-NEXT: addq %rbp, %rdx
+; AVX512-NEXT: adcq %r14, %r9
+; AVX512-NEXT: addq %r13, %rcx
+; AVX512-NEXT: adcq %rbx, %r8
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX512-NEXT: adcq %r12, %rsi
+; AVX512-NEXT: addq $1, %r11
+; AVX512-NEXT: adcq $0, %rsi
+; AVX512-NEXT: addq $1, %rcx
+; AVX512-NEXT: adcq $0, %r8
+; AVX512-NEXT: addq $1, %rdx
+; AVX512-NEXT: adcq $0, %r9
+; AVX512-NEXT: addq $1, %rdi
+; AVX512-NEXT: adcq $0, %r10
+; AVX512-NEXT: shldq $63, %rdi, %r10
+; AVX512-NEXT: shldq $63, %rdx, %r9
+; AVX512-NEXT: shldq $63, %rcx, %r8
+; AVX512-NEXT: shldq $63, %r11, %rsi
+; AVX512-NEXT: vmovq %rsi, %xmm0
+; AVX512-NEXT: vmovq %r8, %xmm1
+; AVX512-NEXT: vmovq %r9, %xmm2
+; AVX512-NEXT: vmovq %r10, %xmm3
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 8
+; AVX512-NEXT: retq
+ %x0 = sext <4 x i64> %a0 to <4 x i128>
+ %x1 = sext <4 x i64> %a1 to <4 x i128>
+ %sum = add <4 x i128> %x0, %x1
+ %inc = add <4 x i128> %sum, <i128 1, i128 1, i128 1, i128 1>
+ %shift = ashr <4 x i128> %inc, <i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <4 x i128> %shift to <4 x i64>
+ ret <4 x i64> %res
+}
+
+;
+; 512-bit vectors
+;
+
+define <64 x i8> @test_fixed_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE-LABEL: test_fixed_v64i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm11
+; SSE-NEXT: movdqa %xmm2, %xmm8
+; SSE-NEXT: movdqa %xmm1, %xmm9
+; SSE-NEXT: movdqa %xmm0, %xmm10
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: por %xmm6, %xmm2
+; SSE-NEXT: por %xmm5, %xmm1
+; SSE-NEXT: por %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm4, %xmm10
+; SSE-NEXT: pxor %xmm5, %xmm9
+; SSE-NEXT: pxor %xmm6, %xmm8
+; SSE-NEXT: pxor %xmm7, %xmm11
+; SSE-NEXT: psrlw $1, %xmm11
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm5, %xmm11
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm4, %xmm11
+; SSE-NEXT: psubb %xmm11, %xmm3
+; SSE-NEXT: psrlw $1, %xmm8
+; SSE-NEXT: pand %xmm5, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm8
+; SSE-NEXT: psubb %xmm8, %xmm2
+; SSE-NEXT: psrlw $1, %xmm9
+; SSE-NEXT: pand %xmm5, %xmm9
+; SSE-NEXT: pxor %xmm4, %xmm9
+; SSE-NEXT: psubb %xmm9, %xmm1
+; SSE-NEXT: psrlw $1, %xmm10
+; SSE-NEXT: pand %xmm5, %xmm10
+; SSE-NEXT: pxor %xmm4, %xmm10
+; SSE-NEXT: psubb %xmm10, %xmm0
+; SSE-NEXT: paddb %xmm4, %xmm0
+; SSE-NEXT: paddb %xmm4, %xmm1
+; SSE-NEXT: paddb %xmm4, %xmm2
+; SSE-NEXT: paddb %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm7
+; AVX1-NEXT: vpand %xmm3, %xmm7, %xmm7
+; AVX1-NEXT: vpxor %xmm6, %xmm7, %xmm7
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm6, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
+; AVX1-NEXT: vpsubb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm6, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm7, %xmm5, %xmm3
+; AVX1-NEXT: vpaddb %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddb %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpsubb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpaddb %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm3 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsubb %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubb %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: vpaddb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} zmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm0
+; AVX512-NEXT: vpsubb %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %or = or <64 x i8> %a0, %a1
+ %xor = xor <64 x i8> %a0, %a1
+ %shift = ashr <64 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = sub <64 x i8> %or, %shift
+ ret <64 x i8> %res
+}
+
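+; Reference form: (sext(a0) + sext(a1) + 1) >> 1 computed in <64 x i16> and
+; truncated back to <64 x i8>, i.e. the signed average rounded up.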
+define <64 x i8> @test_ext_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE2-LABEL: test_ext_v64i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm3, %xmm8
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm0[8],xmm14[9],xmm0[9],xmm14[10],xmm0[10],xmm14[11],xmm0[11],xmm14[12],xmm0[12],xmm14[13],xmm0[13],xmm14[14],xmm0[14],xmm14[15],xmm0[15]
+; SSE2-NEXT: psraw $8, %xmm14
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-NEXT: psraw $8, %xmm15
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1],xmm13[2],xmm1[2],xmm13[3],xmm1[3],xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7]
+; SSE2-NEXT: psraw $8, %xmm13
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm2[8],xmm12[9],xmm2[9],xmm12[10],xmm2[10],xmm12[11],xmm2[11],xmm12[12],xmm2[12],xmm12[13],xmm2[13],xmm12[14],xmm2[14],xmm12[15],xmm2[15]
+; SSE2-NEXT: psraw $8, %xmm12
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm2[0],xmm11[1],xmm2[1],xmm11[2],xmm2[2],xmm11[3],xmm2[3],xmm11[4],xmm2[4],xmm11[5],xmm2[5],xmm11[6],xmm2[6],xmm11[7],xmm2[7]
+; SSE2-NEXT: psraw $8, %xmm11
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
+; SSE2-NEXT: psraw $8, %xmm10
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; SSE2-NEXT: psraw $8, %xmm9
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm4[8],xmm8[9],xmm4[9],xmm8[10],xmm4[10],xmm8[11],xmm4[11],xmm8[12],xmm4[12],xmm8[13],xmm4[13],xmm8[14],xmm4[14],xmm8[15],xmm4[15]
+; SSE2-NEXT: psraw $8, %xmm8
+; SSE2-NEXT: paddw %xmm14, %xmm8
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: paddw %xmm15, %xmm0
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
+; SSE2-NEXT: psraw $8, %xmm4
+; SSE2-NEXT: paddw %xmm3, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; SSE2-NEXT: psraw $8, %xmm1
+; SSE2-NEXT: paddw %xmm13, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
+; SSE2-NEXT: psraw $8, %xmm5
+; SSE2-NEXT: paddw %xmm12, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: paddw %xmm11, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
+; SSE2-NEXT: psraw $8, %xmm6
+; SSE2-NEXT: paddw %xmm10, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3],xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7]
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: paddw %xmm9, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
+; SSE2-NEXT: psubw %xmm7, %xmm8
+; SSE2-NEXT: psubw %xmm7, %xmm0
+; SSE2-NEXT: psubw %xmm7, %xmm4
+; SSE2-NEXT: psubw %xmm7, %xmm1
+; SSE2-NEXT: psubw %xmm7, %xmm5
+; SSE2-NEXT: psubw %xmm7, %xmm2
+; SSE2-NEXT: psubw %xmm7, %xmm6
+; SSE2-NEXT: psubw %xmm7, %xmm3
+; SSE2-NEXT: psrlw $1, %xmm3
+; SSE2-NEXT: psrlw $1, %xmm6
+; SSE2-NEXT: psrlw $1, %xmm2
+; SSE2-NEXT: psrlw $1, %xmm5
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm8
+; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: pand %xmm7, %xmm8
+; SSE2-NEXT: pand %xmm7, %xmm0
+; SSE2-NEXT: packuswb %xmm8, %xmm0
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: pand %xmm7, %xmm1
+; SSE2-NEXT: packuswb %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm7, %xmm5
+; SSE2-NEXT: pand %xmm7, %xmm2
+; SSE2-NEXT: packuswb %xmm5, %xmm2
+; SSE2-NEXT: pand %xmm7, %xmm6
+; SSE2-NEXT: pand %xmm7, %xmm3
+; SSE2-NEXT: packuswb %xmm6, %xmm3
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v64i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm8
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm9
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm10
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm11
+; SSE4-NEXT: pmovsxbw %xmm2, %xmm12
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm13
+; SSE4-NEXT: pmovsxbw %xmm3, %xmm14
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm15
+; SSE4-NEXT: pmovsxbw %xmm4, %xmm0
+; SSE4-NEXT: paddw %xmm8, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm4
+; SSE4-NEXT: paddw %xmm9, %xmm4
+; SSE4-NEXT: pmovsxbw %xmm5, %xmm1
+; SSE4-NEXT: paddw %xmm10, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm2, %xmm5
+; SSE4-NEXT: paddw %xmm11, %xmm5
+; SSE4-NEXT: pmovsxbw %xmm6, %xmm2
+; SSE4-NEXT: paddw %xmm12, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm3, %xmm6
+; SSE4-NEXT: paddw %xmm13, %xmm6
+; SSE4-NEXT: pmovsxbw %xmm7, %xmm3
+; SSE4-NEXT: paddw %xmm14, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm7, %xmm7
+; SSE4-NEXT: paddw %xmm15, %xmm7
+; SSE4-NEXT: pcmpeqd %xmm8, %xmm8
+; SSE4-NEXT: psubw %xmm8, %xmm0
+; SSE4-NEXT: psubw %xmm8, %xmm4
+; SSE4-NEXT: psubw %xmm8, %xmm1
+; SSE4-NEXT: psubw %xmm8, %xmm5
+; SSE4-NEXT: psubw %xmm8, %xmm2
+; SSE4-NEXT: psubw %xmm8, %xmm6
+; SSE4-NEXT: psubw %xmm8, %xmm3
+; SSE4-NEXT: psubw %xmm8, %xmm7
+; SSE4-NEXT: psrlw $1, %xmm7
+; SSE4-NEXT: psrlw $1, %xmm3
+; SSE4-NEXT: psrlw $1, %xmm6
+; SSE4-NEXT: psrlw $1, %xmm2
+; SSE4-NEXT: psrlw $1, %xmm5
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm4
+; SSE4-NEXT: psrlw $1, %xmm0
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
+; SSE4-NEXT: pand %xmm8, %xmm0
+; SSE4-NEXT: pand %xmm8, %xmm4
+; SSE4-NEXT: packuswb %xmm4, %xmm0
+; SSE4-NEXT: pand %xmm8, %xmm1
+; SSE4-NEXT: pand %xmm8, %xmm5
+; SSE4-NEXT: packuswb %xmm5, %xmm1
+; SSE4-NEXT: pand %xmm8, %xmm2
+; SSE4-NEXT: pand %xmm8, %xmm6
+; SSE4-NEXT: packuswb %xmm6, %xmm2
+; SSE4-NEXT: pand %xmm8, %xmm3
+; SSE4-NEXT: pand %xmm8, %xmm7
+; SSE4-NEXT: packuswb %xmm7, %xmm3
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpmovsxbw %xmm4, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm6
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT: vpmovsxbw %xmm7, %xmm8
+; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm7, %xmm7
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm9
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm10
+; AVX1-NEXT: vpmovsxbw %xmm10, %xmm11
+; AVX1-NEXT: vpaddw %xmm5, %xmm11, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm10, %xmm10
+; AVX1-NEXT: vpaddw %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm10
+; AVX1-NEXT: vpaddw %xmm6, %xmm10, %xmm6
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm10
+; AVX1-NEXT: vpaddw %xmm10, %xmm8, %xmm8
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm2, %xmm7, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm3, %xmm7
+; AVX1-NEXT: vpaddw %xmm7, %xmm9, %xmm7
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpsubw %xmm3, %xmm5, %xmm5
+; AVX1-NEXT: vpsubw %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpsubw %xmm3, %xmm6, %xmm6
+; AVX1-NEXT: vpsubw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubw %xmm3, %xmm8, %xmm8
+; AVX1-NEXT: vpsubw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsubw %xmm3, %xmm7, %xmm7
+; AVX1-NEXT: vpsubw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm7, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm8, %xmm7
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm6, %xmm6
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm5
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm5, %xmm8, %xmm5
+; AVX1-NEXT: vpand %xmm4, %xmm8, %xmm4
+; AVX1-NEXT: vpackuswb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpand %xmm6, %xmm8, %xmm5
+; AVX1-NEXT: vpand %xmm0, %xmm8, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm5, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm4
+; AVX1-NEXT: vpand %xmm2, %xmm8, %xmm2
+; AVX1-NEXT: vpackuswb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm8, %xmm3
+; AVX1-NEXT: vpand %xmm1, %xmm8, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; AVX2-NEXT: vpmovsxbw %xmm4, %ymm4
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm5
+; AVX2-NEXT: vpmovsxbw %xmm5, %ymm5
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm6
+; AVX2-NEXT: vpmovsxbw %xmm6, %ymm6
+; AVX2-NEXT: vpaddw %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
+; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2-NEXT: vpaddw %ymm2, %ymm5, %ymm2
+; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3
+; AVX2-NEXT: vpaddw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX2-NEXT: vpsubw %ymm3, %ymm4, %ymm4
+; AVX2-NEXT: vpsubw %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpsubw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm4, %ymm3
+; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxbw %ymm0, %zmm2
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT: vpmovsxbw %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxbw %ymm1, %zmm3
+; AVX512-NEXT: vpaddw %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; AVX512-NEXT: vpmovsxbw %ymm1, %zmm1
+; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubw %zmm1, %zmm2, %zmm2
+; AVX512-NEXT: vpsubw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm2, %zmm1
+; AVX512-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: retq
+ %x0 = sext <64 x i8> %a0 to <64 x i16>
+ %x1 = sext <64 x i8> %a1 to <64 x i16>
+ %sum = add <64 x i16> %x0, %x1
+ %inc = add <64 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shift = ashr <64 x i16> %inc, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <64 x i16> %shift to <64 x i8>
+ ret <64 x i8> %res
+}
+
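+; <32 x i16> variant of the fixed-point expansion (a0 | a1) - ((a0 ^ a1) >>s 1).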
+define <32 x i16> @test_fixed_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE-LABEL: test_fixed_v32i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: por %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: por %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: por %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm0, %xmm4
+; SSE-NEXT: pxor %xmm1, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm6
+; SSE-NEXT: pxor %xmm8, %xmm7
+; SSE-NEXT: psraw $1, %xmm7
+; SSE-NEXT: psubw %xmm7, %xmm3
+; SSE-NEXT: psraw $1, %xmm6
+; SSE-NEXT: psubw %xmm6, %xmm9
+; SSE-NEXT: psraw $1, %xmm5
+; SSE-NEXT: psubw %xmm5, %xmm10
+; SSE-NEXT: psraw $1, %xmm4
+; SSE-NEXT: psubw %xmm4, %xmm11
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpsubw %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpsubw %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsubw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsraw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubw %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubw %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsraw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsubw %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %or = or <32 x i16> %a0, %a1
+ %xor = xor <32 x i16> %a1, %a0
+ %shift = ashr <32 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <32 x i16> %or, %shift
+ ret <32 x i16> %res
+}
+
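+; <32 x i16> variant of the sign-extended reference form, widened to <32 x i32>.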
+define <32 x i16> @test_ext_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE2-LABEL: test_ext_v32i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm3[0],xmm13[1],xmm3[1],xmm13[2],xmm3[2],xmm13[3],xmm3[3]
+; SSE2-NEXT: psrad $16, %xmm13
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm3[4],xmm14[5],xmm3[5],xmm14[6],xmm3[6],xmm14[7],xmm3[7]
+; SSE2-NEXT: psrad $16, %xmm14
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm2[0],xmm15[1],xmm2[1],xmm15[2],xmm2[2],xmm15[3],xmm2[3]
+; SSE2-NEXT: psrad $16, %xmm15
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm2[4],xmm12[5],xmm2[5],xmm12[6],xmm2[6],xmm12[7],xmm2[7]
+; SSE2-NEXT: psrad $16, %xmm12
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3]
+; SSE2-NEXT: psrad $16, %xmm11
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm1[4],xmm10[5],xmm1[5],xmm10[6],xmm1[6],xmm10[7],xmm1[7]
+; SSE2-NEXT: psrad $16, %xmm10
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
+; SSE2-NEXT: psrad $16, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
+; SSE2-NEXT: psrad $16, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3]
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: paddd %xmm13, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm7
+; SSE2-NEXT: paddd %xmm14, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: paddd %xmm15, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm6
+; SSE2-NEXT: paddd %xmm12, %xmm6
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: paddd %xmm11, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: paddd %xmm10, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: paddd %xmm9, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: paddd %xmm8, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm8
+; SSE2-NEXT: psubd %xmm8, %xmm3
+; SSE2-NEXT: psubd %xmm8, %xmm7
+; SSE2-NEXT: psubd %xmm8, %xmm2
+; SSE2-NEXT: psubd %xmm8, %xmm6
+; SSE2-NEXT: psubd %xmm8, %xmm1
+; SSE2-NEXT: psubd %xmm8, %xmm5
+; SSE2-NEXT: psubd %xmm8, %xmm0
+; SSE2-NEXT: psubd %xmm8, %xmm4
+; SSE2-NEXT: pslld $15, %xmm4
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm4, %xmm0
+; SSE2-NEXT: pslld $15, %xmm5
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: pslld $15, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm5, %xmm1
+; SSE2-NEXT: pslld $15, %xmm6
+; SSE2-NEXT: psrad $16, %xmm6
+; SSE2-NEXT: pslld $15, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: packssdw %xmm6, %xmm2
+; SSE2-NEXT: pslld $15, %xmm7
+; SSE2-NEXT: psrad $16, %xmm7
+; SSE2-NEXT: pslld $15, %xmm3
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: packssdw %xmm7, %xmm3
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v32i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm8
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm9
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm10
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm11
+; SSE4-NEXT: pmovsxwd %xmm2, %xmm12
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm13
+; SSE4-NEXT: pmovsxwd %xmm3, %xmm14
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm15
+; SSE4-NEXT: pmovsxwd %xmm4, %xmm0
+; SSE4-NEXT: paddd %xmm8, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm4
+; SSE4-NEXT: paddd %xmm9, %xmm4
+; SSE4-NEXT: pmovsxwd %xmm5, %xmm1
+; SSE4-NEXT: paddd %xmm10, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm2, %xmm5
+; SSE4-NEXT: paddd %xmm11, %xmm5
+; SSE4-NEXT: pmovsxwd %xmm6, %xmm2
+; SSE4-NEXT: paddd %xmm12, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm3, %xmm6
+; SSE4-NEXT: paddd %xmm13, %xmm6
+; SSE4-NEXT: pmovsxwd %xmm7, %xmm3
+; SSE4-NEXT: paddd %xmm14, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm7, %xmm7
+; SSE4-NEXT: paddd %xmm15, %xmm7
+; SSE4-NEXT: pcmpeqd %xmm8, %xmm8
+; SSE4-NEXT: psubd %xmm8, %xmm0
+; SSE4-NEXT: psubd %xmm8, %xmm4
+; SSE4-NEXT: psubd %xmm8, %xmm1
+; SSE4-NEXT: psubd %xmm8, %xmm5
+; SSE4-NEXT: psubd %xmm8, %xmm2
+; SSE4-NEXT: psubd %xmm8, %xmm6
+; SSE4-NEXT: psubd %xmm8, %xmm3
+; SSE4-NEXT: psubd %xmm8, %xmm7
+; SSE4-NEXT: psrld $1, %xmm7
+; SSE4-NEXT: psrld $1, %xmm3
+; SSE4-NEXT: psrld $1, %xmm6
+; SSE4-NEXT: psrld $1, %xmm2
+; SSE4-NEXT: psrld $1, %xmm5
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: psrld $1, %xmm4
+; SSE4-NEXT: psrld $1, %xmm0
+; SSE4-NEXT: pxor %xmm8, %xmm8
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm8[1],xmm0[2],xmm8[3],xmm0[4],xmm8[5],xmm0[6],xmm8[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0],xmm8[1],xmm4[2],xmm8[3],xmm4[4],xmm8[5],xmm4[6],xmm8[7]
+; SSE4-NEXT: packusdw %xmm4, %xmm0
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm8[1],xmm1[2],xmm8[3],xmm1[4],xmm8[5],xmm1[6],xmm8[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0],xmm8[1],xmm5[2],xmm8[3],xmm5[4],xmm8[5],xmm5[6],xmm8[7]
+; SSE4-NEXT: packusdw %xmm5, %xmm1
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2],xmm8[3],xmm2[4],xmm8[5],xmm2[6],xmm8[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2],xmm8[3],xmm6[4],xmm8[5],xmm6[6],xmm8[7]
+; SSE4-NEXT: packusdw %xmm6, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm8[1],xmm3[2],xmm8[3],xmm3[4],xmm8[5],xmm3[6],xmm8[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0],xmm8[1],xmm7[2],xmm8[3],xmm7[4],xmm8[5],xmm7[6],xmm8[7]
+; SSE4-NEXT: packusdw %xmm7, %xmm3
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpmovsxwd %xmm4, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm6
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT: vpmovsxwd %xmm7, %xmm8
+; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm7, %xmm7
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm9
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm10
+; AVX1-NEXT: vpmovsxwd %xmm10, %xmm11
+; AVX1-NEXT: vpaddd %xmm5, %xmm11, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm10, %xmm10
+; AVX1-NEXT: vpaddd %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm10
+; AVX1-NEXT: vpaddd %xmm6, %xmm10, %xmm6
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm10
+; AVX1-NEXT: vpaddd %xmm10, %xmm8, %xmm8
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm7, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm3, %xmm7
+; AVX1-NEXT: vpaddd %xmm7, %xmm9, %xmm7
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm3, %xmm5, %xmm5
+; AVX1-NEXT: vpsubd %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpsubd %xmm3, %xmm6, %xmm6
+; AVX1-NEXT: vpsubd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm3, %xmm8, %xmm8
+; AVX1-NEXT: vpsubd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm3, %xmm7, %xmm7
+; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm7, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm8, %xmm7
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm6, %xmm6
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrld $1, %xmm5, %xmm5
+; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
+; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0],xmm8[1],xmm5[2],xmm8[3],xmm5[4],xmm8[5],xmm5[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm8[1],xmm4[2],xmm8[3],xmm4[4],xmm8[5],xmm4[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0],xmm8[1],xmm6[2],xmm8[3],xmm6[4],xmm8[5],xmm6[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm8[1],xmm0[2],xmm8[3],xmm0[4],xmm8[5],xmm0[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm0, %xmm5, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0],xmm8[1],xmm7[2],xmm8[3],xmm7[4],xmm8[5],xmm7[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2],xmm8[3],xmm2[4],xmm8[5],xmm2[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm8[1],xmm3[2],xmm8[3],xmm3[4],xmm8[5],xmm3[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm8[1],xmm1[2],xmm8[3],xmm1[4],xmm8[5],xmm1[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; AVX2-NEXT: vpmovsxwd %xmm4, %ymm4
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm5
+; AVX2-NEXT: vpmovsxwd %xmm5, %ymm5
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm6
+; AVX2-NEXT: vpmovsxwd %xmm6, %ymm6
+; AVX2-NEXT: vpaddd %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
+; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm2, %ymm5, %ymm2
+; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
+; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX2-NEXT: vpsubd %ymm3, %ymm4, %ymm4
+; AVX2-NEXT: vpsubd %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpsubd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm2, %ymm2
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm4, %ymm3
+; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7],ymm3[8],ymm4[9],ymm3[10],ymm4[11],ymm3[12],ymm4[13],ymm3[14],ymm4[15]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2],ymm4[3],ymm0[4],ymm4[5],ymm0[6],ymm4[7],ymm0[8],ymm4[9],ymm0[10],ymm4[11],ymm0[12],ymm4[13],ymm0[14],ymm4[15]
+; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3],ymm2[4],ymm4[5],ymm2[6],ymm4[7],ymm2[8],ymm4[9],ymm2[10],ymm4[11],ymm2[12],ymm4[13],ymm2[14],ymm4[15]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2],ymm4[3],ymm1[4],ymm4[5],ymm1[6],ymm4[7],ymm1[8],ymm4[9],ymm1[10],ymm4[11],ymm1[12],ymm4[13],ymm1[14],ymm4[15]
+; AVX2-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxwd %ymm0, %zmm2
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxwd %ymm1, %zmm3
+; AVX512-NEXT: vpaddd %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubd %zmm1, %zmm2, %zmm2
+; AVX512-NEXT: vpsubd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm2, %zmm1
+; AVX512-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: retq
+ %x0 = sext <32 x i16> %a0 to <32 x i32>
+ %x1 = sext <32 x i16> %a1 to <32 x i32>
+ %sum = add <32 x i32> %x0, %x1
+ %inc = add <32 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shift = ashr <32 x i32> %inc, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <32 x i32> %shift to <32 x i16>
+ ret <32 x i16> %res
+}
+
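+; <16 x i32> variant of the fixed-point expansion (a0 | a1) - ((a0 ^ a1) >>s 1).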
+define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE-LABEL: test_fixed_v16i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: por %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: por %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: por %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm0, %xmm4
+; SSE-NEXT: pxor %xmm1, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm6
+; SSE-NEXT: pxor %xmm8, %xmm7
+; SSE-NEXT: psrad $1, %xmm7
+; SSE-NEXT: psubd %xmm7, %xmm3
+; SSE-NEXT: psrad $1, %xmm6
+; SSE-NEXT: psubd %xmm6, %xmm9
+; SSE-NEXT: psrad $1, %xmm5
+; SSE-NEXT: psubd %xmm5, %xmm10
+; SSE-NEXT: psrad $1, %xmm4
+; SSE-NEXT: psubd %xmm4, %xmm11
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpsubd %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsubd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubd %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxord %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsrad $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsubd %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %or = or <16 x i32> %a0, %a1
+ %xor = xor <16 x i32> %a1, %a0
+ %shift = ashr <16 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = sub <16 x i32> %or, %shift
+ ret <16 x i32> %res
+}
+
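+; <16 x i32> variant of the sign-extended reference form, widened to <16 x i64>.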
+define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE2-LABEL: test_ext_v16i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm0[2,3,2,3]
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm13, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm8[0],xmm13[1],xmm8[1]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm1[2,3,2,3]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm12, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm8[0],xmm12[1],xmm8[1]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm2[2,3,2,3]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm11, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm8[0],xmm11[1],xmm8[1]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm4[2,3,2,3]
+; SSE2-NEXT: pxor %xmm14, %xmm14
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm14
+; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1]
+; SSE2-NEXT: paddq %xmm13, %xmm8
+; SSE2-NEXT: pxor %xmm13, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm13
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1]
+; SSE2-NEXT: paddq %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,2,3]
+; SSE2-NEXT: pxor %xmm13, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm13
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1]
+; SSE2-NEXT: paddq %xmm12, %xmm4
+; SSE2-NEXT: pxor %xmm12, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm12
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
+; SSE2-NEXT: paddq %xmm5, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,2,3]
+; SSE2-NEXT: pxor %xmm12, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm12
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
+; SSE2-NEXT: paddq %xmm11, %xmm5
+; SSE2-NEXT: pxor %xmm11, %xmm11
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm11
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1]
+; SSE2-NEXT: paddq %xmm6, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm7[2,3,2,3]
+; SSE2-NEXT: pxor %xmm11, %xmm11
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm11
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1]
+; SSE2-NEXT: paddq %xmm10, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm9
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
+; SSE2-NEXT: paddq %xmm7, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
+; SSE2-NEXT: psubq %xmm7, %xmm8
+; SSE2-NEXT: psubq %xmm7, %xmm0
+; SSE2-NEXT: psubq %xmm7, %xmm4
+; SSE2-NEXT: psubq %xmm7, %xmm1
+; SSE2-NEXT: psubq %xmm7, %xmm5
+; SSE2-NEXT: psubq %xmm7, %xmm2
+; SSE2-NEXT: psubq %xmm7, %xmm6
+; SSE2-NEXT: psubq %xmm7, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm6
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm6[0,2]
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[0,2]
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm8
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm8[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm8, %xmm9
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm10
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm11
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm12
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm13
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm14
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm15
+; SSE4-NEXT: pmovsxdq %xmm3, %xmm0
+; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm8
+; SSE4-NEXT: paddq %xmm9, %xmm8
+; SSE4-NEXT: pmovsxdq %xmm4, %xmm0
+; SSE4-NEXT: paddq %xmm10, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm4
+; SSE4-NEXT: paddq %xmm11, %xmm4
+; SSE4-NEXT: pmovsxdq %xmm5, %xmm1
+; SSE4-NEXT: paddq %xmm12, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm6[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm5
+; SSE4-NEXT: paddq %xmm13, %xmm5
+; SSE4-NEXT: pmovsxdq %xmm6, %xmm2
+; SSE4-NEXT: paddq %xmm14, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm7[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm3, %xmm6
+; SSE4-NEXT: paddq %xmm15, %xmm6
+; SSE4-NEXT: pmovsxdq %xmm7, %xmm3
+; SSE4-NEXT: paddq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE4-NEXT: pcmpeqd %xmm7, %xmm7
+; SSE4-NEXT: psubq %xmm7, %xmm8
+; SSE4-NEXT: psubq %xmm7, %xmm0
+; SSE4-NEXT: psubq %xmm7, %xmm4
+; SSE4-NEXT: psubq %xmm7, %xmm1
+; SSE4-NEXT: psubq %xmm7, %xmm5
+; SSE4-NEXT: psubq %xmm7, %xmm2
+; SSE4-NEXT: psubq %xmm7, %xmm6
+; SSE4-NEXT: psubq %xmm7, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm6
+; SSE4-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm6[0,2]
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm5
+; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[0,2]
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm8
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm8[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpmovsxdq %xmm5, %xmm6
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm5, %xmm5
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm7
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm8
+; AVX1-NEXT: vpmovsxdq %xmm8, %xmm9
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm8, %xmm8
+; AVX1-NEXT: vpmovsxdq %xmm2, %xmm10
+; AVX1-NEXT: vpaddq %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm10
+; AVX1-NEXT: vpmovsxdq %xmm10, %xmm11
+; AVX1-NEXT: vpaddq %xmm6, %xmm11, %xmm6
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm10[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm5
+; AVX1-NEXT: vpaddq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
+; AVX1-NEXT: vpmovsxdq %xmm7, %xmm10
+; AVX1-NEXT: vpaddq %xmm10, %xmm9, %xmm9
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm7[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm8, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm7, %xmm7, %xmm7
+; AVX1-NEXT: vpsubq %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vpsubq %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpsubq %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm7, %xmm5, %xmm5
+; AVX1-NEXT: vpsubq %xmm7, %xmm9, %xmm8
+; AVX1-NEXT: vpsubq %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm8, %xmm7
+; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm6, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm4[0,2],ymm0[0,2],ymm4[4,6],ymm0[4,6]
+; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm2
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2],ymm1[0,2],ymm2[4,6],ymm1[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm4
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm5
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpmovsxdq %xmm3, %ymm6
+; AVX2-NEXT: vpaddq %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpmovsxdq %xmm2, %ymm6
+; AVX2-NEXT: vpaddq %ymm6, %ymm5, %ymm5
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
+; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
+; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpsubq %ymm2, %ymm4, %ymm3
+; AVX2-NEXT: vpsubq %ymm2, %ymm5, %ymm4
+; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm4[2,3],ymm0[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm4, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm3[2,3],ymm1[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxdq %ymm0, %zmm2
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxdq %ymm1, %zmm3
+; AVX512-NEXT: vpaddq %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; AVX512-NEXT: vpmovsxdq %ymm1, %zmm1
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubq %zmm1, %zmm2, %zmm2
+; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm2, %zmm1
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: retq
+ %x0 = sext <16 x i32> %a0 to <16 x i64>
+ %x1 = sext <16 x i32> %a1 to <16 x i64>
+ %sum = add <16 x i64> %x0, %x1
+ %inc = add <16 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %shift = ashr <16 x i64> %inc, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <16 x i64> %shift to <16 x i32>
+ ret <16 x i32> %res
+}
+
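+; <8 x i64> variant of the fixed-point expansion; pre-AVX512 targets have no
+; vector arithmetic shift for i64, so the ashr is emulated with psrlq/psrad blends.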
+define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE2-LABEL: test_fixed_v8i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm3, %xmm8
+; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: movdqa %xmm1, %xmm10
+; SSE2-NEXT: movdqa %xmm0, %xmm11
+; SSE2-NEXT: por %xmm7, %xmm3
+; SSE2-NEXT: por %xmm6, %xmm2
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: pxor %xmm11, %xmm4
+; SSE2-NEXT: pxor %xmm10, %xmm5
+; SSE2-NEXT: pxor %xmm9, %xmm6
+; SSE2-NEXT: pxor %xmm8, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm8
+; SSE2-NEXT: psrlq $1, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
+; SSE2-NEXT: psubq %xmm7, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm7
+; SSE2-NEXT: psrlq $1, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
+; SSE2-NEXT: psubq %xmm6, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm6
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; SSE2-NEXT: psubq %xmm5, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: psubq %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_fixed_v8i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm3, %xmm8
+; SSE4-NEXT: movdqa %xmm2, %xmm9
+; SSE4-NEXT: movdqa %xmm1, %xmm10
+; SSE4-NEXT: movdqa %xmm0, %xmm11
+; SSE4-NEXT: por %xmm7, %xmm3
+; SSE4-NEXT: por %xmm6, %xmm2
+; SSE4-NEXT: por %xmm5, %xmm1
+; SSE4-NEXT: por %xmm4, %xmm0
+; SSE4-NEXT: pxor %xmm11, %xmm4
+; SSE4-NEXT: pxor %xmm10, %xmm5
+; SSE4-NEXT: pxor %xmm9, %xmm6
+; SSE4-NEXT: pxor %xmm8, %xmm7
+; SSE4-NEXT: movdqa %xmm7, %xmm8
+; SSE4-NEXT: psrad $1, %xmm8
+; SSE4-NEXT: psrlq $1, %xmm7
+; SSE4-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm8[2,3],xmm7[4,5],xmm8[6,7]
+; SSE4-NEXT: psubq %xmm7, %xmm3
+; SSE4-NEXT: movdqa %xmm6, %xmm7
+; SSE4-NEXT: psrad $1, %xmm7
+; SSE4-NEXT: psrlq $1, %xmm6
+; SSE4-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5],xmm7[6,7]
+; SSE4-NEXT: psubq %xmm6, %xmm2
+; SSE4-NEXT: movdqa %xmm5, %xmm6
+; SSE4-NEXT: psrad $1, %xmm6
+; SSE4-NEXT: psrlq $1, %xmm5
+; SSE4-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7]
+; SSE4-NEXT: psubq %xmm5, %xmm1
+; SSE4-NEXT: movdqa %xmm4, %xmm5
+; SSE4-NEXT: psrad $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; SSE4-NEXT: psubq %xmm4, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm6
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1],xmm3[2,3],xmm6[4,5],xmm3[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpsubq %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpsubq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm1, %ymm2
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX2-NEXT: vpsubq %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm2
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
+; AVX2-NEXT: vpsubq %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsraq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsubq %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %or = or <8 x i64> %a0, %a1
+ %xor = xor <8 x i64> %a1, %a0
+ %shift = ashr <8 x i64> %xor, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = sub <8 x i64> %or, %shift
+ ret <8 x i64> %res
+}
+
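+; <8 x i64> variant of the sign-extended reference form; the <8 x i128> arithmetic
+; is scalarized into add/adc chains on SSE targets.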
+define <8 x i64> @test_ext_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE2-LABEL: test_ext_v8i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: pushq %rax
+; SSE2-NEXT: .cfi_def_cfa_offset 64
+; SSE2-NEXT: .cfi_offset %rbx, -56
+; SSE2-NEXT: .cfi_offset %r12, -48
+; SSE2-NEXT: .cfi_offset %r13, -40
+; SSE2-NEXT: .cfi_offset %r14, -32
+; SSE2-NEXT: .cfi_offset %r15, -24
+; SSE2-NEXT: .cfi_offset %rbp, -16
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %rax
+; SSE2-NEXT: movq %rax, (%rsp) # 8-byte Spill
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %xmm1, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %xmm3, %rbx
+; SSE2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %rbx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdi
+; SSE2-NEXT: movq %rdi, %rbp
+; SSE2-NEXT: sarq $63, %rbp
+; SSE2-NEXT: movq %xmm4, %r8
+; SSE2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r8
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r11
+; SSE2-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r11
+; SSE2-NEXT: movq %xmm5, %r10
+; SSE2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r10
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r15
+; SSE2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r15
+; SSE2-NEXT: movq %xmm6, %r9
+; SSE2-NEXT: movq %r9, %r14
+; SSE2-NEXT: sarq $63, %r14
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rsi
+; SSE2-NEXT: movq %rsi, %r13
+; SSE2-NEXT: sarq $63, %r13
+; SSE2-NEXT: movq %xmm7, %rdx
+; SSE2-NEXT: movq %rdx, %r12
+; SSE2-NEXT: sarq $63, %r12
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: addq %rax, %rdi
+; SSE2-NEXT: adcq %rbp, %rcx
+; SSE2-NEXT: addq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; SSE2-NEXT: adcq %rbx, %r12
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; SSE2-NEXT: addq %rsi, %rbp
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; SSE2-NEXT: addq %r9, %rbx
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; SSE2-NEXT: adcq (%rsp), %r8 # 8-byte Folded Reload
+; SSE2-NEXT: addq $1, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: adcq $0, %r8
+; SSE2-NEXT: addq $1, %rdx
+; SSE2-NEXT: adcq $0, %r11
+; SSE2-NEXT: addq $1, %rsi
+; SSE2-NEXT: adcq $0, %r10
+; SSE2-NEXT: addq $1, %r9
+; SSE2-NEXT: adcq $0, %r15
+; SSE2-NEXT: addq $1, %rbx
+; SSE2-NEXT: adcq $0, %r14
+; SSE2-NEXT: addq $1, %rbp
+; SSE2-NEXT: adcq $0, %r13
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE2-NEXT: addq $1, %rax
+; SSE2-NEXT: adcq $0, %r12
+; SSE2-NEXT: addq $1, %rdi
+; SSE2-NEXT: adcq $0, %rcx
+; SSE2-NEXT: shldq $63, %rdi, %rcx
+; SSE2-NEXT: shldq $63, %rax, %r12
+; SSE2-NEXT: shldq $63, %rbp, %r13
+; SSE2-NEXT: shldq $63, %rbx, %r14
+; SSE2-NEXT: shldq $63, %r9, %r15
+; SSE2-NEXT: shldq $63, %rsi, %r10
+; SSE2-NEXT: shldq $63, %rdx, %r11
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE2-NEXT: shldq $63, %rax, %r8
+; SSE2-NEXT: movq %r8, %xmm0
+; SSE2-NEXT: movq %r11, %xmm4
+; SSE2-NEXT: movq %r10, %xmm1
+; SSE2-NEXT: movq %r15, %xmm5
+; SSE2-NEXT: movq %r14, %xmm2
+; SSE2-NEXT: movq %r13, %xmm6
+; SSE2-NEXT: movq %r12, %xmm3
+; SSE2-NEXT: movq %rcx, %xmm7
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE2-NEXT: addq $8, %rsp
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pushq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: pushq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: pushq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: pushq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: pushq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: pushq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: subq $16, %rsp
+; SSE4-NEXT: .cfi_def_cfa_offset 72
+; SSE4-NEXT: .cfi_offset %rbx, -56
+; SSE4-NEXT: .cfi_offset %r12, -48
+; SSE4-NEXT: .cfi_offset %r13, -40
+; SSE4-NEXT: .cfi_offset %r14, -32
+; SSE4-NEXT: .cfi_offset %r15, -24
+; SSE4-NEXT: .cfi_offset %rbp, -16
+; SSE4-NEXT: pextrq $1, %xmm0, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %xmm0, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, (%rsp) # 8-byte Spill
+; SSE4-NEXT: pextrq $1, %xmm1, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %xmm1, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: pextrq $1, %xmm2, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %xmm2, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: pextrq $1, %xmm3, %r13
+; SSE4-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r13
+; SSE4-NEXT: movq %xmm3, %rax
+; SSE4-NEXT: movq %rax, %rsi
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rsi
+; SSE4-NEXT: pextrq $1, %xmm4, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %xmm4, %r11
+; SSE4-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r11
+; SSE4-NEXT: pextrq $1, %xmm5, %r10
+; SSE4-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r10
+; SSE4-NEXT: movq %xmm5, %rax
+; SSE4-NEXT: movq %rax, %r14
+; SSE4-NEXT: sarq $63, %r14
+; SSE4-NEXT: pextrq $1, %xmm6, %rdi
+; SSE4-NEXT: movq %rdi, %rbx
+; SSE4-NEXT: sarq $63, %rbx
+; SSE4-NEXT: movq %xmm6, %rdx
+; SSE4-NEXT: movq %rdx, %r12
+; SSE4-NEXT: sarq $63, %r12
+; SSE4-NEXT: pextrq $1, %xmm7, %r15
+; SSE4-NEXT: movq %r15, %r9
+; SSE4-NEXT: sarq $63, %r9
+; SSE4-NEXT: movq %xmm7, %rbp
+; SSE4-NEXT: movq %rbp, %r8
+; SSE4-NEXT: sarq $63, %r8
+; SSE4-NEXT: addq %rbp, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: adcq %rsi, %r8
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE4-NEXT: addq %r15, %rcx
+; SSE4-NEXT: adcq %r13, %r9
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; SSE4-NEXT: addq %rdx, %rbp
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; SSE4-NEXT: addq %rdi, %r13
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; SSE4-NEXT: addq %rax, %r15
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; SSE4-NEXT: adcq (%rsp), %r11 # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; SSE4-NEXT: addq $1, %rdx
+; SSE4-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: adcq $0, %rax
+; SSE4-NEXT: movq %rax, %rdx
+; SSE4-NEXT: addq $1, %rsi
+; SSE4-NEXT: adcq $0, %r11
+; SSE4-NEXT: addq $1, %rdi
+; SSE4-NEXT: adcq $0, %r10
+; SSE4-NEXT: addq $1, %r15
+; SSE4-NEXT: adcq $0, %r14
+; SSE4-NEXT: addq $1, %r13
+; SSE4-NEXT: adcq $0, %rbx
+; SSE4-NEXT: addq $1, %rbp
+; SSE4-NEXT: adcq $0, %r12
+; SSE4-NEXT: addq $1, %rcx
+; SSE4-NEXT: adcq $0, %r9
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE4-NEXT: addq $1, %rax
+; SSE4-NEXT: adcq $0, %r8
+; SSE4-NEXT: shldq $63, %rax, %r8
+; SSE4-NEXT: shldq $63, %rcx, %r9
+; SSE4-NEXT: shldq $63, %rbp, %r12
+; SSE4-NEXT: shldq $63, %r13, %rbx
+; SSE4-NEXT: shldq $63, %r15, %r14
+; SSE4-NEXT: shldq $63, %rdi, %r10
+; SSE4-NEXT: shldq $63, %rsi, %r11
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE4-NEXT: shldq $63, %rcx, %rdx
+; SSE4-NEXT: movq %rdx, %xmm4
+; SSE4-NEXT: movq %r11, %xmm0
+; SSE4-NEXT: movq %r10, %xmm5
+; SSE4-NEXT: movq %r14, %xmm1
+; SSE4-NEXT: movq %rbx, %xmm6
+; SSE4-NEXT: movq %r12, %xmm2
+; SSE4-NEXT: movq %r9, %xmm7
+; SSE4-NEXT: movq %r8, %xmm3
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE4-NEXT: addq $16, %rsp
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: popq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: popq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: popq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: popq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: popq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 8
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: pushq %rax
+; AVX1-NEXT: .cfi_def_cfa_offset 64
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpextrq $1, %xmm4, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: movq %rax, (%rsp) # 8-byte Spill
+; AVX1-NEXT: vmovq %xmm4, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vpextrq $1, %xmm1, %rbx
+; AVX1-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vmovq %xmm1, %r8
+; AVX1-NEXT: movq %r8, %rbp
+; AVX1-NEXT: sarq $63, %rbp
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm0
+; AVX1-NEXT: vpextrq $1, %xmm0, %r9
+; AVX1-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vmovq %xmm0, %r10
+; AVX1-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: vpextrq $1, %xmm2, %r11
+; AVX1-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r11
+; AVX1-NEXT: vmovq %xmm2, %r15
+; AVX1-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r15
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm0
+; AVX1-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX1-NEXT: movq %rdi, %r14
+; AVX1-NEXT: sarq $63, %r14
+; AVX1-NEXT: vmovq %xmm0, %rsi
+; AVX1-NEXT: movq %rsi, %r12
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vpextrq $1, %xmm3, %r13
+; AVX1-NEXT: movq %r13, %rdx
+; AVX1-NEXT: sarq $63, %rdx
+; AVX1-NEXT: vmovq %xmm3, %rax
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: addq %rax, %r8
+; AVX1-NEXT: adcq %rbp, %rcx
+; AVX1-NEXT: addq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX1-NEXT: adcq %rbx, %rdx
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; AVX1-NEXT: addq %rsi, %rbp
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX1-NEXT: addq %rdi, %r13
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; AVX1-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
+; AVX1-NEXT: addq $1, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: adcq $0, %r9
+; AVX1-NEXT: addq $1, %rsi
+; AVX1-NEXT: adcq $0, %r10
+; AVX1-NEXT: addq $1, %rdi
+; AVX1-NEXT: adcq $0, %r11
+; AVX1-NEXT: addq $1, %rbx
+; AVX1-NEXT: adcq $0, %r15
+; AVX1-NEXT: addq $1, %r13
+; AVX1-NEXT: adcq $0, %r14
+; AVX1-NEXT: addq $1, %rbp
+; AVX1-NEXT: adcq $0, %r12
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX1-NEXT: addq $1, %rax
+; AVX1-NEXT: adcq $0, %rdx
+; AVX1-NEXT: addq $1, %r8
+; AVX1-NEXT: adcq $0, %rcx
+; AVX1-NEXT: shldq $63, %r8, %rcx
+; AVX1-NEXT: shldq $63, %rax, %rdx
+; AVX1-NEXT: shldq $63, %rbp, %r12
+; AVX1-NEXT: shldq $63, %r13, %r14
+; AVX1-NEXT: shldq $63, %rbx, %r15
+; AVX1-NEXT: shldq $63, %rdi, %r11
+; AVX1-NEXT: shldq $63, %rsi, %r10
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX1-NEXT: shldq $63, %rax, %r9
+; AVX1-NEXT: vmovq %r9, %xmm0
+; AVX1-NEXT: vmovq %r10, %xmm1
+; AVX1-NEXT: vmovq %r11, %xmm2
+; AVX1-NEXT: vmovq %r15, %xmm3
+; AVX1-NEXT: vmovq %r14, %xmm4
+; AVX1-NEXT: vmovq %r12, %xmm5
+; AVX1-NEXT: vmovq %rdx, %xmm6
+; AVX1-NEXT: vmovq %rcx, %xmm7
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: addq $8, %rsp
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 8
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: pushq %rax
+; AVX2-NEXT: .cfi_def_cfa_offset 64
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; AVX2-NEXT: vpextrq $1, %xmm4, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: movq %rax, (%rsp) # 8-byte Spill
+; AVX2-NEXT: vmovq %xmm4, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vpextrq $1, %xmm1, %rbx
+; AVX2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %rbx
+; AVX2-NEXT: vmovq %xmm1, %r8
+; AVX2-NEXT: movq %r8, %rbp
+; AVX2-NEXT: sarq $63, %rbp
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm0
+; AVX2-NEXT: vpextrq $1, %xmm0, %r9
+; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r9
+; AVX2-NEXT: vmovq %xmm0, %r10
+; AVX2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r10
+; AVX2-NEXT: vpextrq $1, %xmm2, %r11
+; AVX2-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r11
+; AVX2-NEXT: vmovq %xmm2, %r15
+; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r15
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm0
+; AVX2-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX2-NEXT: movq %rdi, %r14
+; AVX2-NEXT: sarq $63, %r14
+; AVX2-NEXT: vmovq %xmm0, %rsi
+; AVX2-NEXT: movq %rsi, %r12
+; AVX2-NEXT: sarq $63, %r12
+; AVX2-NEXT: vpextrq $1, %xmm3, %r13
+; AVX2-NEXT: movq %r13, %rdx
+; AVX2-NEXT: sarq $63, %rdx
+; AVX2-NEXT: vmovq %xmm3, %rax
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: addq %rax, %r8
+; AVX2-NEXT: adcq %rbp, %rcx
+; AVX2-NEXT: addq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX2-NEXT: adcq %rbx, %rdx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; AVX2-NEXT: addq %rsi, %rbp
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT: addq %rdi, %r13
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; AVX2-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
+; AVX2-NEXT: addq $1, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: adcq $0, %r9
+; AVX2-NEXT: addq $1, %rsi
+; AVX2-NEXT: adcq $0, %r10
+; AVX2-NEXT: addq $1, %rdi
+; AVX2-NEXT: adcq $0, %r11
+; AVX2-NEXT: addq $1, %rbx
+; AVX2-NEXT: adcq $0, %r15
+; AVX2-NEXT: addq $1, %r13
+; AVX2-NEXT: adcq $0, %r14
+; AVX2-NEXT: addq $1, %rbp
+; AVX2-NEXT: adcq $0, %r12
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT: addq $1, %rax
+; AVX2-NEXT: adcq $0, %rdx
+; AVX2-NEXT: addq $1, %r8
+; AVX2-NEXT: adcq $0, %rcx
+; AVX2-NEXT: shldq $63, %r8, %rcx
+; AVX2-NEXT: shldq $63, %rax, %rdx
+; AVX2-NEXT: shldq $63, %rbp, %r12
+; AVX2-NEXT: shldq $63, %r13, %r14
+; AVX2-NEXT: shldq $63, %rbx, %r15
+; AVX2-NEXT: shldq $63, %rdi, %r11
+; AVX2-NEXT: shldq $63, %rsi, %r10
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT: shldq $63, %rax, %r9
+; AVX2-NEXT: vmovq %r9, %xmm0
+; AVX2-NEXT: vmovq %r10, %xmm1
+; AVX2-NEXT: vmovq %r11, %xmm2
+; AVX2-NEXT: vmovq %r15, %xmm3
+; AVX2-NEXT: vmovq %r14, %xmm4
+; AVX2-NEXT: vmovq %r12, %xmm5
+; AVX2-NEXT: vmovq %rdx, %xmm6
+; AVX2-NEXT: vmovq %rcx, %xmm7
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: addq $8, %rsp
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 8
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: pushq %rax
+; AVX512-NEXT: .cfi_def_cfa_offset 64
+; AVX512-NEXT: .cfi_offset %rbx, -56
+; AVX512-NEXT: .cfi_offset %r12, -48
+; AVX512-NEXT: .cfi_offset %r13, -40
+; AVX512-NEXT: .cfi_offset %r14, -32
+; AVX512-NEXT: .cfi_offset %r15, -24
+; AVX512-NEXT: .cfi_offset %rbp, -16
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX512-NEXT: vpextrq $1, %xmm3, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %rax
+; AVX512-NEXT: movq %rax, (%rsp) # 8-byte Spill
+; AVX512-NEXT: vmovq %xmm3, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vmovq %xmm2, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vmovq %xmm2, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vpextrq $1, %xmm0, %rbx
+; AVX512-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %rbx
+; AVX512-NEXT: vmovq %xmm0, %r8
+; AVX512-NEXT: movq %r8, %r13
+; AVX512-NEXT: sarq $63, %r13
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512-NEXT: vpextrq $1, %xmm2, %r9
+; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r9
+; AVX512-NEXT: vmovq %xmm2, %r10
+; AVX512-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r10
+; AVX512-NEXT: vpextrq $1, %xmm0, %r11
+; AVX512-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r11
+; AVX512-NEXT: vmovq %xmm0, %r14
+; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r14
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX512-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX512-NEXT: movq %rdi, %r15
+; AVX512-NEXT: sarq $63, %r15
+; AVX512-NEXT: vmovq %xmm0, %rsi
+; AVX512-NEXT: movq %rsi, %r12
+; AVX512-NEXT: sarq $63, %r12
+; AVX512-NEXT: vpextrq $1, %xmm1, %rbp
+; AVX512-NEXT: movq %rbp, %rdx
+; AVX512-NEXT: sarq $63, %rdx
+; AVX512-NEXT: vmovq %xmm1, %rax
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: addq %rax, %r8
+; AVX512-NEXT: adcq %r13, %rcx
+; AVX512-NEXT: addq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX512-NEXT: adcq %rbx, %rdx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; AVX512-NEXT: addq %rsi, %rbp
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX512-NEXT: addq %rdi, %r13
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; AVX512-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
+; AVX512-NEXT: addq $1, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: adcq $0, %r9
+; AVX512-NEXT: addq $1, %rsi
+; AVX512-NEXT: adcq $0, %r10
+; AVX512-NEXT: addq $1, %rdi
+; AVX512-NEXT: adcq $0, %r11
+; AVX512-NEXT: addq $1, %rbx
+; AVX512-NEXT: adcq $0, %r14
+; AVX512-NEXT: addq $1, %r13
+; AVX512-NEXT: adcq $0, %r15
+; AVX512-NEXT: addq $1, %rbp
+; AVX512-NEXT: adcq $0, %r12
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: addq $1, %rax
+; AVX512-NEXT: adcq $0, %rdx
+; AVX512-NEXT: addq $1, %r8
+; AVX512-NEXT: adcq $0, %rcx
+; AVX512-NEXT: shldq $63, %r8, %rcx
+; AVX512-NEXT: shldq $63, %rax, %rdx
+; AVX512-NEXT: shldq $63, %rbp, %r12
+; AVX512-NEXT: shldq $63, %r13, %r15
+; AVX512-NEXT: shldq $63, %rbx, %r14
+; AVX512-NEXT: shldq $63, %rdi, %r11
+; AVX512-NEXT: shldq $63, %rsi, %r10
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: shldq $63, %rax, %r9
+; AVX512-NEXT: vmovq %r9, %xmm0
+; AVX512-NEXT: vmovq %r10, %xmm1
+; AVX512-NEXT: vmovq %r11, %xmm2
+; AVX512-NEXT: vmovq %r14, %xmm3
+; AVX512-NEXT: vmovq %r15, %xmm4
+; AVX512-NEXT: vmovq %r12, %xmm5
+; AVX512-NEXT: vmovq %rdx, %xmm6
+; AVX512-NEXT: vmovq %rcx, %xmm7
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: addq $8, %rsp
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 8
+; AVX512-NEXT: retq
+ %x0 = sext <8 x i64> %a0 to <8 x i128>
+ %x1 = sext <8 x i64> %a1 to <8 x i128>
+ %sum = add <8 x i128> %x0, %x1
+ %inc = add <8 x i128> %sum, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1>
+ %shift = ashr <8 x i128> %inc, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <8 x i128> %shift to <8 x i64>
+ ret <8 x i64> %res
+}
+
diff --git a/llvm/test/CodeGen/X86/avgceilu.ll b/llvm/test/CodeGen/X86/avgceilu.ll
new file mode 100644
index 0000000..dee1a5a
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avgceilu.ll
@@ -0,0 +1,2187 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE4
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
+;
+; 128-bit vectors
+;
+
+define <16 x i8> @test_fixed_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: test_fixed_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v16i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %or = or <16 x i8> %a0, %a1
+ %xor = xor <16 x i8> %a0, %a1
+ %shift = lshr <16 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = sub <16 x i8> %or, %shift
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_ext_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: test_ext_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_ext_v16i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %x0 = zext <16 x i8> %a0 to <16 x i16>
+ %x1 = zext <16 x i8> %a1 to <16 x i16>
+ %sum = add <16 x i16> %x0, %x1
+ %inc = add <16 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shift = lshr <16 x i16> %inc, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <16 x i16> %shift to <16 x i8>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @test_fixed_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: test_fixed_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %or = or <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a1, %a0
+ %shift = lshr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <8 x i16> %or, %shift
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_ext_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: test_ext_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_ext_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %x0 = zext <8 x i16> %a0 to <8 x i32>
+ %x1 = zext <8 x i16> %a1 to <8 x i32>
+ %sum = add <8 x i32> %x0, %x1
+ %inc = add <8 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shift = lshr <8 x i32> %inc, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <8 x i32> %shift to <8 x i16>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: test_fixed_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm0, %xmm1
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: psubd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX-NEXT: vpsubd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %or = or <4 x i32> %a0, %a1
+ %xor = xor <4 x i32> %a1, %a0
+ %shift = lshr <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1>
+ %res = sub <4 x i32> %or, %shift
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @test_ext_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE2-LABEL: test_ext_v4i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE2-NEXT: paddq %xmm4, %xmm2
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT: paddq %xmm0, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT: psubq %xmm0, %xmm2
+; SSE2-NEXT: psubq %xmm0, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pxor %xmm3, %xmm3
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE4-NEXT: paddq %xmm0, %xmm1
+; SSE4-NEXT: paddq %xmm4, %xmm2
+; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE4-NEXT: psubq %xmm0, %xmm1
+; SSE4-NEXT: psubq %xmm0, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
+; SSE4-NEXT: movaps %xmm2, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = zext <4 x i32> %a0 to <4 x i64>
+ %x1 = zext <4 x i32> %a1 to <4 x i64>
+ %sum = add <4 x i64> %x0, %x1
+ %inc = add <4 x i64> %sum, <i64 1, i64 1, i64 1, i64 1>
+ %shift = lshr <4 x i64> %inc, <i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <4 x i64> %shift to <4 x i32>
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE-LABEL: test_fixed_v2i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm0, %xmm1
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: psubq %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %or = or <2 x i64> %a0, %a1
+ %xor = xor <2 x i64> %a1, %a0
+ %shift = lshr <2 x i64> %xor, <i64 1, i64 1>
+ %res = sub <2 x i64> %or, %shift
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @test_ext_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE2-LABEL: test_ext_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rcx
+; SSE2-NEXT: movb $1, %dl
+; SSE2-NEXT: movb $1, %sil
+; SSE2-NEXT: addb $-1, %sil
+; SSE2-NEXT: leaq 1(%rax,%rcx), %rsi
+; SSE2-NEXT: adcq %rcx, %rax
+; SSE2-NEXT: setb %al
+; SSE2-NEXT: addb $-1, %dl
+; SSE2-NEXT: movq %xmm0, %rcx
+; SSE2-NEXT: movq %xmm1, %rdx
+; SSE2-NEXT: leaq 1(%rcx,%rdx), %rdi
+; SSE2-NEXT: adcq %rdx, %rcx
+; SSE2-NEXT: setb %cl
+; SSE2-NEXT: movzbl %cl, %ecx
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: shrdq $1, %rcx, %rdi
+; SSE2-NEXT: shrdq $1, %rax, %rsi
+; SSE2-NEXT: movq %rdi, %xmm0
+; SSE2-NEXT: movq %rsi, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v2i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movq %xmm0, %rax
+; SSE4-NEXT: movq %xmm1, %rcx
+; SSE4-NEXT: movb $1, %dl
+; SSE4-NEXT: movb $1, %sil
+; SSE4-NEXT: addb $-1, %sil
+; SSE4-NEXT: leaq 1(%rax,%rcx), %rsi
+; SSE4-NEXT: adcq %rcx, %rax
+; SSE4-NEXT: setb %al
+; SSE4-NEXT: addb $-1, %dl
+; SSE4-NEXT: pextrq $1, %xmm0, %rcx
+; SSE4-NEXT: pextrq $1, %xmm1, %rdx
+; SSE4-NEXT: leaq 1(%rcx,%rdx), %rdi
+; SSE4-NEXT: adcq %rdx, %rcx
+; SSE4-NEXT: setb %cl
+; SSE4-NEXT: movzbl %cl, %ecx
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: shrdq $1, %rcx, %rdi
+; SSE4-NEXT: shrdq $1, %rax, %rsi
+; SSE4-NEXT: movq %rdi, %xmm1
+; SSE4-NEXT: movq %rsi, %xmm0
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE4-NEXT: retq
+;
+; AVX-LABEL: test_ext_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: vmovq %xmm1, %rcx
+; AVX-NEXT: movb $1, %dl
+; AVX-NEXT: movb $1, %sil
+; AVX-NEXT: addb $-1, %sil
+; AVX-NEXT: leaq 1(%rax,%rcx), %rsi
+; AVX-NEXT: adcq %rcx, %rax
+; AVX-NEXT: setb %al
+; AVX-NEXT: addb $-1, %dl
+; AVX-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX-NEXT: vpextrq $1, %xmm1, %rdx
+; AVX-NEXT: leaq 1(%rcx,%rdx), %rdi
+; AVX-NEXT: adcq %rdx, %rcx
+; AVX-NEXT: setb %cl
+; AVX-NEXT: movzbl %cl, %ecx
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: shrdq $1, %rcx, %rdi
+; AVX-NEXT: shrdq $1, %rax, %rsi
+; AVX-NEXT: vmovq %rdi, %xmm0
+; AVX-NEXT: vmovq %rsi, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: retq
+ %x0 = zext <2 x i64> %a0 to <2 x i128>
+ %x1 = zext <2 x i64> %a1 to <2 x i128>
+ %sum = add <2 x i128> %x0, %x1
+ %inc = add <2 x i128> %sum, <i128 1, i128 1>
+ %shift = lshr <2 x i128> %inc, <i128 1, i128 1>
+ %res = trunc <2 x i128> %shift to <2 x i64>
+ ret <2 x i64> %res
+}
+
+;
+; 256-bit vectors
+;
+
+define <32 x i8> @test_fixed_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE-LABEL: test_fixed_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgb %xmm2, %xmm0
+; SSE-NEXT: pavgb %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpavgb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %or = or <32 x i8> %a0, %a1
+ %xor = xor <32 x i8> %a0, %a1
+ %shift = lshr <32 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = sub <32 x i8> %or, %shift
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_ext_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE-LABEL: test_ext_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgb %xmm2, %xmm0
+; SSE-NEXT: pavgb %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpavgb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <32 x i8> %a0 to <32 x i16>
+ %x1 = zext <32 x i8> %a1 to <32 x i16>
+ %sum = add <32 x i16> %x0, %x1
+ %inc = add <32 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shift = lshr <32 x i16> %inc, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <32 x i16> %shift to <32 x i8>
+ ret <32 x i8> %res
+}
+
+define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE-LABEL: test_fixed_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgw %xmm2, %xmm0
+; SSE-NEXT: pavgw %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpavgw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %or = or <16 x i16> %a0, %a1
+ %xor = xor <16 x i16> %a1, %a0
+ %shift = lshr <16 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <16 x i16> %or, %shift
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_ext_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE-LABEL: test_ext_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgw %xmm2, %xmm0
+; SSE-NEXT: pavgw %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpavgw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <16 x i16> %a0 to <16 x i32>
+ %x1 = zext <16 x i16> %a1 to <16 x i32>
+ %sum = add <16 x i32> %x0, %x1
+ %inc = add <16 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shift = lshr <16 x i32> %inc, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <16 x i32> %shift to <16 x i16>
+ ret <16 x i16> %res
+}
+
+define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE-LABEL: test_fixed_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: por %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm0, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm3
+; SSE-NEXT: psrld $1, %xmm3
+; SSE-NEXT: psubd %xmm3, %xmm4
+; SSE-NEXT: psrld $1, %xmm2
+; SSE-NEXT: psubd %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsubd %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubd %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %or = or <8 x i32> %a0, %a1
+ %xor = xor <8 x i32> %a1, %a0
+ %shift = lshr <8 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = sub <8 x i32> %or, %shift
+ ret <8 x i32> %res
+}
+
+define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE2-LABEL: test_ext_v8i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm7
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
+; SSE2-NEXT: paddq %xmm6, %xmm0
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; SSE2-NEXT: paddq %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: paddq %xmm7, %xmm4
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; SSE2-NEXT: paddq %xmm1, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: psubq %xmm1, %xmm0
+; SSE2-NEXT: psubq %xmm1, %xmm2
+; SSE2-NEXT: psubq %xmm1, %xmm4
+; SSE2-NEXT: psubq %xmm1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,2]
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; SSE2-NEXT: movaps %xmm4, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm0, %xmm4
+; SSE4-NEXT: pxor %xmm5, %xmm5
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; SSE4-NEXT: paddq %xmm4, %xmm2
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; SSE4-NEXT: paddq %xmm1, %xmm3
+; SSE4-NEXT: paddq %xmm6, %xmm0
+; SSE4-NEXT: paddq %xmm7, %xmm4
+; SSE4-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE4-NEXT: psubq %xmm1, %xmm2
+; SSE4-NEXT: psubq %xmm1, %xmm3
+; SSE4-NEXT: psubq %xmm1, %xmm0
+; SSE4-NEXT: psubq %xmm1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,2]
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; SSE4-NEXT: movaps %xmm4, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX1-NEXT: vpaddq %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm6[2],xmm2[2],xmm6[3],xmm2[3]
+; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm6[0],zero,xmm6[1],zero
+; AVX1-NEXT: vpaddq %xmm1, %xmm4, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpsubq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsubq %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm0[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <8 x i32> %a0 to <8 x i64>
+ %x1 = zext <8 x i32> %a1 to <8 x i64>
+ %sum = add <8 x i64> %x0, %x1
+ %inc = add <8 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %shift = lshr <8 x i64> %inc, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <8 x i64> %shift to <8 x i32>
+ ret <8 x i32> %res
+}
+
+define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE-LABEL: test_fixed_v4i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: por %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm0, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm3
+; SSE-NEXT: psrlq $1, %xmm3
+; SSE-NEXT: psubq %xmm3, %xmm4
+; SSE-NEXT: psrlq $1, %xmm2
+; SSE-NEXT: psubq %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsubq %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubq %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %or = or <4 x i64> %a0, %a1
+ %xor = xor <4 x i64> %a1, %a0
+ %shift = lshr <4 x i64> %xor, <i64 1, i64 1, i64 1, i64 1>
+ %res = sub <4 x i64> %or, %shift
+ ret <4 x i64> %res
+}
+
+define <4 x i64> @test_ext_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE2-LABEL: test_ext_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm4, %rcx
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm4, %rdx
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: movb $1, %sil
+; SSE2-NEXT: addb $-1, %sil
+; SSE2-NEXT: leaq 1(%rcx,%rdx), %rsi
+; SSE2-NEXT: adcq %rdx, %rcx
+; SSE2-NEXT: setb %dl
+; SSE2-NEXT: movb $1, %cl
+; SSE2-NEXT: addb $-1, %cl
+; SSE2-NEXT: movq %xmm1, %rdi
+; SSE2-NEXT: movq %xmm3, %r8
+; SSE2-NEXT: leaq 1(%rdi,%r8), %rcx
+; SSE2-NEXT: adcq %r8, %rdi
+; SSE2-NEXT: setb %dil
+; SSE2-NEXT: movb $1, %r8b
+; SSE2-NEXT: addb $-1, %r8b
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %r8
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %r9
+; SSE2-NEXT: leaq 1(%r8,%r9), %r10
+; SSE2-NEXT: adcq %r9, %r8
+; SSE2-NEXT: setb %r8b
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %xmm2, %r9
+; SSE2-NEXT: leaq 1(%rax,%r9), %r11
+; SSE2-NEXT: adcq %r9, %rax
+; SSE2-NEXT: setb %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movzbl %r8b, %r8d
+; SSE2-NEXT: movzbl %dil, %edi
+; SSE2-NEXT: movzbl %dl, %edx
+; SSE2-NEXT: shrdq $1, %rax, %r11
+; SSE2-NEXT: shrdq $1, %r8, %r10
+; SSE2-NEXT: shrdq $1, %rdi, %rcx
+; SSE2-NEXT: shrdq $1, %rdx, %rsi
+; SSE2-NEXT: movq %r11, %xmm0
+; SSE2-NEXT: movq %r10, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: movq %rcx, %xmm1
+; SSE2-NEXT: movq %rsi, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movq %xmm1, %rcx
+; SSE4-NEXT: movq %xmm3, %rdx
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: movb $1, %sil
+; SSE4-NEXT: addb $-1, %sil
+; SSE4-NEXT: leaq 1(%rcx,%rdx), %rsi
+; SSE4-NEXT: adcq %rdx, %rcx
+; SSE4-NEXT: setb %dl
+; SSE4-NEXT: movb $1, %cl
+; SSE4-NEXT: addb $-1, %cl
+; SSE4-NEXT: pextrq $1, %xmm1, %rdi
+; SSE4-NEXT: pextrq $1, %xmm3, %r8
+; SSE4-NEXT: leaq 1(%rdi,%r8), %rcx
+; SSE4-NEXT: adcq %r8, %rdi
+; SSE4-NEXT: setb %dil
+; SSE4-NEXT: movb $1, %r8b
+; SSE4-NEXT: addb $-1, %r8b
+; SSE4-NEXT: movq %xmm0, %r8
+; SSE4-NEXT: movq %xmm2, %r9
+; SSE4-NEXT: leaq 1(%r8,%r9), %r10
+; SSE4-NEXT: adcq %r9, %r8
+; SSE4-NEXT: setb %r8b
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: pextrq $1, %xmm0, %rax
+; SSE4-NEXT: pextrq $1, %xmm2, %r9
+; SSE4-NEXT: leaq 1(%rax,%r9), %r11
+; SSE4-NEXT: adcq %r9, %rax
+; SSE4-NEXT: setb %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: movzbl %r8b, %r8d
+; SSE4-NEXT: movzbl %dil, %edi
+; SSE4-NEXT: movzbl %dl, %edx
+; SSE4-NEXT: shrdq $1, %rax, %r11
+; SSE4-NEXT: shrdq $1, %r8, %r10
+; SSE4-NEXT: shrdq $1, %rdi, %rcx
+; SSE4-NEXT: shrdq $1, %rdx, %rsi
+; SSE4-NEXT: movq %r11, %xmm1
+; SSE4-NEXT: movq %r10, %xmm0
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE4-NEXT: movq %rcx, %xmm2
+; SSE4-NEXT: movq %rsi, %xmm1
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovq %xmm0, %rcx
+; AVX1-NEXT: vmovq %xmm1, %rdx
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: movb $1, %sil
+; AVX1-NEXT: addb $-1, %sil
+; AVX1-NEXT: leaq 1(%rcx,%rdx), %rsi
+; AVX1-NEXT: adcq %rdx, %rcx
+; AVX1-NEXT: setb %dl
+; AVX1-NEXT: movb $1, %cl
+; AVX1-NEXT: addb $-1, %cl
+; AVX1-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX1-NEXT: vpextrq $1, %xmm1, %r8
+; AVX1-NEXT: leaq 1(%rdi,%r8), %rcx
+; AVX1-NEXT: adcq %r8, %rdi
+; AVX1-NEXT: setb %dil
+; AVX1-NEXT: movb $1, %r8b
+; AVX1-NEXT: addb $-1, %r8b
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %r8
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vmovq %xmm1, %r9
+; AVX1-NEXT: leaq 1(%r8,%r9), %r10
+; AVX1-NEXT: adcq %r9, %r8
+; AVX1-NEXT: setb %r8b
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: vpextrq $1, %xmm1, %r9
+; AVX1-NEXT: leaq 1(%rax,%r9), %r11
+; AVX1-NEXT: adcq %r9, %rax
+; AVX1-NEXT: setb %al
+; AVX1-NEXT: movzbl %al, %eax
+; AVX1-NEXT: movzbl %r8b, %r8d
+; AVX1-NEXT: movzbl %dil, %edi
+; AVX1-NEXT: movzbl %dl, %edx
+; AVX1-NEXT: shrdq $1, %rax, %r11
+; AVX1-NEXT: shrdq $1, %r8, %r10
+; AVX1-NEXT: shrdq $1, %rdi, %rcx
+; AVX1-NEXT: shrdq $1, %rdx, %rsi
+; AVX1-NEXT: vmovq %r11, %xmm0
+; AVX1-NEXT: vmovq %r10, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vmovq %rcx, %xmm1
+; AVX1-NEXT: vmovq %rsi, %xmm2
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovq %xmm0, %rcx
+; AVX2-NEXT: vmovq %xmm1, %rdx
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: movb $1, %sil
+; AVX2-NEXT: addb $-1, %sil
+; AVX2-NEXT: leaq 1(%rcx,%rdx), %rsi
+; AVX2-NEXT: adcq %rdx, %rcx
+; AVX2-NEXT: setb %dl
+; AVX2-NEXT: movb $1, %cl
+; AVX2-NEXT: addb $-1, %cl
+; AVX2-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX2-NEXT: vpextrq $1, %xmm1, %r8
+; AVX2-NEXT: leaq 1(%rdi,%r8), %rcx
+; AVX2-NEXT: adcq %r8, %rdi
+; AVX2-NEXT: setb %dil
+; AVX2-NEXT: movb $1, %r8b
+; AVX2-NEXT: addb $-1, %r8b
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %r8
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vmovq %xmm1, %r9
+; AVX2-NEXT: leaq 1(%r8,%r9), %r10
+; AVX2-NEXT: adcq %r9, %r8
+; AVX2-NEXT: setb %r8b
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: vpextrq $1, %xmm1, %r9
+; AVX2-NEXT: leaq 1(%rax,%r9), %r11
+; AVX2-NEXT: adcq %r9, %rax
+; AVX2-NEXT: setb %al
+; AVX2-NEXT: movzbl %al, %eax
+; AVX2-NEXT: movzbl %r8b, %r8d
+; AVX2-NEXT: movzbl %dil, %edi
+; AVX2-NEXT: movzbl %dl, %edx
+; AVX2-NEXT: shrdq $1, %rax, %r11
+; AVX2-NEXT: shrdq $1, %r8, %r10
+; AVX2-NEXT: shrdq $1, %rdi, %rcx
+; AVX2-NEXT: shrdq $1, %rdx, %rsi
+; AVX2-NEXT: vmovq %r11, %xmm0
+; AVX2-NEXT: vmovq %r10, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vmovq %rcx, %xmm1
+; AVX2-NEXT: vmovq %rsi, %xmm2
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovq %xmm0, %rcx
+; AVX512-NEXT: vmovq %xmm1, %rdx
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: movb $1, %sil
+; AVX512-NEXT: addb $-1, %sil
+; AVX512-NEXT: leaq 1(%rcx,%rdx), %rsi
+; AVX512-NEXT: adcq %rdx, %rcx
+; AVX512-NEXT: setb %dl
+; AVX512-NEXT: movb $1, %cl
+; AVX512-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX512-NEXT: vpextrq $1, %xmm1, %r8
+; AVX512-NEXT: addb $-1, %cl
+; AVX512-NEXT: leaq 1(%rdi,%r8), %rcx
+; AVX512-NEXT: adcq %r8, %rdi
+; AVX512-NEXT: setb %dil
+; AVX512-NEXT: movb $1, %r8b
+; AVX512-NEXT: addb $-1, %r8b
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %r8
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512-NEXT: vmovq %xmm1, %r9
+; AVX512-NEXT: leaq 1(%r8,%r9), %r10
+; AVX512-NEXT: adcq %r9, %r8
+; AVX512-NEXT: setb %r8b
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512-NEXT: vpextrq $1, %xmm1, %r9
+; AVX512-NEXT: leaq 1(%rax,%r9), %r11
+; AVX512-NEXT: adcq %r9, %rax
+; AVX512-NEXT: setb %al
+; AVX512-NEXT: movzbl %al, %eax
+; AVX512-NEXT: movzbl %r8b, %r8d
+; AVX512-NEXT: movzbl %dil, %edi
+; AVX512-NEXT: movzbl %dl, %edx
+; AVX512-NEXT: shrdq $1, %rax, %r11
+; AVX512-NEXT: shrdq $1, %r8, %r10
+; AVX512-NEXT: shrdq $1, %rdi, %rcx
+; AVX512-NEXT: shrdq $1, %rdx, %rsi
+; AVX512-NEXT: vmovq %r11, %xmm0
+; AVX512-NEXT: vmovq %r10, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vmovq %rcx, %xmm1
+; AVX512-NEXT: vmovq %rsi, %xmm2
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <4 x i64> %a0 to <4 x i128>
+ %x1 = zext <4 x i64> %a1 to <4 x i128>
+ %sum = add <4 x i128> %x0, %x1
+ %inc = add <4 x i128> %sum, <i128 1, i128 1, i128 1, i128 1>
+ %shift = lshr <4 x i128> %inc, <i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <4 x i128> %shift to <4 x i64>
+ ret <4 x i64> %res
+}
+
+;
+; 512-bit vectors
+;
+
+define <64 x i8> @test_fixed_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE-LABEL: test_fixed_v64i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgb %xmm4, %xmm0
+; SSE-NEXT: pavgb %xmm5, %xmm1
+; SSE-NEXT: pavgb %xmm6, %xmm2
+; SSE-NEXT: pavgb %xmm7, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpavgb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpavgb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpavgb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpavgb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpavgb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %or = or <64 x i8> %a0, %a1
+ %xor = xor <64 x i8> %a0, %a1
+ %shift = lshr <64 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = sub <64 x i8> %or, %shift
+ ret <64 x i8> %res
+}
+
+define <64 x i8> @test_ext_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE-LABEL: test_ext_v64i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgb %xmm4, %xmm0
+; SSE-NEXT: pavgb %xmm5, %xmm1
+; SSE-NEXT: pavgb %xmm6, %xmm2
+; SSE-NEXT: pavgb %xmm7, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpavgb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpavgb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpavgb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpavgb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpavgb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = zext <64 x i8> %a0 to <64 x i16>
+ %x1 = zext <64 x i8> %a1 to <64 x i16>
+ %sum = add <64 x i16> %x0, %x1
+ %inc = add <64 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shift = lshr <64 x i16> %inc, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <64 x i16> %shift to <64 x i8>
+ ret <64 x i8> %res
+}
+
+define <32 x i16> @test_fixed_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE-LABEL: test_fixed_v32i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgw %xmm4, %xmm0
+; SSE-NEXT: pavgw %xmm5, %xmm1
+; SSE-NEXT: pavgw %xmm6, %xmm2
+; SSE-NEXT: pavgw %xmm7, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpavgw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpavgw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpavgw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpavgw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpavgw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %or = or <32 x i16> %a0, %a1
+ %xor = xor <32 x i16> %a1, %a0
+ %shift = lshr <32 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <32 x i16> %or, %shift
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_ext_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE-LABEL: test_ext_v32i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pavgw %xmm4, %xmm0
+; SSE-NEXT: pavgw %xmm5, %xmm1
+; SSE-NEXT: pavgw %xmm6, %xmm2
+; SSE-NEXT: pavgw %xmm7, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpavgw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpavgw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpavgw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpavgw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpavgw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpavgw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpavgw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = zext <32 x i16> %a0 to <32 x i32>
+ %x1 = zext <32 x i16> %a1 to <32 x i32>
+ %sum = add <32 x i32> %x0, %x1
+ %inc = add <32 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shift = lshr <32 x i32> %inc, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <32 x i32> %shift to <32 x i16>
+ ret <32 x i16> %res
+}
+
+define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE-LABEL: test_fixed_v16i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: por %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: por %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: por %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm0, %xmm4
+; SSE-NEXT: pxor %xmm1, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm6
+; SSE-NEXT: pxor %xmm8, %xmm7
+; SSE-NEXT: psrld $1, %xmm7
+; SSE-NEXT: psubd %xmm7, %xmm3
+; SSE-NEXT: psrld $1, %xmm6
+; SSE-NEXT: psubd %xmm6, %xmm9
+; SSE-NEXT: psrld $1, %xmm5
+; SSE-NEXT: psubd %xmm5, %xmm10
+; SSE-NEXT: psrld $1, %xmm4
+; SSE-NEXT: psubd %xmm4, %xmm11
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpsubd %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsubd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubd %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxord %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsubd %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %or = or <16 x i32> %a0, %a1
+ %xor = xor <16 x i32> %a1, %a0
+ %shift = lshr <16 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = sub <16 x i32> %or, %shift
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE2-LABEL: test_ext_v16i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm2, %xmm8
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: movdqa %xmm0, %xmm10
+; SSE2-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm9[2],xmm1[3],xmm9[3]
+; SSE2-NEXT: movdqa %xmm2, %xmm11
+; SSE2-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm9[2],xmm2[3],xmm9[3]
+; SSE2-NEXT: movdqa %xmm8, %xmm12
+; SSE2-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm9[0],xmm12[1],xmm9[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; SSE2-NEXT: movdqa %xmm3, %xmm13
+; SSE2-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm9[2],xmm3[3],xmm9[3]
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
+; SSE2-NEXT: paddq %xmm10, %xmm0
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm9[2],xmm4[3],xmm9[3]
+; SSE2-NEXT: paddq %xmm1, %xmm4
+; SSE2-NEXT: movdqa %xmm5, %xmm1
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
+; SSE2-NEXT: paddq %xmm11, %xmm1
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
+; SSE2-NEXT: paddq %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm6, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1]
+; SSE2-NEXT: paddq %xmm12, %xmm2
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm9[2],xmm6[3],xmm9[3]
+; SSE2-NEXT: paddq %xmm8, %xmm6
+; SSE2-NEXT: movdqa %xmm7, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
+; SSE2-NEXT: paddq %xmm13, %xmm8
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm9[2],xmm7[3],xmm9[3]
+; SSE2-NEXT: paddq %xmm3, %xmm7
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: psubq %xmm3, %xmm0
+; SSE2-NEXT: psubq %xmm3, %xmm4
+; SSE2-NEXT: psubq %xmm3, %xmm1
+; SSE2-NEXT: psubq %xmm3, %xmm5
+; SSE2-NEXT: psubq %xmm3, %xmm2
+; SSE2-NEXT: psubq %xmm3, %xmm6
+; SSE2-NEXT: psubq %xmm3, %xmm8
+; SSE2-NEXT: psubq %xmm3, %xmm7
+; SSE2-NEXT: psrlq $1, %xmm7
+; SSE2-NEXT: psrlq $1, %xmm8
+; SSE2-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm7[0,2]
+; SSE2-NEXT: psrlq $1, %xmm6
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[0,2]
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE2-NEXT: movaps %xmm8, %xmm3
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm3, %xmm8
+; SSE4-NEXT: movdqa %xmm2, %xmm3
+; SSE4-NEXT: movdqa %xmm1, %xmm2
+; SSE4-NEXT: movdqa %xmm0, %xmm1
+; SSE4-NEXT: pxor %xmm10, %xmm10
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm9 = xmm0[0],zero,xmm0[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm10[2],xmm1[3],xmm10[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm11 = xmm2[0],zero,xmm2[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm10[2],xmm2[3],xmm10[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm12 = xmm3[0],zero,xmm3[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm10[2],xmm3[3],xmm10[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm13 = xmm8[0],zero,xmm8[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm10[2],xmm8[3],xmm10[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
+; SSE4-NEXT: paddq %xmm1, %xmm4
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm10[2],xmm5[3],xmm10[3]
+; SSE4-NEXT: paddq %xmm2, %xmm5
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm6[0],zero,xmm6[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm10[2],xmm6[3],xmm10[3]
+; SSE4-NEXT: paddq %xmm3, %xmm6
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm7[0],zero,xmm7[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm10[2],xmm7[3],xmm10[3]
+; SSE4-NEXT: paddq %xmm8, %xmm7
+; SSE4-NEXT: paddq %xmm9, %xmm0
+; SSE4-NEXT: paddq %xmm11, %xmm1
+; SSE4-NEXT: paddq %xmm12, %xmm2
+; SSE4-NEXT: paddq %xmm13, %xmm3
+; SSE4-NEXT: pcmpeqd %xmm8, %xmm8
+; SSE4-NEXT: psubq %xmm8, %xmm4
+; SSE4-NEXT: psubq %xmm8, %xmm5
+; SSE4-NEXT: psubq %xmm8, %xmm6
+; SSE4-NEXT: psubq %xmm8, %xmm7
+; SSE4-NEXT: psubq %xmm8, %xmm0
+; SSE4-NEXT: psubq %xmm8, %xmm1
+; SSE4-NEXT: psubq %xmm8, %xmm2
+; SSE4-NEXT: psubq %xmm8, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm7
+; SSE4-NEXT: psrlq $1, %xmm6
+; SSE4-NEXT: psrlq $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm7[0,2]
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[0,2]
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm6[2],xmm4[2],xmm6[3],xmm4[3]
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm8 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm10 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm9 = xmm9[0],zero,xmm9[1],zero
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm11 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX1-NEXT: vpaddq %xmm5, %xmm11, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm11
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm12 = xmm11[2],xmm4[2],xmm11[3],xmm4[3]
+; AVX1-NEXT: vpaddq %xmm7, %xmm12, %xmm7
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm12 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX1-NEXT: vpaddq %xmm12, %xmm8, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm12
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm12[2],xmm4[2],xmm12[3],xmm4[3]
+; AVX1-NEXT: vpaddq %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm11[0],zero,xmm11[1],zero
+; AVX1-NEXT: vpaddq %xmm2, %xmm6, %xmm2
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm12[0],zero,xmm12[1],zero
+; AVX1-NEXT: vpaddq %xmm3, %xmm9, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6
+; AVX1-NEXT: vpsubq %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpsubq %xmm6, %xmm7, %xmm7
+; AVX1-NEXT: vpsubq %xmm6, %xmm8, %xmm8
+; AVX1-NEXT: vpsubq %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpsubq %xmm6, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlq $1, %xmm8, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm7, %xmm7
+; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm2
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX2-NEXT: vpaddq %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX2-NEXT: vpaddq %ymm6, %ymm5, %ymm5
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpsubq %ymm2, %ymm4, %ymm3
+; AVX2-NEXT: vpsubq %ymm2, %ymm5, %ymm4
+; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm4[2,3],ymm0[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm4, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm3[2,3],ymm1[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm3 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
+; AVX512-NEXT: vpaddq %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpsubq %zmm1, %zmm2, %zmm2
+; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm2, %zmm1
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: retq
+ %x0 = zext <16 x i32> %a0 to <16 x i64>
+ %x1 = zext <16 x i32> %a1 to <16 x i64>
+ %sum = add <16 x i64> %x0, %x1
+ %inc = add <16 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %shift = lshr <16 x i64> %inc, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <16 x i64> %shift to <16 x i32>
+ ret <16 x i32> %res
+}
+
+define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE-LABEL: test_fixed_v8i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: por %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: por %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: por %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm0, %xmm4
+; SSE-NEXT: pxor %xmm1, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm6
+; SSE-NEXT: pxor %xmm8, %xmm7
+; SSE-NEXT: psrlq $1, %xmm7
+; SSE-NEXT: psubq %xmm7, %xmm3
+; SSE-NEXT: psrlq $1, %xmm6
+; SSE-NEXT: psubq %xmm6, %xmm9
+; SSE-NEXT: psrlq $1, %xmm5
+; SSE-NEXT: psubq %xmm5, %xmm10
+; SSE-NEXT: psrlq $1, %xmm4
+; SSE-NEXT: psubq %xmm4, %xmm11
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpsubq %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpsubq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsubq %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %or = or <8 x i64> %a0, %a1
+ %xor = xor <8 x i64> %a1, %a0
+ %shift = lshr <8 x i64> %xor, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = sub <8 x i64> %or, %shift
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_ext_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE2-LABEL: test_ext_v8i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: .cfi_offset %rbx, -56
+; SSE2-NEXT: .cfi_offset %r12, -48
+; SSE2-NEXT: .cfi_offset %r13, -40
+; SSE2-NEXT: .cfi_offset %r14, -32
+; SSE2-NEXT: .cfi_offset %r15, -24
+; SSE2-NEXT: .cfi_offset %rbp, -16
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm8, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[2,3,2,3]
+; SSE2-NEXT: movq %xmm8, %rdx
+; SSE2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: adcq %rdx, %rax
+; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: movq %xmm3, %r12
+; SSE2-NEXT: movq %xmm7, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %r12, %rax
+; SSE2-NEXT: adcq %rcx, %rax
+; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm3, %r11
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
+; SSE2-NEXT: movq %xmm3, %rbx
+; SSE2-NEXT: movq %r11, %rax
+; SSE2-NEXT: adcq %rbx, %rax
+; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: movq %xmm2, %r14
+; SSE2-NEXT: movq %xmm6, %r15
+; SSE2-NEXT: movq %r14, %rax
+; SSE2-NEXT: adcq %r15, %rax
+; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %r13
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %r10
+; SSE2-NEXT: movq %r13, %rax
+; SSE2-NEXT: adcq %r10, %rax
+; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: movq %xmm1, %r9
+; SSE2-NEXT: movq %xmm5, %r8
+; SSE2-NEXT: movq %r9, %rax
+; SSE2-NEXT: adcq %r8, %rax
+; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE2-NEXT: movb $1, %al
+; SSE2-NEXT: addb $-1, %al
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %rdi
+; SSE2-NEXT: movq %xmm2, %rsi
+; SSE2-NEXT: movq %rdi, %rdx
+; SSE2-NEXT: adcq %rsi, %rdx
+; SSE2-NEXT: movb $1, %dl
+; SSE2-NEXT: setb %bpl
+; SSE2-NEXT: addb $-1, %dl
+; SSE2-NEXT: movq %xmm0, %rcx
+; SSE2-NEXT: movq %xmm4, %rax
+; SSE2-NEXT: movq %rcx, %rdx
+; SSE2-NEXT: adcq %rax, %rdx
+; SSE2-NEXT: leaq 1(%rcx,%rax), %rdx
+; SSE2-NEXT: leaq 1(%rdi,%rsi), %rax
+; SSE2-NEXT: leaq 1(%r9,%r8), %rcx
+; SSE2-NEXT: leaq 1(%r13,%r10), %rdi
+; SSE2-NEXT: leaq 1(%r14,%r15), %rsi
+; SSE2-NEXT: leaq 1(%r11,%rbx), %r11
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE2-NEXT: leaq 1(%r12,%r8), %r9
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; SSE2-NEXT: leaq 1(%r8,%r10), %r10
+; SSE2-NEXT: setb %r8b
+; SSE2-NEXT: movzbl %r8b, %r8d
+; SSE2-NEXT: shrdq $1, %r8, %rdx
+; SSE2-NEXT: movzbl %bpl, %r8d
+; SSE2-NEXT: shrdq $1, %r8, %rax
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
+; SSE2-NEXT: shrdq $1, %r8, %rcx
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
+; SSE2-NEXT: shrdq $1, %r8, %rdi
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
+; SSE2-NEXT: shrdq $1, %r8, %rsi
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
+; SSE2-NEXT: shrdq $1, %r8, %r11
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
+; SSE2-NEXT: shrdq $1, %r8, %r9
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
+; SSE2-NEXT: shrdq $1, %r8, %r10
+; SSE2-NEXT: movq %rdx, %xmm0
+; SSE2-NEXT: movq %rax, %xmm4
+; SSE2-NEXT: movq %rcx, %xmm1
+; SSE2-NEXT: movq %rdi, %xmm5
+; SSE2-NEXT: movq %rsi, %xmm2
+; SSE2-NEXT: movq %r11, %xmm6
+; SSE2-NEXT: movq %r9, %xmm3
+; SSE2-NEXT: movq %r10, %xmm7
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pushq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: pushq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: pushq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: pushq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: pushq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: pushq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: .cfi_offset %rbx, -56
+; SSE4-NEXT: .cfi_offset %r12, -48
+; SSE4-NEXT: .cfi_offset %r13, -40
+; SSE4-NEXT: .cfi_offset %r14, -32
+; SSE4-NEXT: .cfi_offset %r15, -24
+; SSE4-NEXT: .cfi_offset %rbp, -16
+; SSE4-NEXT: movq %xmm3, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %xmm7, %rdx
+; SSE4-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: movq %rcx, %rax
+; SSE4-NEXT: adcq %rdx, %rax
+; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: pextrq $1, %xmm3, %r12
+; SSE4-NEXT: pextrq $1, %xmm7, %rbp
+; SSE4-NEXT: movq %r12, %rax
+; SSE4-NEXT: adcq %rbp, %rax
+; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: movq %xmm2, %r11
+; SSE4-NEXT: movq %xmm6, %rbx
+; SSE4-NEXT: movq %r11, %rax
+; SSE4-NEXT: adcq %rbx, %rax
+; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: pextrq $1, %xmm2, %r14
+; SSE4-NEXT: pextrq $1, %xmm6, %r15
+; SSE4-NEXT: movq %r14, %rax
+; SSE4-NEXT: adcq %r15, %rax
+; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: movq %xmm1, %r13
+; SSE4-NEXT: movq %xmm5, %r10
+; SSE4-NEXT: movq %r13, %rax
+; SSE4-NEXT: adcq %r10, %rax
+; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: pextrq $1, %xmm1, %r9
+; SSE4-NEXT: pextrq $1, %xmm5, %r8
+; SSE4-NEXT: movq %r9, %rax
+; SSE4-NEXT: adcq %r8, %rax
+; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE4-NEXT: movb $1, %al
+; SSE4-NEXT: addb $-1, %al
+; SSE4-NEXT: movq %xmm0, %rdi
+; SSE4-NEXT: movq %xmm4, %rsi
+; SSE4-NEXT: movq %rdi, %rdx
+; SSE4-NEXT: adcq %rsi, %rdx
+; SSE4-NEXT: movb $1, %dl
+; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; SSE4-NEXT: addb $-1, %dl
+; SSE4-NEXT: pextrq $1, %xmm0, %rcx
+; SSE4-NEXT: pextrq $1, %xmm4, %rax
+; SSE4-NEXT: movq %rcx, %rdx
+; SSE4-NEXT: adcq %rax, %rdx
+; SSE4-NEXT: leaq 1(%rcx,%rax), %rdx
+; SSE4-NEXT: leaq 1(%rdi,%rsi), %rax
+; SSE4-NEXT: leaq 1(%r9,%r8), %rcx
+; SSE4-NEXT: leaq 1(%r13,%r10), %rdi
+; SSE4-NEXT: leaq 1(%r14,%r15), %rsi
+; SSE4-NEXT: leaq 1(%r11,%rbx), %r11
+; SSE4-NEXT: leaq 1(%r12,%rbp), %r8
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; SSE4-NEXT: leaq 1(%r9,%r10), %r9
+; SSE4-NEXT: setb %r10b
+; SSE4-NEXT: movzbl %r10b, %r10d
+; SSE4-NEXT: shrdq $1, %r10, %rdx
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; SSE4-NEXT: shrdq $1, %r10, %rax
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; SSE4-NEXT: shrdq $1, %r10, %rcx
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; SSE4-NEXT: shrdq $1, %r10, %rdi
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; SSE4-NEXT: shrdq $1, %r10, %rsi
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; SSE4-NEXT: shrdq $1, %r10, %r11
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; SSE4-NEXT: shrdq $1, %r10, %r8
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; SSE4-NEXT: shrdq $1, %r10, %r9
+; SSE4-NEXT: movq %rdx, %xmm4
+; SSE4-NEXT: movq %rax, %xmm0
+; SSE4-NEXT: movq %rcx, %xmm5
+; SSE4-NEXT: movq %rdi, %xmm1
+; SSE4-NEXT: movq %rsi, %xmm6
+; SSE4-NEXT: movq %r11, %xmm2
+; SSE4-NEXT: movq %r8, %xmm7
+; SSE4-NEXT: movq %r9, %xmm3
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE4-NEXT: popq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: popq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: popq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: popq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: popq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 8
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: vmovq %xmm1, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vmovq %xmm3, %rdx
+; AVX1-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: movq %rcx, %rax
+; AVX1-NEXT: adcq %rdx, %rax
+; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: vpextrq $1, %xmm1, %r12
+; AVX1-NEXT: vpextrq $1, %xmm3, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %r12, %rax
+; AVX1-NEXT: adcq %rcx, %rax
+; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vmovq %xmm1, %r11
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vmovq %xmm3, %rbx
+; AVX1-NEXT: movq %r11, %rax
+; AVX1-NEXT: adcq %rbx, %rax
+; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: vpextrq $1, %xmm1, %r14
+; AVX1-NEXT: vpextrq $1, %xmm3, %r15
+; AVX1-NEXT: movq %r14, %rax
+; AVX1-NEXT: adcq %r15, %rax
+; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: vmovq %xmm0, %r13
+; AVX1-NEXT: vmovq %xmm2, %r10
+; AVX1-NEXT: movq %r13, %rax
+; AVX1-NEXT: adcq %r10, %rax
+; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: vpextrq $1, %xmm0, %r9
+; AVX1-NEXT: vpextrq $1, %xmm2, %r8
+; AVX1-NEXT: movq %r9, %rax
+; AVX1-NEXT: adcq %r8, %rax
+; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX1-NEXT: movb $1, %al
+; AVX1-NEXT: addb $-1, %al
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1
+; AVX1-NEXT: vmovq %xmm0, %rdi
+; AVX1-NEXT: vmovq %xmm1, %rsi
+; AVX1-NEXT: movq %rdi, %rcx
+; AVX1-NEXT: adcq %rsi, %rcx
+; AVX1-NEXT: movb $1, %cl
+; AVX1-NEXT: setb %bpl
+; AVX1-NEXT: addb $-1, %cl
+; AVX1-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX1-NEXT: vpextrq $1, %xmm1, %rax
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: adcq %rax, %rcx
+; AVX1-NEXT: leaq 1(%rdx,%rax), %rcx
+; AVX1-NEXT: leaq 1(%rdi,%rsi), %rax
+; AVX1-NEXT: leaq 1(%r9,%r8), %rdx
+; AVX1-NEXT: leaq 1(%r13,%r10), %rdi
+; AVX1-NEXT: leaq 1(%r14,%r15), %rsi
+; AVX1-NEXT: leaq 1(%r11,%rbx), %r11
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX1-NEXT: leaq 1(%r12,%r8), %r9
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX1-NEXT: leaq 1(%r8,%r10), %r8
+; AVX1-NEXT: setb %r10b
+; AVX1-NEXT: movzbl %r10b, %r10d
+; AVX1-NEXT: shrdq $1, %r10, %rcx
+; AVX1-NEXT: movzbl %bpl, %r10d
+; AVX1-NEXT: shrdq $1, %r10, %rax
+; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX1-NEXT: shrdq $1, %r10, %rdx
+; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX1-NEXT: shrdq $1, %r10, %rdi
+; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX1-NEXT: shrdq $1, %r10, %rsi
+; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX1-NEXT: shrdq $1, %r10, %r11
+; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX1-NEXT: shrdq $1, %r10, %r9
+; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX1-NEXT: shrdq $1, %r10, %r8
+; AVX1-NEXT: vmovq %rcx, %xmm0
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vmovq %rdx, %xmm2
+; AVX1-NEXT: vmovq %rdi, %xmm3
+; AVX1-NEXT: vmovq %rsi, %xmm4
+; AVX1-NEXT: vmovq %r11, %xmm5
+; AVX1-NEXT: vmovq %r9, %xmm6
+; AVX1-NEXT: vmovq %r8, %xmm7
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 8
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: vmovq %xmm1, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vmovq %xmm3, %rdx
+; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: movq %rcx, %rax
+; AVX2-NEXT: adcq %rdx, %rax
+; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: vpextrq $1, %xmm1, %r12
+; AVX2-NEXT: vpextrq $1, %xmm3, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %r12, %rax
+; AVX2-NEXT: adcq %rcx, %rax
+; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vmovq %xmm1, %r11
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
+; AVX2-NEXT: vmovq %xmm3, %rbx
+; AVX2-NEXT: movq %r11, %rax
+; AVX2-NEXT: adcq %rbx, %rax
+; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: vpextrq $1, %xmm1, %r14
+; AVX2-NEXT: vpextrq $1, %xmm3, %r15
+; AVX2-NEXT: movq %r14, %rax
+; AVX2-NEXT: adcq %r15, %rax
+; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: vmovq %xmm0, %r13
+; AVX2-NEXT: vmovq %xmm2, %r10
+; AVX2-NEXT: movq %r13, %rax
+; AVX2-NEXT: adcq %r10, %rax
+; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: vpextrq $1, %xmm0, %r9
+; AVX2-NEXT: vpextrq $1, %xmm2, %r8
+; AVX2-NEXT: movq %r9, %rax
+; AVX2-NEXT: adcq %r8, %rax
+; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX2-NEXT: movb $1, %al
+; AVX2-NEXT: addb $-1, %al
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm1
+; AVX2-NEXT: vmovq %xmm0, %rdi
+; AVX2-NEXT: vmovq %xmm1, %rsi
+; AVX2-NEXT: movq %rdi, %rcx
+; AVX2-NEXT: adcq %rsi, %rcx
+; AVX2-NEXT: movb $1, %cl
+; AVX2-NEXT: setb %bpl
+; AVX2-NEXT: addb $-1, %cl
+; AVX2-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX2-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-NEXT: movq %rdx, %rcx
+; AVX2-NEXT: adcq %rax, %rcx
+; AVX2-NEXT: leaq 1(%rdx,%rax), %rcx
+; AVX2-NEXT: leaq 1(%rdi,%rsi), %rax
+; AVX2-NEXT: leaq 1(%r9,%r8), %rdx
+; AVX2-NEXT: leaq 1(%r13,%r10), %rdi
+; AVX2-NEXT: leaq 1(%r14,%r15), %rsi
+; AVX2-NEXT: leaq 1(%r11,%rbx), %r11
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX2-NEXT: leaq 1(%r12,%r8), %r9
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX2-NEXT: leaq 1(%r8,%r10), %r8
+; AVX2-NEXT: setb %r10b
+; AVX2-NEXT: movzbl %r10b, %r10d
+; AVX2-NEXT: shrdq $1, %r10, %rcx
+; AVX2-NEXT: movzbl %bpl, %r10d
+; AVX2-NEXT: shrdq $1, %r10, %rax
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX2-NEXT: shrdq $1, %r10, %rdx
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX2-NEXT: shrdq $1, %r10, %rdi
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX2-NEXT: shrdq $1, %r10, %rsi
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX2-NEXT: shrdq $1, %r10, %r11
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX2-NEXT: shrdq $1, %r10, %r9
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX2-NEXT: shrdq $1, %r10, %r8
+; AVX2-NEXT: vmovq %rcx, %xmm0
+; AVX2-NEXT: vmovq %rax, %xmm1
+; AVX2-NEXT: vmovq %rdx, %xmm2
+; AVX2-NEXT: vmovq %rdi, %xmm3
+; AVX2-NEXT: vmovq %rsi, %xmm4
+; AVX2-NEXT: vmovq %r11, %xmm5
+; AVX2-NEXT: vmovq %r9, %xmm6
+; AVX2-NEXT: vmovq %r8, %xmm7
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 8
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: .cfi_offset %rbx, -56
+; AVX512-NEXT: .cfi_offset %r12, -48
+; AVX512-NEXT: .cfi_offset %r13, -40
+; AVX512-NEXT: .cfi_offset %r14, -32
+; AVX512-NEXT: .cfi_offset %r15, -24
+; AVX512-NEXT: .cfi_offset %rbp, -16
+; AVX512-NEXT: vmovq %xmm0, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vmovq %xmm1, %rdx
+; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: movq %rcx, %rax
+; AVX512-NEXT: adcq %rdx, %rax
+; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: vpextrq $1, %xmm0, %r12
+; AVX512-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: movq %r12, %rax
+; AVX512-NEXT: adcq %rcx, %rax
+; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %r11
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX512-NEXT: vmovq %xmm3, %rbx
+; AVX512-NEXT: movq %r11, %rax
+; AVX512-NEXT: adcq %rbx, %rax
+; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: vpextrq $1, %xmm2, %r14
+; AVX512-NEXT: vpextrq $1, %xmm3, %r15
+; AVX512-NEXT: movq %r14, %rax
+; AVX512-NEXT: adcq %r15, %rax
+; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT: vmovq %xmm0, %r13
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; AVX512-NEXT: vmovq %xmm1, %r10
+; AVX512-NEXT: movq %r13, %rax
+; AVX512-NEXT: adcq %r10, %rax
+; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: vpextrq $1, %xmm0, %r9
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: vpextrq $1, %xmm1, %r8
+; AVX512-NEXT: movq %r9, %rax
+; AVX512-NEXT: adcq %r8, %rax
+; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; AVX512-NEXT: movb $1, %al
+; AVX512-NEXT: addb $-1, %al
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512-NEXT: vmovq %xmm0, %rdi
+; AVX512-NEXT: vmovq %xmm1, %rsi
+; AVX512-NEXT: movq %rdi, %rcx
+; AVX512-NEXT: adcq %rsi, %rcx
+; AVX512-NEXT: movb $1, %cl
+; AVX512-NEXT: setb %bpl
+; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512-NEXT: vpextrq $1, %xmm1, %rax
+; AVX512-NEXT: addb $-1, %cl
+; AVX512-NEXT: movq %rdx, %rcx
+; AVX512-NEXT: adcq %rax, %rcx
+; AVX512-NEXT: leaq 1(%rdx,%rax), %rcx
+; AVX512-NEXT: leaq 1(%rdi,%rsi), %rax
+; AVX512-NEXT: leaq 1(%r9,%r8), %rdx
+; AVX512-NEXT: leaq 1(%r13,%r10), %rdi
+; AVX512-NEXT: leaq 1(%r14,%r15), %rsi
+; AVX512-NEXT: leaq 1(%r11,%rbx), %r11
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX512-NEXT: leaq 1(%r12,%r8), %r9
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX512-NEXT: leaq 1(%r8,%r10), %r8
+; AVX512-NEXT: setb %r10b
+; AVX512-NEXT: movzbl %r10b, %r10d
+; AVX512-NEXT: shrdq $1, %r10, %rcx
+; AVX512-NEXT: movzbl %bpl, %r10d
+; AVX512-NEXT: shrdq $1, %r10, %rax
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX512-NEXT: shrdq $1, %r10, %rdx
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX512-NEXT: shrdq $1, %r10, %rdi
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX512-NEXT: shrdq $1, %r10, %rsi
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX512-NEXT: shrdq $1, %r10, %r11
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX512-NEXT: shrdq $1, %r10, %r9
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; AVX512-NEXT: shrdq $1, %r10, %r8
+; AVX512-NEXT: vmovq %rcx, %xmm0
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vmovq %rdx, %xmm2
+; AVX512-NEXT: vmovq %rdi, %xmm3
+; AVX512-NEXT: vmovq %rsi, %xmm4
+; AVX512-NEXT: vmovq %r11, %xmm5
+; AVX512-NEXT: vmovq %r9, %xmm6
+; AVX512-NEXT: vmovq %r8, %xmm7
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 8
+; AVX512-NEXT: retq
+ %x0 = zext <8 x i64> %a0 to <8 x i128>
+ %x1 = zext <8 x i64> %a1 to <8 x i128>
+ %sum = add <8 x i128> %x0, %x1
+ %inc = add <8 x i128> %sum, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1>
+ %shift = lshr <8 x i128> %inc, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <8 x i128> %shift to <8 x i64>
+ ret <8 x i64> %res
+}
+
diff --git a/llvm/test/CodeGen/X86/avgfloors.ll b/llvm/test/CodeGen/X86/avgfloors.ll
new file mode 100644
index 0000000..a3864ab
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avgfloors.ll
@@ -0,0 +1,3437 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE4
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
+;
+; 128-bit vectors
+;
+
+define <16 x i8> @test_fixed_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: test_fixed_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: paddb %xmm2, %xmm0
+; SSE-NEXT: psubb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %and = and <16 x i8> %a0, %a1
+ %xor = xor <16 x i8> %a0, %a1
+ %shift = ashr <16 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = add <16 x i8> %and, %shift
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_ext_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE2-LABEL: test_ext_v16i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: paddw %xmm2, %xmm0
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm1
+; SSE2-NEXT: paddw %xmm3, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm2, %xmm2
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm4
+; SSE4-NEXT: paddw %xmm2, %xmm4
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm0
+; SSE4-NEXT: paddw %xmm3, %xmm0
+; SSE4-NEXT: psrlw $1, %xmm4
+; SSE4-NEXT: psrlw $1, %xmm0
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
+; SSE4-NEXT: pand %xmm1, %xmm0
+; SSE4-NEXT: pand %xmm1, %xmm4
+; SSE4-NEXT: packuswb %xmm4, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX512-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX512-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovwb %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = sext <16 x i8> %a0 to <16 x i16>
+ %x1 = sext <16 x i8> %a1 to <16 x i16>
+ %sum = add <16 x i16> %x0, %x1
+ %shift = ashr <16 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <16 x i16> %shift to <16 x i8>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @test_fixed_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: test_fixed_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: paddw %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddw %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %and = and <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a1, %a0
+ %shift = ashr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <8 x i16> %and, %shift
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_ext_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE2-LABEL: test_ext_v8i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: paddd %xmm2, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: paddd %xmm3, %xmm0
+; SSE2-NEXT: pslld $15, %xmm4
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm2, %xmm2
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm4
+; SSE4-NEXT: paddd %xmm2, %xmm4
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm0
+; SSE4-NEXT: paddd %xmm3, %xmm0
+; SSE4-NEXT: psrld $1, %xmm4
+; SSE4-NEXT: psrld $1, %xmm0
+; SSE4-NEXT: pxor %xmm1, %xmm1
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2],xmm1[3],xmm4[4],xmm1[5],xmm4[6],xmm1[7]
+; SSE4-NEXT: packusdw %xmm4, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX512-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = sext <8 x i16> %a0 to <8 x i32>
+ %x1 = sext <8 x i16> %a1 to <8 x i32>
+ %sum = add <8 x i32> %x0, %x1
+ %shift = ashr <8 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <8 x i32> %shift to <8 x i16>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: test_fixed_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %and = and <4 x i32> %a0, %a1
+ %xor = xor <4 x i32> %a1, %a0
+ %shift = ashr <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1>
+ %res = add <4 x i32> %and, %shift
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @test_ext_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE2-LABEL: test_ext_v4i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm3
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT: paddq %xmm1, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
+; SSE2-NEXT: paddq %xmm4, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm3
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm0
+; SSE4-NEXT: paddq %xmm2, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm1
+; SSE4-NEXT: paddq %xmm3, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[0,2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX512-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = sext <4 x i32> %a0 to <4 x i64>
+ %x1 = sext <4 x i32> %a1 to <4 x i64>
+ %sum = add <4 x i64> %x0, %x1
+ %shift = ashr <4 x i64> %sum, <i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <4 x i64> %shift to <4 x i32>
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE2-LABEL: test_fixed_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT: paddq %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_fixed_v2i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm0, %xmm2
+; SSE4-NEXT: pand %xmm1, %xmm2
+; SSE4-NEXT: pxor %xmm1, %xmm0
+; SSE4-NEXT: movdqa %xmm0, %xmm1
+; SSE4-NEXT: psrad $1, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; SSE4-NEXT: paddq %xmm2, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX2-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpsraq $1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: retq
+ %and = and <2 x i64> %a0, %a1
+ %xor = xor <2 x i64> %a1, %a0
+ %shift = ashr <2 x i64> %xor, <i64 1, i64 1>
+ %res = add <2 x i64> %and, %shift
+ ret <2 x i64> %res
+}
+
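+; For i64 elements the ext form widens to i128, which has no legal vector type,
+; so the sums are built in scalar registers with add/adc and narrowed with shld.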
+define <2 x i64> @test_ext_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE2-LABEL: test_ext_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %xmm0, %rdx
+; SSE2-NEXT: movq %rdx, %rsi
+; SSE2-NEXT: sarq $63, %rsi
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdi
+; SSE2-NEXT: movq %rdi, %r8
+; SSE2-NEXT: sarq $63, %r8
+; SSE2-NEXT: movq %xmm1, %r9
+; SSE2-NEXT: movq %r9, %r10
+; SSE2-NEXT: sarq $63, %r10
+; SSE2-NEXT: addq %r9, %rdx
+; SSE2-NEXT: adcq %rsi, %r10
+; SSE2-NEXT: addq %rdi, %rax
+; SSE2-NEXT: adcq %rcx, %r8
+; SSE2-NEXT: shldq $63, %rax, %r8
+; SSE2-NEXT: shldq $63, %rdx, %r10
+; SSE2-NEXT: movq %r10, %xmm0
+; SSE2-NEXT: movq %r8, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v2i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movq %xmm0, %rax
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: pextrq $1, %xmm0, %rdx
+; SSE4-NEXT: movq %rdx, %rsi
+; SSE4-NEXT: sarq $63, %rsi
+; SSE4-NEXT: movq %xmm1, %rdi
+; SSE4-NEXT: movq %rdi, %r8
+; SSE4-NEXT: sarq $63, %r8
+; SSE4-NEXT: pextrq $1, %xmm1, %r9
+; SSE4-NEXT: movq %r9, %r10
+; SSE4-NEXT: sarq $63, %r10
+; SSE4-NEXT: addq %r9, %rdx
+; SSE4-NEXT: adcq %rsi, %r10
+; SSE4-NEXT: addq %rdi, %rax
+; SSE4-NEXT: adcq %rcx, %r8
+; SSE4-NEXT: shldq $63, %rax, %r8
+; SSE4-NEXT: shldq $63, %rdx, %r10
+; SSE4-NEXT: movq %r10, %xmm1
+; SSE4-NEXT: movq %r8, %xmm0
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE4-NEXT: retq
+;
+; AVX-LABEL: test_ext_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: movq %rax, %rcx
+; AVX-NEXT: sarq $63, %rcx
+; AVX-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX-NEXT: movq %rdx, %rsi
+; AVX-NEXT: sarq $63, %rsi
+; AVX-NEXT: vmovq %xmm1, %rdi
+; AVX-NEXT: movq %rdi, %r8
+; AVX-NEXT: sarq $63, %r8
+; AVX-NEXT: vpextrq $1, %xmm1, %r9
+; AVX-NEXT: movq %r9, %r10
+; AVX-NEXT: sarq $63, %r10
+; AVX-NEXT: addq %r9, %rdx
+; AVX-NEXT: adcq %rsi, %r10
+; AVX-NEXT: addq %rdi, %rax
+; AVX-NEXT: adcq %rcx, %r8
+; AVX-NEXT: shldq $63, %rax, %r8
+; AVX-NEXT: shldq $63, %rdx, %r10
+; AVX-NEXT: vmovq %r10, %xmm0
+; AVX-NEXT: vmovq %r8, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: retq
+ %x0 = sext <2 x i64> %a0 to <2 x i128>
+ %x1 = sext <2 x i64> %a1 to <2 x i128>
+ %sum = add <2 x i128> %x0, %x1
+ %shift = ashr <2 x i128> %sum, <i128 1, i128 1>
+ %res = trunc <2 x i128> %shift to <2 x i64>
+ ret <2 x i64> %res
+}
+
+;
+; 256-bit vectors
+;
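+; On AVX1 the 256-bit cases are split into 128-bit halves (vextractf128 /
+; vinsertf128); AVX2 and AVX512 work on whole ymm (or widened zmm) values.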
+
+define <32 x i8> @test_fixed_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE-LABEL: test_fixed_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: paddb %xmm4, %xmm1
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm0
+; SSE-NEXT: paddb %xmm5, %xmm0
+; SSE-NEXT: psubb %xmm3, %xmm0
+; SSE-NEXT: psubb %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsubb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} ymm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm0
+; AVX512-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %and = and <32 x i8> %a0, %a1
+ %xor = xor <32 x i8> %a0, %a1
+ %shift = ashr <32 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = add <32 x i8> %and, %shift
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_ext_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE2-LABEL: test_ext_v32i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE2-NEXT: psraw $8, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
+; SSE2-NEXT: psraw $8, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: psraw $8, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
+; SSE2-NEXT: psraw $8, %xmm7
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: psraw $8, %xmm1
+; SSE2-NEXT: paddw %xmm4, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: paddw %xmm5, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: paddw %xmm6, %xmm0
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: paddw %xmm7, %xmm2
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm3
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: packuswb %xmm3, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v32i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm4, %xmm5
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm6
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm7
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm8
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm4
+; SSE4-NEXT: paddw %xmm5, %xmm4
+; SSE4-NEXT: pmovsxbw %xmm3, %xmm1
+; SSE4-NEXT: paddw %xmm6, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm3
+; SSE4-NEXT: paddw %xmm7, %xmm3
+; SSE4-NEXT: pmovsxbw %xmm2, %xmm0
+; SSE4-NEXT: paddw %xmm8, %xmm0
+; SSE4-NEXT: psrlw $1, %xmm4
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm3
+; SSE4-NEXT: psrlw $1, %xmm0
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE4-NEXT: pand %xmm2, %xmm0
+; SSE4-NEXT: pand %xmm2, %xmm3
+; SSE4-NEXT: packuswb %xmm3, %xmm0
+; SSE4-NEXT: pand %xmm2, %xmm1
+; SSE4-NEXT: pand %xmm2, %xmm4
+; SSE4-NEXT: packuswb %xmm4, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm5, %xmm5
+; AVX1-NEXT: vpaddw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm5
+; AVX1-NEXT: vpaddw %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm5, %xmm5
+; AVX1-NEXT: vpaddw %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm3
+; AVX2-NEXT: vpaddw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpackuswb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxbw %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxbw %ymm1, %zmm1
+; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = sext <32 x i8> %a0 to <32 x i16>
+ %x1 = sext <32 x i8> %a1 to <32 x i16>
+ %sum = add <32 x i16> %x0, %x1
+ %shift = ashr <32 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <32 x i16> %shift to <32 x i8>
+ ret <32 x i8> %res
+}
+
+define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE-LABEL: test_fixed_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psraw $1, %xmm1
+; SSE-NEXT: paddw %xmm4, %xmm1
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: paddw %xmm5, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddw %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddw %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddw %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %and = and <16 x i16> %a0, %a1
+ %xor = xor <16 x i16> %a1, %a0
+ %shift = ashr <16 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <16 x i16> %and, %shift
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_ext_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE2-LABEL: test_ext_v16i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
+; SSE2-NEXT: psrad $16, %xmm6
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
+; SSE2-NEXT: psrad $16, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm2[4],xmm8[5],xmm2[5],xmm8[6],xmm2[6],xmm8[7],xmm2[7]
+; SSE2-NEXT: psrad $16, %xmm8
+; SSE2-NEXT: paddd %xmm4, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: paddd %xmm5, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: paddd %xmm6, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: paddd %xmm7, %xmm1
+; SSE2-NEXT: pslld $15, %xmm8
+; SSE2-NEXT: psrad $16, %xmm8
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm8, %xmm0
+; SSE2-NEXT: pslld $15, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: pslld $15, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm4, %xmm5
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm6
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm7
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm8
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm4
+; SSE4-NEXT: paddd %xmm5, %xmm4
+; SSE4-NEXT: pmovsxwd %xmm3, %xmm1
+; SSE4-NEXT: paddd %xmm6, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm3
+; SSE4-NEXT: paddd %xmm7, %xmm3
+; SSE4-NEXT: pmovsxwd %xmm2, %xmm0
+; SSE4-NEXT: paddd %xmm8, %xmm0
+; SSE4-NEXT: psrld $1, %xmm4
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: psrld $1, %xmm3
+; SSE4-NEXT: psrld $1, %xmm0
+; SSE4-NEXT: pxor %xmm2, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1],xmm3[2],xmm2[3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
+; SSE4-NEXT: packusdw %xmm3, %xmm0
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0],xmm2[1],xmm4[2],xmm2[3],xmm4[4],xmm2[5],xmm4[6],xmm2[7]
+; SSE4-NEXT: packusdw %xmm4, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm5, %xmm5
+; AVX1-NEXT: vpaddd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm5
+; AVX1-NEXT: vpaddd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm5, %xmm5
+; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm3
+; AVX2-NEXT: vpaddd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm2, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7],ymm1[8],ymm2[9],ymm1[10],ymm2[11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
+; AVX2-NEXT: vpackusdw %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = sext <16 x i16> %a0 to <16 x i32>
+ %x1 = sext <16 x i16> %a1 to <16 x i32>
+ %sum = add <16 x i32> %x0, %x1
+ %shift = ashr <16 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <16 x i32> %shift to <16 x i16>
+ ret <16 x i16> %res
+}
+
+define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE-LABEL: test_fixed_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrad $1, %xmm1
+; SSE-NEXT: paddd %xmm4, %xmm1
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: paddd %xmm5, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddd %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddd %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %and = and <8 x i32> %a0, %a1
+ %xor = xor <8 x i32> %a1, %a0
+ %shift = ashr <8 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = add <8 x i32> %and, %shift
+ ret <8 x i32> %res
+}
+
+define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE2-LABEL: test_ext_v8i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; SSE2-NEXT: paddq %xmm3, %xmm1
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm3
+; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
+; SSE2-NEXT: paddq %xmm6, %xmm8
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE2-NEXT: paddq %xmm2, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; SSE2-NEXT: paddq %xmm7, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm8
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm8[0,2]
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm4
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm5
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm6
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm7
+; SSE4-NEXT: pmovsxdq %xmm3, %xmm1
+; SSE4-NEXT: paddq %xmm4, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm3
+; SSE4-NEXT: paddq %xmm5, %xmm3
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm0
+; SSE4-NEXT: paddq %xmm6, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm2
+; SSE4-NEXT: paddq %xmm7, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm5[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm6, %xmm6
+; AVX1-NEXT: vpaddq %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm6, %xmm6
+; AVX1-NEXT: vpaddq %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxdq %xmm5, %xmm5
+; AVX1-NEXT: vpaddq %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpmovsxdq %xmm3, %ymm3
+; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm2[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
+; AVX512-NEXT: vpmovsxdq %ymm1, %zmm1
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = sext <8 x i32> %a0 to <8 x i64>
+ %x1 = sext <8 x i32> %a1 to <8 x i64>
+ %sum = add <8 x i64> %x0, %x1
+ %shift = ashr <8 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <8 x i64> %shift to <8 x i32>
+ ret <8 x i32> %res
+}
+
+define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE2-LABEL: test_fixed_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: pand %xmm2, %xmm5
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: paddq %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT: paddq %xmm5, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_fixed_v4i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm1, %xmm4
+; SSE4-NEXT: pand %xmm3, %xmm4
+; SSE4-NEXT: movdqa %xmm0, %xmm5
+; SSE4-NEXT: pand %xmm2, %xmm5
+; SSE4-NEXT: pxor %xmm2, %xmm0
+; SSE4-NEXT: pxor %xmm3, %xmm1
+; SSE4-NEXT: movdqa %xmm1, %xmm2
+; SSE4-NEXT: psrad $1, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; SSE4-NEXT: paddq %xmm4, %xmm1
+; SSE4-NEXT: movdqa %xmm0, %xmm2
+; SSE4-NEXT: psrad $1, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE4-NEXT: paddq %xmm5, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3],xmm3[4,5],xmm1[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddq %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsraq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %and = and <4 x i64> %a0, %a1
+ %xor = xor <4 x i64> %a1, %a0
+ %shift = ashr <4 x i64> %xor, <i64 1, i64 1, i64 1, i64 1>
+ %res = add <4 x i64> %and, %shift
+ ret <4 x i64> %res
+}
+
+define <4 x i64> @test_ext_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE2-LABEL: test_ext_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: .cfi_offset %rbx, -56
+; SSE2-NEXT: .cfi_offset %r12, -48
+; SSE2-NEXT: .cfi_offset %r13, -40
+; SSE2-NEXT: .cfi_offset %r14, -32
+; SSE2-NEXT: .cfi_offset %r15, -24
+; SSE2-NEXT: .cfi_offset %rbp, -16
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm4, %rdx
+; SSE2-NEXT: movq %rdx, %r14
+; SSE2-NEXT: sarq $63, %r14
+; SSE2-NEXT: movq %xmm1, %rcx
+; SSE2-NEXT: movq %rcx, %r10
+; SSE2-NEXT: sarq $63, %r10
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %rsi
+; SSE2-NEXT: movq %rsi, %r11
+; SSE2-NEXT: sarq $63, %r11
+; SSE2-NEXT: movq %xmm0, %r8
+; SSE2-NEXT: movq %r8, %rbx
+; SSE2-NEXT: sarq $63, %rbx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdi
+; SSE2-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %rdi
+; SSE2-NEXT: movq %xmm3, %r15
+; SSE2-NEXT: movq %r15, %r9
+; SSE2-NEXT: sarq $63, %r9
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r12
+; SSE2-NEXT: movq %r12, %r13
+; SSE2-NEXT: sarq $63, %r13
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: movq %rax, %rbp
+; SSE2-NEXT: sarq $63, %rbp
+; SSE2-NEXT: addq %rax, %r8
+; SSE2-NEXT: adcq %rbx, %rbp
+; SSE2-NEXT: addq %r12, %rsi
+; SSE2-NEXT: adcq %r11, %r13
+; SSE2-NEXT: addq %r15, %rcx
+; SSE2-NEXT: adcq %r10, %r9
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; SSE2-NEXT: adcq %r14, %rdi
+; SSE2-NEXT: shldq $63, %rdx, %rdi
+; SSE2-NEXT: shldq $63, %rcx, %r9
+; SSE2-NEXT: shldq $63, %rsi, %r13
+; SSE2-NEXT: shldq $63, %r8, %rbp
+; SSE2-NEXT: movq %rbp, %xmm0
+; SSE2-NEXT: movq %r13, %xmm2
+; SSE2-NEXT: movq %r9, %xmm1
+; SSE2-NEXT: movq %rdi, %xmm3
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pushq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: pushq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: pushq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: pushq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: pushq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: pushq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: .cfi_offset %rbx, -56
+; SSE4-NEXT: .cfi_offset %r12, -48
+; SSE4-NEXT: .cfi_offset %r13, -40
+; SSE4-NEXT: .cfi_offset %r14, -32
+; SSE4-NEXT: .cfi_offset %r15, -24
+; SSE4-NEXT: .cfi_offset %rbp, -16
+; SSE4-NEXT: movq %xmm1, %rdi
+; SSE4-NEXT: movq %rdi, %r14
+; SSE4-NEXT: sarq $63, %r14
+; SSE4-NEXT: pextrq $1, %xmm1, %rcx
+; SSE4-NEXT: movq %rcx, %r10
+; SSE4-NEXT: sarq $63, %r10
+; SSE4-NEXT: movq %xmm0, %rsi
+; SSE4-NEXT: movq %rsi, %r11
+; SSE4-NEXT: sarq $63, %r11
+; SSE4-NEXT: pextrq $1, %xmm0, %r8
+; SSE4-NEXT: movq %r8, %rbx
+; SSE4-NEXT: sarq $63, %rbx
+; SSE4-NEXT: movq %xmm3, %rdx
+; SSE4-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %rdx
+; SSE4-NEXT: pextrq $1, %xmm3, %r15
+; SSE4-NEXT: movq %r15, %r9
+; SSE4-NEXT: sarq $63, %r9
+; SSE4-NEXT: movq %xmm2, %r12
+; SSE4-NEXT: movq %r12, %r13
+; SSE4-NEXT: sarq $63, %r13
+; SSE4-NEXT: pextrq $1, %xmm2, %rax
+; SSE4-NEXT: movq %rax, %rbp
+; SSE4-NEXT: sarq $63, %rbp
+; SSE4-NEXT: addq %rax, %r8
+; SSE4-NEXT: adcq %rbx, %rbp
+; SSE4-NEXT: addq %r12, %rsi
+; SSE4-NEXT: adcq %r11, %r13
+; SSE4-NEXT: addq %r15, %rcx
+; SSE4-NEXT: adcq %r10, %r9
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
+; SSE4-NEXT: adcq %r14, %rdx
+; SSE4-NEXT: shldq $63, %rdi, %rdx
+; SSE4-NEXT: shldq $63, %rcx, %r9
+; SSE4-NEXT: shldq $63, %rsi, %r13
+; SSE4-NEXT: shldq $63, %r8, %rbp
+; SSE4-NEXT: movq %rbp, %xmm2
+; SSE4-NEXT: movq %r13, %xmm0
+; SSE4-NEXT: movq %r9, %xmm3
+; SSE4-NEXT: movq %rdx, %xmm1
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE4-NEXT: popq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: popq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: popq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: popq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: popq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 8
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: vmovq %xmm0, %rdx
+; AVX1-NEXT: movq %rdx, %r14
+; AVX1-NEXT: sarq $63, %r14
+; AVX1-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX1-NEXT: movq %rcx, %r10
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %rsi
+; AVX1-NEXT: movq %rsi, %r11
+; AVX1-NEXT: sarq $63, %r11
+; AVX1-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX1-NEXT: movq %rdi, %rbx
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vmovq %xmm1, %r8
+; AVX1-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r8
+; AVX1-NEXT: vpextrq $1, %xmm1, %r15
+; AVX1-NEXT: movq %r15, %r9
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %r12
+; AVX1-NEXT: movq %r12, %r13
+; AVX1-NEXT: sarq $63, %r13
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: movq %rax, %rbp
+; AVX1-NEXT: sarq $63, %rbp
+; AVX1-NEXT: addq %rax, %rdi
+; AVX1-NEXT: adcq %rbx, %rbp
+; AVX1-NEXT: addq %r12, %rsi
+; AVX1-NEXT: adcq %r11, %r13
+; AVX1-NEXT: addq %r15, %rcx
+; AVX1-NEXT: adcq %r10, %r9
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; AVX1-NEXT: adcq %r14, %r8
+; AVX1-NEXT: shldq $63, %rdx, %r8
+; AVX1-NEXT: shldq $63, %rcx, %r9
+; AVX1-NEXT: shldq $63, %rsi, %r13
+; AVX1-NEXT: shldq $63, %rdi, %rbp
+; AVX1-NEXT: vmovq %rbp, %xmm0
+; AVX1-NEXT: vmovq %r13, %xmm1
+; AVX1-NEXT: vmovq %r9, %xmm2
+; AVX1-NEXT: vmovq %r8, %xmm3
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 8
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: vmovq %xmm0, %rdx
+; AVX2-NEXT: movq %rdx, %r14
+; AVX2-NEXT: sarq $63, %r14
+; AVX2-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX2-NEXT: movq %rcx, %r10
+; AVX2-NEXT: sarq $63, %r10
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rsi
+; AVX2-NEXT: movq %rsi, %r11
+; AVX2-NEXT: sarq $63, %r11
+; AVX2-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX2-NEXT: movq %rdi, %rbx
+; AVX2-NEXT: sarq $63, %rbx
+; AVX2-NEXT: vmovq %xmm1, %r8
+; AVX2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r8
+; AVX2-NEXT: vpextrq $1, %xmm1, %r15
+; AVX2-NEXT: movq %r15, %r9
+; AVX2-NEXT: sarq $63, %r9
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %r12
+; AVX2-NEXT: movq %r12, %r13
+; AVX2-NEXT: sarq $63, %r13
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: movq %rax, %rbp
+; AVX2-NEXT: sarq $63, %rbp
+; AVX2-NEXT: addq %rax, %rdi
+; AVX2-NEXT: adcq %rbx, %rbp
+; AVX2-NEXT: addq %r12, %rsi
+; AVX2-NEXT: adcq %r11, %r13
+; AVX2-NEXT: addq %r15, %rcx
+; AVX2-NEXT: adcq %r10, %r9
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; AVX2-NEXT: adcq %r14, %r8
+; AVX2-NEXT: shldq $63, %rdx, %r8
+; AVX2-NEXT: shldq $63, %rcx, %r9
+; AVX2-NEXT: shldq $63, %rsi, %r13
+; AVX2-NEXT: shldq $63, %rdi, %rbp
+; AVX2-NEXT: vmovq %rbp, %xmm0
+; AVX2-NEXT: vmovq %r13, %xmm1
+; AVX2-NEXT: vmovq %r9, %xmm2
+; AVX2-NEXT: vmovq %r8, %xmm3
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 8
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: .cfi_offset %rbx, -56
+; AVX512-NEXT: .cfi_offset %r12, -48
+; AVX512-NEXT: .cfi_offset %r13, -40
+; AVX512-NEXT: .cfi_offset %r14, -32
+; AVX512-NEXT: .cfi_offset %r15, -24
+; AVX512-NEXT: .cfi_offset %rbp, -16
+; AVX512-NEXT: vmovq %xmm0, %rdx
+; AVX512-NEXT: movq %rdx, %r14
+; AVX512-NEXT: sarq $63, %r14
+; AVX512-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX512-NEXT: movq %rcx, %r10
+; AVX512-NEXT: sarq $63, %r10
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %rsi
+; AVX512-NEXT: movq %rsi, %r11
+; AVX512-NEXT: sarq $63, %r11
+; AVX512-NEXT: vpextrq $1, %xmm0, %rdi
+; AVX512-NEXT: movq %rdi, %rbx
+; AVX512-NEXT: sarq $63, %rbx
+; AVX512-NEXT: vmovq %xmm1, %r8
+; AVX512-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r8
+; AVX512-NEXT: vpextrq $1, %xmm1, %r15
+; AVX512-NEXT: movq %r15, %r9
+; AVX512-NEXT: sarq $63, %r9
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %r12
+; AVX512-NEXT: movq %r12, %r13
+; AVX512-NEXT: sarq $63, %r13
+; AVX512-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512-NEXT: movq %rax, %rbp
+; AVX512-NEXT: sarq $63, %rbp
+; AVX512-NEXT: addq %rax, %rdi
+; AVX512-NEXT: adcq %rbx, %rbp
+; AVX512-NEXT: addq %r12, %rsi
+; AVX512-NEXT: adcq %r11, %r13
+; AVX512-NEXT: addq %r15, %rcx
+; AVX512-NEXT: adcq %r10, %r9
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; AVX512-NEXT: adcq %r14, %r8
+; AVX512-NEXT: shldq $63, %rdx, %r8
+; AVX512-NEXT: shldq $63, %rcx, %r9
+; AVX512-NEXT: shldq $63, %rsi, %r13
+; AVX512-NEXT: shldq $63, %rdi, %rbp
+; AVX512-NEXT: vmovq %rbp, %xmm0
+; AVX512-NEXT: vmovq %r13, %xmm1
+; AVX512-NEXT: vmovq %r9, %xmm2
+; AVX512-NEXT: vmovq %r8, %xmm3
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 8
+; AVX512-NEXT: retq
+ %x0 = sext <4 x i64> %a0 to <4 x i128>
+ %x1 = sext <4 x i64> %a1 to <4 x i128>
+ %sum = add <4 x i128> %x0, %x1
+ %shift = ashr <4 x i128> %sum, <i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <4 x i128> %shift to <4 x i64>
+ ret <4 x i64> %res
+}
+
+;
+; 512-bit vectors
+;
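+; Only AVX512 keeps the 512-bit cases in zmm registers; the other targets are
+; legalized into multiple xmm or ymm pieces.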
+
+define <64 x i8> @test_fixed_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE-LABEL: test_fixed_v64i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm10
+; SSE-NEXT: pand %xmm7, %xmm10
+; SSE-NEXT: movdqa %xmm2, %xmm11
+; SSE-NEXT: pand %xmm6, %xmm11
+; SSE-NEXT: movdqa %xmm1, %xmm9
+; SSE-NEXT: pand %xmm5, %xmm9
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrlw $1, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm5, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm4, %xmm3
+; SSE-NEXT: paddb %xmm10, %xmm3
+; SSE-NEXT: psrlw $1, %xmm2
+; SSE-NEXT: pand %xmm5, %xmm2
+; SSE-NEXT: pxor %xmm4, %xmm2
+; SSE-NEXT: paddb %xmm11, %xmm2
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: pand %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm4, %xmm1
+; SSE-NEXT: paddb %xmm9, %xmm1
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: paddb %xmm8, %xmm0
+; SSE-NEXT: psubb %xmm4, %xmm0
+; SSE-NEXT: psubb %xmm4, %xmm1
+; SSE-NEXT: psubb %xmm4, %xmm2
+; SSE-NEXT: psubb %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm7
+; AVX1-NEXT: vpand %xmm3, %xmm7, %xmm7
+; AVX1-NEXT: vpxor %xmm6, %xmm7, %xmm7
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm6, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
+; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm6, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm5, %xmm7, %xmm3
+; AVX1-NEXT: vpsubb %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsubb %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpaddb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubb %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm3 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm5, %ymm0, %ymm0
+; AVX2-NEXT: vpsubb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} zmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm0
+; AVX512-NEXT: vpaddb %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpsubb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %and = and <64 x i8> %a0, %a1
+ %xor = xor <64 x i8> %a0, %a1
+ %shift = ashr <64 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = add <64 x i8> %and, %shift
+ ret <64 x i8> %res
+}
+
+define <64 x i8> @test_ext_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE2-LABEL: test_ext_v64i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm3[0],xmm13[1],xmm3[1],xmm13[2],xmm3[2],xmm13[3],xmm3[3],xmm13[4],xmm3[4],xmm13[5],xmm3[5],xmm13[6],xmm3[6],xmm13[7],xmm3[7]
+; SSE2-NEXT: psraw $8, %xmm13
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm3[8],xmm14[9],xmm3[9],xmm14[10],xmm3[10],xmm14[11],xmm3[11],xmm14[12],xmm3[12],xmm14[13],xmm3[13],xmm14[14],xmm3[14],xmm14[15],xmm3[15]
+; SSE2-NEXT: psraw $8, %xmm14
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm2[0],xmm15[1],xmm2[1],xmm15[2],xmm2[2],xmm15[3],xmm2[3],xmm15[4],xmm2[4],xmm15[5],xmm2[5],xmm15[6],xmm2[6],xmm15[7],xmm2[7]
+; SSE2-NEXT: psraw $8, %xmm15
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm2[8],xmm12[9],xmm2[9],xmm12[10],xmm2[10],xmm12[11],xmm2[11],xmm12[12],xmm2[12],xmm12[13],xmm2[13],xmm12[14],xmm2[14],xmm12[15],xmm2[15]
+; SSE2-NEXT: psraw $8, %xmm12
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3],xmm11[4],xmm1[4],xmm11[5],xmm1[5],xmm11[6],xmm1[6],xmm11[7],xmm1[7]
+; SSE2-NEXT: psraw $8, %xmm11
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm1[8],xmm10[9],xmm1[9],xmm10[10],xmm1[10],xmm10[11],xmm1[11],xmm10[12],xmm1[12],xmm10[13],xmm1[13],xmm10[14],xmm1[14],xmm10[15],xmm1[15]
+; SSE2-NEXT: psraw $8, %xmm10
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3],xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; SSE2-NEXT: psraw $8, %xmm9
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
+; SSE2-NEXT: psraw $8, %xmm8
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3],xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7]
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: paddw %xmm13, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm7
+; SSE2-NEXT: paddw %xmm14, %xmm7
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: paddw %xmm15, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm6
+; SSE2-NEXT: paddw %xmm12, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; SSE2-NEXT: psraw $8, %xmm1
+; SSE2-NEXT: paddw %xmm11, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm5
+; SSE2-NEXT: paddw %xmm10, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: paddw %xmm9, %xmm0
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm4
+; SSE2-NEXT: paddw %xmm8, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm3
+; SSE2-NEXT: psrlw $1, %xmm7
+; SSE2-NEXT: psrlw $1, %xmm2
+; SSE2-NEXT: psrlw $1, %xmm6
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm5
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: pand %xmm8, %xmm4
+; SSE2-NEXT: pand %xmm8, %xmm0
+; SSE2-NEXT: packuswb %xmm4, %xmm0
+; SSE2-NEXT: pand %xmm8, %xmm5
+; SSE2-NEXT: pand %xmm8, %xmm1
+; SSE2-NEXT: packuswb %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm8, %xmm6
+; SSE2-NEXT: pand %xmm8, %xmm2
+; SSE2-NEXT: packuswb %xmm6, %xmm2
+; SSE2-NEXT: pand %xmm8, %xmm7
+; SSE2-NEXT: pand %xmm8, %xmm3
+; SSE2-NEXT: packuswb %xmm7, %xmm3
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v64i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm8, %xmm9
+; SSE4-NEXT: pmovsxbw %xmm3, %xmm10
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm3, %xmm11
+; SSE4-NEXT: pmovsxbw %xmm2, %xmm12
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm2, %xmm13
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm14
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm1, %xmm15
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm0
+; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm8
+; SSE4-NEXT: paddw %xmm9, %xmm8
+; SSE4-NEXT: pmovsxbw %xmm7, %xmm3
+; SSE4-NEXT: paddw %xmm10, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm7
+; SSE4-NEXT: paddw %xmm11, %xmm7
+; SSE4-NEXT: pmovsxbw %xmm6, %xmm2
+; SSE4-NEXT: paddw %xmm12, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm6
+; SSE4-NEXT: paddw %xmm13, %xmm6
+; SSE4-NEXT: pmovsxbw %xmm5, %xmm1
+; SSE4-NEXT: paddw %xmm14, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
+; SSE4-NEXT: pmovsxbw %xmm0, %xmm5
+; SSE4-NEXT: paddw %xmm15, %xmm5
+; SSE4-NEXT: pmovsxbw %xmm4, %xmm0
+; SSE4-NEXT: paddw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE4-NEXT: psrlw $1, %xmm8
+; SSE4-NEXT: psrlw $1, %xmm3
+; SSE4-NEXT: psrlw $1, %xmm7
+; SSE4-NEXT: psrlw $1, %xmm2
+; SSE4-NEXT: psrlw $1, %xmm6
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm5
+; SSE4-NEXT: psrlw $1, %xmm0
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE4-NEXT: pand %xmm4, %xmm0
+; SSE4-NEXT: pand %xmm4, %xmm5
+; SSE4-NEXT: packuswb %xmm5, %xmm0
+; SSE4-NEXT: pand %xmm4, %xmm1
+; SSE4-NEXT: pand %xmm4, %xmm6
+; SSE4-NEXT: packuswb %xmm6, %xmm1
+; SSE4-NEXT: pand %xmm4, %xmm2
+; SSE4-NEXT: pand %xmm4, %xmm7
+; SSE4-NEXT: packuswb %xmm7, %xmm2
+; SSE4-NEXT: pand %xmm4, %xmm3
+; SSE4-NEXT: pand %xmm4, %xmm8
+; SSE4-NEXT: packuswb %xmm8, %xmm3
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm6, %xmm6
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm7, %xmm7
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm9, %xmm9
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm10, %xmm10
+; AVX1-NEXT: vpaddw %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovsxbw %xmm3, %xmm10
+; AVX1-NEXT: vpaddw %xmm5, %xmm10, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm10, %xmm10
+; AVX1-NEXT: vpaddw %xmm6, %xmm10, %xmm6
+; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm7
+; AVX1-NEXT: vpaddw %xmm7, %xmm8, %xmm7
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxbw %xmm8, %xmm8
+; AVX1-NEXT: vpaddw %xmm8, %xmm9, %xmm8
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm4
+; AVX1-NEXT: vpsrlw $1, %xmm6, %xmm5
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm7, %xmm6
+; AVX1-NEXT: vpsrlw $1, %xmm8, %xmm7
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm0, %xmm8, %xmm0
+; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm7
+; AVX1-NEXT: vpackuswb %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm6, %xmm8, %xmm6
+; AVX1-NEXT: vpand %xmm3, %xmm8, %xmm3
+; AVX1-NEXT: vpackuswb %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vpand %xmm1, %xmm8, %xmm1
+; AVX1-NEXT: vpand %xmm5, %xmm8, %xmm3
+; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm8, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm8, %xmm2
+; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm5
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxbw %xmm3, %ymm6
+; AVX2-NEXT: vpaddw %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
+; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3
+; AVX2-NEXT: vpaddw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpmovsxbw %xmm2, %ymm3
+; AVX2-NEXT: vpaddw %ymm3, %ymm5, %ymm3
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm4, %ymm2
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm3, %ymm3
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vpackuswb %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpackuswb %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vpmovsxbw %ymm2, %zmm2
+; AVX512-NEXT: vpmovsxbw %ymm0, %zmm0
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512-NEXT: vpmovsxbw %ymm3, %zmm3
+; AVX512-NEXT: vpaddw %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vpmovsxbw %ymm1, %zmm1
+; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm2, %zmm1
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = sext <64 x i8> %a0 to <64 x i16>
+ %x1 = sext <64 x i8> %a1 to <64 x i16>
+ %sum = add <64 x i16> %x0, %x1
+ %shift = ashr <64 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <64 x i16> %shift to <64 x i8>
+ ret <64 x i8> %res
+}
+
+define <32 x i16> @test_fixed_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE-LABEL: test_fixed_v32i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: pand %xmm7, %xmm8
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pand %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: pand %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psraw $1, %xmm3
+; SSE-NEXT: paddw %xmm8, %xmm3
+; SSE-NEXT: psraw $1, %xmm2
+; SSE-NEXT: paddw %xmm9, %xmm2
+; SSE-NEXT: psraw $1, %xmm1
+; SSE-NEXT: paddw %xmm10, %xmm1
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: paddw %xmm11, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpaddw %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpaddw %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsraw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddw %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsraw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %and = and <32 x i16> %a0, %a1
+ %xor = xor <32 x i16> %a1, %a0
+ %shift = ashr <32 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <32 x i16> %and, %shift
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_ext_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE2-LABEL: test_ext_v32i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm3, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
+; SSE2-NEXT: psrad $16, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
+; SSE2-NEXT: psrad $16, %xmm15
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1],xmm14[2],xmm1[2],xmm14[3],xmm1[3]
+; SSE2-NEXT: psrad $16, %xmm14
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm2[4],xmm13[5],xmm2[5],xmm13[6],xmm2[6],xmm13[7],xmm2[7]
+; SSE2-NEXT: psrad $16, %xmm13
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm2[0],xmm12[1],xmm2[1],xmm12[2],xmm2[2],xmm12[3],xmm2[3]
+; SSE2-NEXT: psrad $16, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm9[4],xmm11[5],xmm9[5],xmm11[6],xmm9[6],xmm11[7],xmm9[7]
+; SSE2-NEXT: psrad $16, %xmm11
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; SSE2-NEXT: psrad $16, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
+; SSE2-NEXT: psrad $16, %xmm9
+; SSE2-NEXT: paddd %xmm8, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: paddd %xmm15, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm5[4],xmm8[5],xmm5[5],xmm8[6],xmm5[6],xmm8[7],xmm5[7]
+; SSE2-NEXT: psrad $16, %xmm8
+; SSE2-NEXT: paddd %xmm3, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: paddd %xmm14, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: paddd %xmm13, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: paddd %xmm12, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: paddd %xmm11, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3]
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: paddd %xmm10, %xmm3
+; SSE2-NEXT: pslld $15, %xmm9
+; SSE2-NEXT: psrad $16, %xmm9
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm9, %xmm0
+; SSE2-NEXT: pslld $15, %xmm8
+; SSE2-NEXT: psrad $16, %xmm8
+; SSE2-NEXT: pslld $15, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm8, %xmm1
+; SSE2-NEXT: pslld $15, %xmm5
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: pslld $15, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: packssdw %xmm5, %xmm2
+; SSE2-NEXT: pslld $15, %xmm4
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: pslld $15, %xmm3
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: packssdw %xmm4, %xmm3
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v32i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm8, %xmm9
+; SSE4-NEXT: pmovsxwd %xmm3, %xmm10
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm3, %xmm11
+; SSE4-NEXT: pmovsxwd %xmm2, %xmm12
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm2, %xmm13
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm14
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm1, %xmm15
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm0
+; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm8
+; SSE4-NEXT: paddd %xmm9, %xmm8
+; SSE4-NEXT: pmovsxwd %xmm7, %xmm3
+; SSE4-NEXT: paddd %xmm10, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm7
+; SSE4-NEXT: paddd %xmm11, %xmm7
+; SSE4-NEXT: pmovsxwd %xmm6, %xmm2
+; SSE4-NEXT: paddd %xmm12, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm6
+; SSE4-NEXT: paddd %xmm13, %xmm6
+; SSE4-NEXT: pmovsxwd %xmm5, %xmm1
+; SSE4-NEXT: paddd %xmm14, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
+; SSE4-NEXT: pmovsxwd %xmm0, %xmm5
+; SSE4-NEXT: paddd %xmm15, %xmm5
+; SSE4-NEXT: pmovsxwd %xmm4, %xmm0
+; SSE4-NEXT: paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE4-NEXT: psrld $1, %xmm8
+; SSE4-NEXT: psrld $1, %xmm3
+; SSE4-NEXT: psrld $1, %xmm7
+; SSE4-NEXT: psrld $1, %xmm2
+; SSE4-NEXT: psrld $1, %xmm6
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: psrld $1, %xmm5
+; SSE4-NEXT: psrld $1, %xmm0
+; SSE4-NEXT: pxor %xmm4, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0],xmm4[1],xmm5[2],xmm4[3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
+; SSE4-NEXT: packusdw %xmm5, %xmm0
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0],xmm4[1],xmm6[2],xmm4[3],xmm6[4],xmm4[5],xmm6[6],xmm4[7]
+; SSE4-NEXT: packusdw %xmm6, %xmm1
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0],xmm4[1],xmm7[2],xmm4[3],xmm7[4],xmm4[5],xmm7[6],xmm4[7]
+; SSE4-NEXT: packusdw %xmm7, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
+; SSE4-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0],xmm4[1],xmm8[2],xmm4[3],xmm8[4],xmm4[5],xmm8[6],xmm4[7]
+; SSE4-NEXT: packusdw %xmm8, %xmm3
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm6, %xmm6
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm7, %xmm7
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm9, %xmm9
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm10, %xmm10
+; AVX1-NEXT: vpaddd %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovsxwd %xmm3, %xmm10
+; AVX1-NEXT: vpaddd %xmm5, %xmm10, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm10, %xmm10
+; AVX1-NEXT: vpaddd %xmm6, %xmm10, %xmm6
+; AVX1-NEXT: vpmovsxwd %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm7
+; AVX1-NEXT: vpaddd %xmm7, %xmm8, %xmm7
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxwd %xmm8, %xmm8
+; AVX1-NEXT: vpaddd %xmm8, %xmm9, %xmm8
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm5, %xmm4
+; AVX1-NEXT: vpsrld $1, %xmm6, %xmm5
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm7, %xmm6
+; AVX1-NEXT: vpsrld $1, %xmm8, %xmm7
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm8[1],xmm0[2],xmm8[3],xmm0[4],xmm8[5],xmm0[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm8[1],xmm7[2],xmm8[3],xmm7[4],xmm8[5],xmm7[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2],xmm8[3],xmm6[4],xmm8[5],xmm6[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm8[1],xmm3[2],xmm8[3],xmm3[4],xmm8[5],xmm3[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm8[1],xmm1[2],xmm8[3],xmm1[4],xmm8[5],xmm1[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0],xmm8[1],xmm5[2],xmm8[3],xmm5[4],xmm8[5],xmm5[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm8[1],xmm4[2],xmm8[3],xmm4[4],xmm8[5],xmm4[6],xmm8[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2],xmm8[3],xmm2[4],xmm8[5],xmm2[6],xmm8[7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm5
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxwd %xmm3, %ymm6
+; AVX2-NEXT: vpaddd %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
+; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
+; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpmovsxwd %xmm2, %ymm3
+; AVX2-NEXT: vpaddd %ymm3, %ymm5, %ymm3
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm4, %ymm2
+; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm3, %ymm3
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2],ymm4[3],ymm0[4],ymm4[5],ymm0[6],ymm4[7],ymm0[8],ymm4[9],ymm0[10],ymm4[11],ymm0[12],ymm4[13],ymm0[14],ymm4[15]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7],ymm3[8],ymm4[9],ymm3[10],ymm4[11],ymm3[12],ymm4[13],ymm3[14],ymm4[15]
+; AVX2-NEXT: vpackusdw %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2],ymm4[3],ymm1[4],ymm4[5],ymm1[6],ymm4[7],ymm1[8],ymm4[9],ymm1[10],ymm4[11],ymm1[12],ymm4[13],ymm1[14],ymm4[15]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3],ymm2[4],ymm4[5],ymm2[6],ymm4[7],ymm2[8],ymm4[9],ymm2[10],ymm4[11],ymm2[12],ymm4[13],ymm2[14],ymm4[15]
+; AVX2-NEXT: vpackusdw %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vpmovsxwd %ymm2, %zmm2
+; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512-NEXT: vpmovsxwd %ymm3, %zmm3
+; AVX512-NEXT: vpaddd %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm2, %zmm1
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = sext <32 x i16> %a0 to <32 x i32>
+ %x1 = sext <32 x i16> %a1 to <32 x i32>
+ %sum = add <32 x i32> %x0, %x1
+ %shift = ashr <32 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <32 x i32> %shift to <32 x i16>
+ ret <32 x i16> %res
+}
+
+define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE-LABEL: test_fixed_v16i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: pand %xmm7, %xmm8
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pand %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: pand %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrad $1, %xmm3
+; SSE-NEXT: paddd %xmm8, %xmm3
+; SSE-NEXT: psrad $1, %xmm2
+; SSE-NEXT: paddd %xmm9, %xmm2
+; SSE-NEXT: psrad $1, %xmm1
+; SSE-NEXT: paddd %xmm10, %xmm1
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: paddd %xmm11, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpaddd %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpaddd %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxord %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsrad $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddd %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %and = and <16 x i32> %a0, %a1
+ %xor = xor <16 x i32> %a1, %a0
+ %shift = ashr <16 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = add <16 x i32> %and, %shift
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE2-LABEL: test_ext_v16i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm9
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm3[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1]
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: pcmpgtd %xmm13, %xmm9
+; SSE2-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm9
+; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm2[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1]
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: pcmpgtd %xmm12, %xmm9
+; SSE2-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm9[0],xmm12[1],xmm9[1]
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm9
+; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm1[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
+; SSE2-NEXT: pxor %xmm9, %xmm9
+; SSE2-NEXT: pcmpgtd %xmm11, %xmm9
+; SSE2-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1]
+; SSE2-NEXT: pxor %xmm10, %xmm10
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm10
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
+; SSE2-NEXT: pxor %xmm10, %xmm10
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm10
+; SSE2-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
+; SSE2-NEXT: pxor %xmm14, %xmm14
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm14
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm7[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1]
+; SSE2-NEXT: paddq %xmm7, %xmm3
+; SSE2-NEXT: pxor %xmm7, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm7
+; SSE2-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm7[0],xmm10[1],xmm7[1]
+; SSE2-NEXT: paddq %xmm13, %xmm10
+; SSE2-NEXT: pxor %xmm13, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm13
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1]
+; SSE2-NEXT: paddq %xmm6, %xmm2
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm6
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
+; SSE2-NEXT: paddq %xmm12, %xmm7
+; SSE2-NEXT: pxor %xmm12, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm12
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
+; SSE2-NEXT: paddq %xmm5, %xmm1
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; SSE2-NEXT: paddq %xmm11, %xmm6
+; SSE2-NEXT: pxor %xmm11, %xmm11
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm11
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1]
+; SSE2-NEXT: paddq %xmm4, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
+; SSE2-NEXT: paddq %xmm9, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm10
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm10[0,2]
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: psrlq $1, %xmm7
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm7[0,2]
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm6
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm6[0,2]
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxdq %xmm3, %xmm8
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm3, %xmm9
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm10
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm2, %xmm11
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm12
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm1, %xmm13
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm14
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm15
+; SSE4-NEXT: pmovsxdq %xmm7, %xmm3
+; SSE4-NEXT: paddq %xmm8, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm7
+; SSE4-NEXT: paddq %xmm9, %xmm7
+; SSE4-NEXT: pmovsxdq %xmm6, %xmm2
+; SSE4-NEXT: paddq %xmm10, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm6
+; SSE4-NEXT: paddq %xmm11, %xmm6
+; SSE4-NEXT: pmovsxdq %xmm5, %xmm1
+; SSE4-NEXT: paddq %xmm12, %xmm1
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm0, %xmm5
+; SSE4-NEXT: paddq %xmm13, %xmm5
+; SSE4-NEXT: pmovsxdq %xmm4, %xmm0
+; SSE4-NEXT: paddq %xmm14, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
+; SSE4-NEXT: pmovsxdq %xmm4, %xmm4
+; SSE4-NEXT: paddq %xmm15, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm7
+; SSE4-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm7[0,2]
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm6
+; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[0,2]
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm5
+; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm5, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm6, %xmm6
+; AVX1-NEXT: vpmovsxdq %xmm4, %xmm4
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm7[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm8, %xmm8
+; AVX1-NEXT: vpshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm9, %xmm9
+; AVX1-NEXT: vpmovsxdq %xmm7, %xmm7
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm10
+; AVX1-NEXT: vpshufd {{.*#+}} xmm11 = xmm10[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm11, %xmm11
+; AVX1-NEXT: vpaddq %xmm5, %xmm11, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm11 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm11, %xmm11
+; AVX1-NEXT: vpaddq %xmm6, %xmm11, %xmm6
+; AVX1-NEXT: vpmovsxdq %xmm10, %xmm10
+; AVX1-NEXT: vpaddq %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm10, %xmm10
+; AVX1-NEXT: vpaddq %xmm10, %xmm8, %xmm8
+; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm2[2,3,2,3]
+; AVX1-NEXT: vpmovsxdq %xmm10, %xmm10
+; AVX1-NEXT: vpaddq %xmm10, %xmm9, %xmm9
+; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm6, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm8, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm9, %xmm7
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm7, %ymm3
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm3[0,2],ymm0[4,6],ymm3[4,6]
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
+; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-NEXT: vpmovsxdq %xmm4, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX2-NEXT: vpmovsxdq %xmm5, %ymm5
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
+; AVX2-NEXT: vpmovsxdq %xmm6, %ymm6
+; AVX2-NEXT: vpaddq %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm6
+; AVX2-NEXT: vpmovsxdq %xmm6, %ymm6
+; AVX2-NEXT: vpaddq %ymm6, %ymm5, %ymm5
+; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpmovsxdq %xmm3, %ymm2
+; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm5[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],ymm4[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vpmovsxdq %ymm2, %zmm2
+; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512-NEXT: vpmovsxdq %ymm3, %zmm3
+; AVX512-NEXT: vpaddq %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vpmovsxdq %ymm1, %zmm1
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm2, %zmm1
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = sext <16 x i32> %a0 to <16 x i64>
+ %x1 = sext <16 x i32> %a1 to <16 x i64>
+ %sum = add <16 x i64> %x0, %x1
+ %shift = ashr <16 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <16 x i64> %shift to <16 x i32>
+ ret <16 x i32> %res
+}
+
+define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE2-LABEL: test_fixed_v8i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm3, %xmm11
+; SSE2-NEXT: pand %xmm7, %xmm11
+; SSE2-NEXT: movdqa %xmm2, %xmm10
+; SSE2-NEXT: pand %xmm6, %xmm10
+; SSE2-NEXT: movdqa %xmm1, %xmm9
+; SSE2-NEXT: pand %xmm5, %xmm9
+; SSE2-NEXT: movdqa %xmm0, %xmm8
+; SSE2-NEXT: pand %xmm4, %xmm8
+; SSE2-NEXT: pxor %xmm0, %xmm4
+; SSE2-NEXT: pxor %xmm1, %xmm5
+; SSE2-NEXT: pxor %xmm2, %xmm6
+; SSE2-NEXT: pxor %xmm3, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm7[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; SSE2-NEXT: paddq %xmm11, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-NEXT: paddq %xmm10, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: paddq %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
+; SSE2-NEXT: paddq %xmm8, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_fixed_v8i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm3, %xmm10
+; SSE4-NEXT: pand %xmm7, %xmm10
+; SSE4-NEXT: movdqa %xmm2, %xmm11
+; SSE4-NEXT: pand %xmm6, %xmm11
+; SSE4-NEXT: movdqa %xmm1, %xmm9
+; SSE4-NEXT: pand %xmm5, %xmm9
+; SSE4-NEXT: movdqa %xmm0, %xmm8
+; SSE4-NEXT: pand %xmm4, %xmm8
+; SSE4-NEXT: pxor %xmm4, %xmm0
+; SSE4-NEXT: pxor %xmm5, %xmm1
+; SSE4-NEXT: pxor %xmm6, %xmm2
+; SSE4-NEXT: pxor %xmm7, %xmm3
+; SSE4-NEXT: movdqa %xmm3, %xmm4
+; SSE4-NEXT: psrad $1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
+; SSE4-NEXT: paddq %xmm10, %xmm3
+; SSE4-NEXT: movdqa %xmm2, %xmm4
+; SSE4-NEXT: psrad $1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; SSE4-NEXT: paddq %xmm11, %xmm2
+; SSE4-NEXT: movdqa %xmm1, %xmm4
+; SSE4-NEXT: psrad $1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; SSE4-NEXT: paddq %xmm9, %xmm1
+; SSE4-NEXT: movdqa %xmm0, %xmm4
+; SSE4-NEXT: psrad $1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; SSE4-NEXT: paddq %xmm8, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm6
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1],xmm3[2,3],xmm6[4,5],xmm3[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpaddq %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpaddq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm1, %ymm2
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX2-NEXT: vpaddq %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm2
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
+; AVX2-NEXT: vpaddq %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsraq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddq %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %and = and <8 x i64> %a0, %a1
+ %xor = xor <8 x i64> %a1, %a0
+ %shift = ashr <8 x i64> %xor, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = add <8 x i64> %and, %shift
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_ext_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE2-LABEL: test_ext_v8i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: pushq %rax
+; SSE2-NEXT: .cfi_def_cfa_offset 64
+; SSE2-NEXT: .cfi_offset %rbx, -56
+; SSE2-NEXT: .cfi_offset %r12, -48
+; SSE2-NEXT: .cfi_offset %r13, -40
+; SSE2-NEXT: .cfi_offset %r14, -32
+; SSE2-NEXT: .cfi_offset %r15, -24
+; SSE2-NEXT: .cfi_offset %rbp, -16
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm8, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, (%rsp) # 8-byte Spill
+; SSE2-NEXT: movq %xmm3, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm3, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq %xmm1, %rbp
+; SSE2-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %rbp
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %rbx
+; SSE2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %rbx
+; SSE2-NEXT: movq %xmm0, %r15
+; SSE2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r15
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r10
+; SSE2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r10
+; SSE2-NEXT: movq %xmm7, %r9
+; SSE2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r9
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r12
+; SSE2-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r12
+; SSE2-NEXT: movq %xmm6, %r13
+; SSE2-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: sarq $63, %r13
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r14
+; SSE2-NEXT: movq %r14, %rsi
+; SSE2-NEXT: sarq $63, %rsi
+; SSE2-NEXT: movq %xmm5, %r11
+; SSE2-NEXT: movq %r11, %rdx
+; SSE2-NEXT: sarq $63, %rdx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r8
+; SSE2-NEXT: movq %r8, %rdi
+; SSE2-NEXT: sarq $63, %rdi
+; SSE2-NEXT: movq %xmm4, %rcx
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: sarq $63, %rax
+; SSE2-NEXT: addq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; SSE2-NEXT: adcq %r15, %rax
+; SSE2-NEXT: addq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; SSE2-NEXT: adcq %rbx, %rdi
+; SSE2-NEXT: addq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; SSE2-NEXT: adcq %rbp, %rdx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; SSE2-NEXT: addq %r14, %r15
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; SSE2-NEXT: adcq (%rsp), %r10 # 8-byte Folded Reload
+; SSE2-NEXT: shldq $63, %rcx, %r10
+; SSE2-NEXT: shldq $63, %r8, %r9
+; SSE2-NEXT: shldq $63, %r11, %r12
+; SSE2-NEXT: shldq $63, %rbx, %r13
+; SSE2-NEXT: shldq $63, %r15, %rsi
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE2-NEXT: shldq $63, %rcx, %rdx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE2-NEXT: shldq $63, %rcx, %rdi
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE2-NEXT: shldq $63, %rcx, %rax
+; SSE2-NEXT: movq %rax, %xmm0
+; SSE2-NEXT: movq %rdi, %xmm4
+; SSE2-NEXT: movq %rdx, %xmm1
+; SSE2-NEXT: movq %rsi, %xmm5
+; SSE2-NEXT: movq %r13, %xmm2
+; SSE2-NEXT: movq %r12, %xmm6
+; SSE2-NEXT: movq %r9, %xmm3
+; SSE2-NEXT: movq %r10, %xmm7
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE2-NEXT: addq $8, %rsp
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pushq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: pushq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: pushq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: pushq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: pushq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: pushq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: pushq %rax
+; SSE4-NEXT: .cfi_def_cfa_offset 64
+; SSE4-NEXT: .cfi_offset %rbx, -56
+; SSE4-NEXT: .cfi_offset %r12, -48
+; SSE4-NEXT: .cfi_offset %r13, -40
+; SSE4-NEXT: .cfi_offset %r14, -32
+; SSE4-NEXT: .cfi_offset %r15, -24
+; SSE4-NEXT: .cfi_offset %rbp, -16
+; SSE4-NEXT: movq %xmm3, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, (%rsp) # 8-byte Spill
+; SSE4-NEXT: pextrq $1, %xmm3, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %xmm2, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: pextrq $1, %xmm2, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %xmm1, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq %rax, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: pextrq $1, %xmm1, %rbp
+; SSE4-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %rbp
+; SSE4-NEXT: movq %xmm0, %rbx
+; SSE4-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %rbx
+; SSE4-NEXT: pextrq $1, %xmm0, %r14
+; SSE4-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r14
+; SSE4-NEXT: movq %xmm7, %r10
+; SSE4-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r10
+; SSE4-NEXT: pextrq $1, %xmm7, %r9
+; SSE4-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r9
+; SSE4-NEXT: movq %xmm6, %r15
+; SSE4-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r15
+; SSE4-NEXT: pextrq $1, %xmm6, %r13
+; SSE4-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r13
+; SSE4-NEXT: movq %xmm5, %r12
+; SSE4-NEXT: movq %r12, %rsi
+; SSE4-NEXT: sarq $63, %rsi
+; SSE4-NEXT: pextrq $1, %xmm5, %r11
+; SSE4-NEXT: movq %r11, %rdx
+; SSE4-NEXT: sarq $63, %rdx
+; SSE4-NEXT: movq %xmm4, %r8
+; SSE4-NEXT: movq %r8, %rdi
+; SSE4-NEXT: sarq $63, %rdi
+; SSE4-NEXT: pextrq $1, %xmm4, %rcx
+; SSE4-NEXT: movq %rcx, %rax
+; SSE4-NEXT: sarq $63, %rax
+; SSE4-NEXT: addq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; SSE4-NEXT: adcq %r14, %rax
+; SSE4-NEXT: addq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; SSE4-NEXT: adcq %rbx, %rdi
+; SSE4-NEXT: addq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; SSE4-NEXT: adcq %rbp, %rdx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; SSE4-NEXT: addq %r12, %r14
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; SSE4-NEXT: adcq (%rsp), %r10 # 8-byte Folded Reload
+; SSE4-NEXT: shldq $63, %rcx, %r10
+; SSE4-NEXT: shldq $63, %r8, %r9
+; SSE4-NEXT: shldq $63, %r11, %r15
+; SSE4-NEXT: shldq $63, %rbx, %r13
+; SSE4-NEXT: shldq $63, %r14, %rsi
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE4-NEXT: shldq $63, %rcx, %rdx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE4-NEXT: shldq $63, %rcx, %rdi
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE4-NEXT: shldq $63, %rcx, %rax
+; SSE4-NEXT: movq %rax, %xmm4
+; SSE4-NEXT: movq %rdi, %xmm0
+; SSE4-NEXT: movq %rdx, %xmm5
+; SSE4-NEXT: movq %rsi, %xmm1
+; SSE4-NEXT: movq %r13, %xmm6
+; SSE4-NEXT: movq %r15, %xmm2
+; SSE4-NEXT: movq %r9, %xmm7
+; SSE4-NEXT: movq %r10, %xmm3
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE4-NEXT: addq $8, %rsp
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: popq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: popq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: popq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: popq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: popq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 8
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: pushq %rax
+; AVX1-NEXT: .cfi_def_cfa_offset 64
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, (%rsp) # 8-byte Spill
+; AVX1-NEXT: vpextrq $1, %xmm1, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vpextrq $1, %xmm1, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: vpextrq $1, %xmm0, %rbp
+; AVX1-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %rbp
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %rbx
+; AVX1-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vpextrq $1, %xmm0, %r15
+; AVX1-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r15
+; AVX1-NEXT: vmovq %xmm3, %r9
+; AVX1-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vpextrq $1, %xmm3, %r10
+; AVX1-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %r12
+; AVX1-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vpextrq $1, %xmm0, %r13
+; AVX1-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: sarq $63, %r13
+; AVX1-NEXT: vmovq %xmm2, %r14
+; AVX1-NEXT: movq %r14, %rsi
+; AVX1-NEXT: sarq $63, %rsi
+; AVX1-NEXT: vpextrq $1, %xmm2, %r11
+; AVX1-NEXT: movq %r11, %rdx
+; AVX1-NEXT: sarq $63, %rdx
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %r8
+; AVX1-NEXT: movq %r8, %rdi
+; AVX1-NEXT: sarq $63, %rdi
+; AVX1-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX1-NEXT: movq %rcx, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: addq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX1-NEXT: adcq %r15, %rax
+; AVX1-NEXT: addq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX1-NEXT: adcq %rbx, %rdi
+; AVX1-NEXT: addq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX1-NEXT: adcq %rbp, %rdx
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; AVX1-NEXT: addq %r14, %r15
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; AVX1-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
+; AVX1-NEXT: shldq $63, %rcx, %r9
+; AVX1-NEXT: shldq $63, %r8, %r10
+; AVX1-NEXT: shldq $63, %r11, %r12
+; AVX1-NEXT: shldq $63, %rbx, %r13
+; AVX1-NEXT: shldq $63, %r15, %rsi
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX1-NEXT: shldq $63, %rcx, %rdx
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX1-NEXT: shldq $63, %rcx, %rdi
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX1-NEXT: shldq $63, %rcx, %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vmovq %rdi, %xmm1
+; AVX1-NEXT: vmovq %rdx, %xmm2
+; AVX1-NEXT: vmovq %rsi, %xmm3
+; AVX1-NEXT: vmovq %r13, %xmm4
+; AVX1-NEXT: vmovq %r12, %xmm5
+; AVX1-NEXT: vmovq %r10, %xmm6
+; AVX1-NEXT: vmovq %r9, %xmm7
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: addq $8, %rsp
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 8
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: pushq %rax
+; AVX2-NEXT: .cfi_def_cfa_offset 64
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, (%rsp) # 8-byte Spill
+; AVX2-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: vpextrq $1, %xmm0, %rbp
+; AVX2-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %rbp
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rbx
+; AVX2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %rbx
+; AVX2-NEXT: vpextrq $1, %xmm0, %r15
+; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r15
+; AVX2-NEXT: vmovq %xmm3, %r9
+; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r9
+; AVX2-NEXT: vpextrq $1, %xmm3, %r10
+; AVX2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r10
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %r12
+; AVX2-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r12
+; AVX2-NEXT: vpextrq $1, %xmm0, %r13
+; AVX2-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: sarq $63, %r13
+; AVX2-NEXT: vmovq %xmm2, %r14
+; AVX2-NEXT: movq %r14, %rsi
+; AVX2-NEXT: sarq $63, %rsi
+; AVX2-NEXT: vpextrq $1, %xmm2, %r11
+; AVX2-NEXT: movq %r11, %rdx
+; AVX2-NEXT: sarq $63, %rdx
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %r8
+; AVX2-NEXT: movq %r8, %rdi
+; AVX2-NEXT: sarq $63, %rdi
+; AVX2-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX2-NEXT: movq %rcx, %rax
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: addq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX2-NEXT: adcq %r15, %rax
+; AVX2-NEXT: addq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX2-NEXT: adcq %rbx, %rdi
+; AVX2-NEXT: addq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX2-NEXT: adcq %rbp, %rdx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; AVX2-NEXT: addq %r14, %r15
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; AVX2-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
+; AVX2-NEXT: shldq $63, %rcx, %r9
+; AVX2-NEXT: shldq $63, %r8, %r10
+; AVX2-NEXT: shldq $63, %r11, %r12
+; AVX2-NEXT: shldq $63, %rbx, %r13
+; AVX2-NEXT: shldq $63, %r15, %rsi
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX2-NEXT: shldq $63, %rcx, %rdx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX2-NEXT: shldq $63, %rcx, %rdi
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX2-NEXT: shldq $63, %rcx, %rax
+; AVX2-NEXT: vmovq %rax, %xmm0
+; AVX2-NEXT: vmovq %rdi, %xmm1
+; AVX2-NEXT: vmovq %rdx, %xmm2
+; AVX2-NEXT: vmovq %rsi, %xmm3
+; AVX2-NEXT: vmovq %r13, %xmm4
+; AVX2-NEXT: vmovq %r12, %xmm5
+; AVX2-NEXT: vmovq %r10, %xmm6
+; AVX2-NEXT: vmovq %r9, %xmm7
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: addq $8, %rsp
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 8
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: pushq %rax
+; AVX512-NEXT: .cfi_def_cfa_offset 64
+; AVX512-NEXT: .cfi_offset %rbx, -56
+; AVX512-NEXT: .cfi_offset %r12, -48
+; AVX512-NEXT: .cfi_offset %r13, -40
+; AVX512-NEXT: .cfi_offset %r14, -32
+; AVX512-NEXT: .cfi_offset %r15, -24
+; AVX512-NEXT: .cfi_offset %rbp, -16
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, (%rsp) # 8-byte Spill
+; AVX512-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: sarq $63, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: vpextrq $1, %xmm0, %r13
+; AVX512-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r13
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %r14
+; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r14
+; AVX512-NEXT: vpextrq $1, %xmm0, %r15
+; AVX512-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r15
+; AVX512-NEXT: vmovq %xmm1, %r9
+; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r9
+; AVX512-NEXT: vpextrq $1, %xmm1, %r11
+; AVX512-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r11
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %r12
+; AVX512-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %r12
+; AVX512-NEXT: vpextrq $1, %xmm0, %rbp
+; AVX512-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: sarq $63, %rbp
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; AVX512-NEXT: vmovq %xmm0, %rbx
+; AVX512-NEXT: movq %rbx, %rsi
+; AVX512-NEXT: sarq $63, %rsi
+; AVX512-NEXT: vpextrq $1, %xmm0, %r10
+; AVX512-NEXT: movq %r10, %rdx
+; AVX512-NEXT: sarq $63, %rdx
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %r8
+; AVX512-NEXT: movq %r8, %rdi
+; AVX512-NEXT: sarq $63, %rdi
+; AVX512-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX512-NEXT: movq %rcx, %rax
+; AVX512-NEXT: sarq $63, %rax
+; AVX512-NEXT: addq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX512-NEXT: adcq %r15, %rax
+; AVX512-NEXT: addq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX512-NEXT: adcq %r14, %rdi
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; AVX512-NEXT: addq %r10, %r15
+; AVX512-NEXT: adcq %r13, %rdx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; AVX512-NEXT: addq %rbx, %r14
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; AVX512-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
+; AVX512-NEXT: shldq $63, %rcx, %r9
+; AVX512-NEXT: shldq $63, %r8, %r11
+; AVX512-NEXT: shldq $63, %r10, %r12
+; AVX512-NEXT: shldq $63, %rbx, %rbp
+; AVX512-NEXT: shldq $63, %r14, %rsi
+; AVX512-NEXT: shldq $63, %r15, %rdx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX512-NEXT: shldq $63, %rcx, %rdi
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX512-NEXT: shldq $63, %rcx, %rax
+; AVX512-NEXT: vmovq %rax, %xmm0
+; AVX512-NEXT: vmovq %rdi, %xmm1
+; AVX512-NEXT: vmovq %rdx, %xmm2
+; AVX512-NEXT: vmovq %rsi, %xmm3
+; AVX512-NEXT: vmovq %rbp, %xmm4
+; AVX512-NEXT: vmovq %r12, %xmm5
+; AVX512-NEXT: vmovq %r11, %xmm6
+; AVX512-NEXT: vmovq %r9, %xmm7
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: addq $8, %rsp
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 8
+; AVX512-NEXT: retq
+ %x0 = sext <8 x i64> %a0 to <8 x i128>
+ %x1 = sext <8 x i64> %a1 to <8 x i128>
+ %sum = add <8 x i128> %x0, %x1
+ %shift = ashr <8 x i128> %sum, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <8 x i128> %shift to <8 x i64>
+ ret <8 x i64> %res
+}
+
diff --git a/llvm/test/CodeGen/X86/avgflooru.ll b/llvm/test/CodeGen/X86/avgflooru.ll
new file mode 100644
index 0000000..e07c1f5
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avgflooru.ll
@@ -0,0 +1,2629 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE4
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
+;
+; 128-bit vectors
+;
+
+define <16 x i8> @test_fixed_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: test_fixed_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: paddb %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: retq
+ %and = and <16 x i8> %a0, %a1
+ %xor = xor <16 x i8> %a0, %a1
+ %shift = lshr <16 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = add <16 x i8> %and, %shift
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_ext_v16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE2-LABEL: test_ext_v16i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; SSE2-NEXT: paddw %xmm3, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: paddw %xmm1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: packuswb %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pxor %xmm3, %xmm3
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
+; SSE4-NEXT: paddw %xmm0, %xmm1
+; SSE4-NEXT: paddw %xmm4, %xmm2
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm2
+; SSE4-NEXT: packuswb %xmm1, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX1-NEXT: vpaddw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovwb %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = zext <16 x i8> %a0 to <16 x i16>
+ %x1 = zext <16 x i8> %a1 to <16 x i16>
+ %sum = add <16 x i16> %x0, %x1
+ %shift = lshr <16 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <16 x i16> %shift to <16 x i8>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @test_fixed_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: test_fixed_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: paddw %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddw %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %and = and <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a1, %a0
+ %shift = lshr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <8 x i16> %and, %shift
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_ext_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE2-LABEL: test_ext_v8i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; SSE2-NEXT: paddd %xmm3, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: pslld $15, %xmm4
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pxor %xmm3, %xmm3
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE4-NEXT: paddd %xmm0, %xmm1
+; SSE4-NEXT: paddd %xmm4, %xmm2
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: psrld $1, %xmm2
+; SSE4-NEXT: packusdw %xmm1, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = zext <8 x i16> %a0 to <8 x i32>
+ %x1 = zext <8 x i16> %a1 to <8 x i32>
+ %sum = add <8 x i32> %x0, %x1
+ %shift = lshr <8 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <8 x i32> %shift to <8 x i16>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: test_fixed_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %and = and <4 x i32> %a0, %a1
+ %xor = xor <4 x i32> %a1, %a0
+ %shift = lshr <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1>
+ %res = add <4 x i32> %and, %shift
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @test_ext_v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE2-LABEL: test_ext_v4i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE2-NEXT: paddq %xmm3, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: paddq %xmm1, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pxor %xmm3, %xmm3
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE4-NEXT: paddq %xmm0, %xmm1
+; SSE4-NEXT: paddq %xmm4, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
+; SSE4-NEXT: movaps %xmm2, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x0 = zext <4 x i32> %a0 to <4 x i64>
+ %x1 = zext <4 x i32> %a1 to <4 x i64>
+ %sum = add <4 x i64> %x0, %x1
+ %shift = lshr <4 x i64> %sum, <i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <4 x i64> %shift to <4 x i32>
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE-LABEL: test_fixed_v2i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: paddq %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_fixed_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %and = and <2 x i64> %a0, %a1
+ %xor = xor <2 x i64> %a1, %a0
+ %shift = lshr <2 x i64> %xor, <i64 1, i64 1>
+ %res = add <2 x i64> %and, %shift
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @test_ext_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE2-LABEL: test_ext_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: movq %xmm0, %rcx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdx
+; SSE2-NEXT: movq %xmm1, %rsi
+; SSE2-NEXT: xorl %edi, %edi
+; SSE2-NEXT: addq %rcx, %rsi
+; SSE2-NEXT: setb %dil
+; SSE2-NEXT: xorl %ecx, %ecx
+; SSE2-NEXT: addq %rax, %rdx
+; SSE2-NEXT: setb %cl
+; SSE2-NEXT: shldq $63, %rdx, %rcx
+; SSE2-NEXT: shldq $63, %rsi, %rdi
+; SSE2-NEXT: movq %rdi, %xmm0
+; SSE2-NEXT: movq %rcx, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v2i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movq %xmm0, %rax
+; SSE4-NEXT: pextrq $1, %xmm0, %rcx
+; SSE4-NEXT: movq %xmm1, %rdx
+; SSE4-NEXT: pextrq $1, %xmm1, %rsi
+; SSE4-NEXT: xorl %edi, %edi
+; SSE4-NEXT: addq %rcx, %rsi
+; SSE4-NEXT: setb %dil
+; SSE4-NEXT: xorl %ecx, %ecx
+; SSE4-NEXT: addq %rax, %rdx
+; SSE4-NEXT: setb %cl
+; SSE4-NEXT: shldq $63, %rdx, %rcx
+; SSE4-NEXT: shldq $63, %rsi, %rdi
+; SSE4-NEXT: movq %rdi, %xmm1
+; SSE4-NEXT: movq %rcx, %xmm0
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX1-NEXT: vmovq %xmm1, %rdx
+; AVX1-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX1-NEXT: xorl %edi, %edi
+; AVX1-NEXT: addq %rcx, %rsi
+; AVX1-NEXT: setb %dil
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: addq %rax, %rdx
+; AVX1-NEXT: setb %cl
+; AVX1-NEXT: shldq $63, %rdx, %rcx
+; AVX1-NEXT: shldq $63, %rsi, %rdi
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vmovq %rcx, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX2-NEXT: vmovq %xmm1, %rdx
+; AVX2-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX2-NEXT: xorl %edi, %edi
+; AVX2-NEXT: addq %rcx, %rsi
+; AVX2-NEXT: setb %dil
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: addq %rax, %rdx
+; AVX2-NEXT: setb %cl
+; AVX2-NEXT: shldq $63, %rdx, %rcx
+; AVX2-NEXT: shldq $63, %rsi, %rdi
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vmovq %rcx, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX512-NEXT: vpextrq $1, %xmm1, %rdx
+; AVX512-NEXT: vmovq %xmm1, %rsi
+; AVX512-NEXT: xorl %edi, %edi
+; AVX512-NEXT: addq %rcx, %rdx
+; AVX512-NEXT: setb %dil
+; AVX512-NEXT: xorl %ecx, %ecx
+; AVX512-NEXT: addq %rax, %rsi
+; AVX512-NEXT: setb %cl
+; AVX512-NEXT: shldq $63, %rsi, %rcx
+; AVX512-NEXT: shldq $63, %rdx, %rdi
+; AVX512-NEXT: vmovq %rdi, %xmm0
+; AVX512-NEXT: vmovq %rcx, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: retq
+ %x0 = zext <2 x i64> %a0 to <2 x i128>
+ %x1 = zext <2 x i64> %a1 to <2 x i128>
+ %sum = add <2 x i128> %x0, %x1
+ %shift = lshr <2 x i128> %sum, <i128 1, i128 1>
+ %res = trunc <2 x i128> %shift to <2 x i64>
+ ret <2 x i64> %res
+}
+
+;
+; 256-bit vectors
+;
+
+define <32 x i8> @test_fixed_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE-LABEL: test_fixed_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: paddb %xmm4, %xmm1
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: paddb %xmm5, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %and = and <32 x i8> %a0, %a1
+ %xor = xor <32 x i8> %a0, %a1
+ %shift = lshr <32 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = add <32 x i8> %and, %shift
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_ext_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE2-LABEL: test_ext_v32i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
+; SSE2-NEXT: paddw %xmm5, %xmm7
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-NEXT: paddw %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSE2-NEXT: paddw %xmm6, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: paddw %xmm2, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm7
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: packuswb %xmm7, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm3
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: packuswb %xmm3, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v32i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pxor %xmm5, %xmm5
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm7 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
+; SSE4-NEXT: paddw %xmm1, %xmm3
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
+; SSE4-NEXT: paddw %xmm0, %xmm2
+; SSE4-NEXT: paddw %xmm6, %xmm4
+; SSE4-NEXT: paddw %xmm7, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm3
+; SSE4-NEXT: psrlw $1, %xmm2
+; SSE4-NEXT: psrlw $1, %xmm4
+; SSE4-NEXT: packuswb %xmm3, %xmm4
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: packuswb %xmm2, %xmm1
+; SSE4-NEXT: movdqa %xmm1, %xmm0
+; SSE4-NEXT: movdqa %xmm4, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX1-NEXT: vpaddw %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
+; AVX1-NEXT: vpaddw %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; AVX1-NEXT: vpaddw %xmm1, %xmm4, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX2-NEXT: vpaddw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <32 x i8> %a0 to <32 x i16>
+ %x1 = zext <32 x i8> %a1 to <32 x i16>
+ %sum = add <32 x i16> %x0, %x1
+ %shift = lshr <32 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <32 x i16> %shift to <32 x i8>
+ ret <32 x i8> %res
+}
+
+define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE-LABEL: test_fixed_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: paddw %xmm4, %xmm1
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: paddw %xmm5, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddw %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddw %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddw %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %and = and <16 x i16> %a0, %a1
+ %xor = xor <16 x i16> %a1, %a0
+ %shift = lshr <16 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <16 x i16> %and, %shift
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_ext_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE2-LABEL: test_ext_v16i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm5, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm6, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm3, %xmm1
+; SSE2-NEXT: pslld $15, %xmm7
+; SSE2-NEXT: psrad $16, %xmm7
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm7, %xmm0
+; SSE2-NEXT: pslld $15, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: pslld $15, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pxor %xmm5, %xmm5
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm7 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; SSE4-NEXT: paddd %xmm1, %xmm3
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE4-NEXT: paddd %xmm0, %xmm2
+; SSE4-NEXT: paddd %xmm6, %xmm4
+; SSE4-NEXT: paddd %xmm7, %xmm1
+; SSE4-NEXT: psrld $1, %xmm3
+; SSE4-NEXT: psrld $1, %xmm2
+; SSE4-NEXT: psrld $1, %xmm4
+; SSE4-NEXT: packusdw %xmm3, %xmm4
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: packusdw %xmm2, %xmm1
+; SSE4-NEXT: movdqa %xmm1, %xmm0
+; SSE4-NEXT: movdqa %xmm4, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX1-NEXT: vpaddd %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; AVX1-NEXT: vpaddd %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX1-NEXT: vpaddd %xmm1, %xmm4, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX2-NEXT: vpaddd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm2, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <16 x i16> %a0 to <16 x i32>
+ %x1 = zext <16 x i16> %a1 to <16 x i32>
+ %sum = add <16 x i32> %x0, %x1
+ %shift = lshr <16 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <16 x i32> %shift to <16 x i16>
+ ret <16 x i16> %res
+}
+
+define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE-LABEL: test_fixed_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: paddd %xmm4, %xmm1
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: paddd %xmm5, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddd %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddd %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %and = and <8 x i32> %a0, %a1
+ %xor = xor <8 x i32> %a1, %a0
+ %shift = lshr <8 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = add <8 x i32> %and, %shift
+ ret <8 x i32> %res
+}
+
+define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE2-LABEL: test_ext_v8i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm4[2],xmm6[3],xmm4[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; SSE2-NEXT: paddq %xmm5, %xmm7
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE2-NEXT: paddq %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: paddq %xmm6, %xmm3
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: paddq %xmm2, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm7
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm7[0,2]
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pxor %xmm5, %xmm5
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm1[0],zero,xmm1[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm0[0],zero,xmm0[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; SSE4-NEXT: paddq %xmm1, %xmm3
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; SSE4-NEXT: paddq %xmm0, %xmm2
+; SSE4-NEXT: paddq %xmm6, %xmm4
+; SSE4-NEXT: paddq %xmm7, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,2]
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE4-NEXT: movaps %xmm1, %xmm0
+; SSE4-NEXT: movaps %xmm4, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm6[2],xmm3[2],xmm6[3],xmm3[3]
+; AVX1-NEXT: vpaddq %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX1-NEXT: vpaddq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm6[0],zero,xmm6[1],zero
+; AVX1-NEXT: vpaddq %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm2[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <8 x i32> %a0 to <8 x i64>
+ %x1 = zext <8 x i32> %a1 to <8 x i64>
+ %sum = add <8 x i64> %x0, %x1
+ %shift = lshr <8 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <8 x i64> %shift to <8 x i32>
+ ret <8 x i32> %res
+}
+
+define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE-LABEL: test_fixed_v4i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: paddq %xmm4, %xmm1
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: paddq %xmm5, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpaddq %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+ %and = and <4 x i64> %a0, %a1
+ %xor = xor <4 x i64> %a1, %a0
+ %shift = lshr <4 x i64> %xor, <i64 1, i64 1, i64 1, i64 1>
+ %res = add <4 x i64> %and, %shift
+ ret <4 x i64> %res
+}
+
+define <4 x i64> @test_ext_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE2-LABEL: test_ext_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm4, %rdi
+; SSE2-NEXT: movq %xmm1, %r9
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %r10
+; SSE2-NEXT: movq %xmm0, %r11
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r8
+; SSE2-NEXT: movq %xmm3, %rsi
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdx
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: xorl %ecx, %ecx
+; SSE2-NEXT: addq %r11, %rax
+; SSE2-NEXT: setb %cl
+; SSE2-NEXT: xorl %r11d, %r11d
+; SSE2-NEXT: addq %r10, %rdx
+; SSE2-NEXT: setb %r11b
+; SSE2-NEXT: xorl %r10d, %r10d
+; SSE2-NEXT: addq %r9, %rsi
+; SSE2-NEXT: setb %r10b
+; SSE2-NEXT: xorl %r9d, %r9d
+; SSE2-NEXT: addq %rdi, %r8
+; SSE2-NEXT: setb %r9b
+; SSE2-NEXT: shldq $63, %r8, %r9
+; SSE2-NEXT: shldq $63, %rsi, %r10
+; SSE2-NEXT: shldq $63, %rdx, %r11
+; SSE2-NEXT: shldq $63, %rax, %rcx
+; SSE2-NEXT: movq %rcx, %xmm0
+; SSE2-NEXT: movq %r11, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: movq %r10, %xmm1
+; SSE2-NEXT: movq %r9, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v4i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movq %xmm1, %r8
+; SSE4-NEXT: pextrq $1, %xmm1, %r9
+; SSE4-NEXT: movq %xmm0, %r10
+; SSE4-NEXT: pextrq $1, %xmm0, %r11
+; SSE4-NEXT: movq %xmm3, %rdi
+; SSE4-NEXT: pextrq $1, %xmm3, %rsi
+; SSE4-NEXT: movq %xmm2, %rdx
+; SSE4-NEXT: pextrq $1, %xmm2, %rax
+; SSE4-NEXT: xorl %ecx, %ecx
+; SSE4-NEXT: addq %r11, %rax
+; SSE4-NEXT: setb %cl
+; SSE4-NEXT: xorl %r11d, %r11d
+; SSE4-NEXT: addq %r10, %rdx
+; SSE4-NEXT: setb %r11b
+; SSE4-NEXT: xorl %r10d, %r10d
+; SSE4-NEXT: addq %r9, %rsi
+; SSE4-NEXT: setb %r10b
+; SSE4-NEXT: xorl %r9d, %r9d
+; SSE4-NEXT: addq %r8, %rdi
+; SSE4-NEXT: setb %r9b
+; SSE4-NEXT: shldq $63, %rdi, %r9
+; SSE4-NEXT: shldq $63, %rsi, %r10
+; SSE4-NEXT: shldq $63, %rdx, %r11
+; SSE4-NEXT: shldq $63, %rax, %rcx
+; SSE4-NEXT: movq %rcx, %xmm1
+; SSE4-NEXT: movq %r11, %xmm0
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE4-NEXT: movq %r10, %xmm2
+; SSE4-NEXT: movq %r9, %xmm1
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovq %xmm0, %rdi
+; AVX1-NEXT: vpextrq $1, %xmm0, %r9
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %r10
+; AVX1-NEXT: vpextrq $1, %xmm0, %r11
+; AVX1-NEXT: vmovq %xmm1, %r8
+; AVX1-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %rdx
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: addq %r11, %rax
+; AVX1-NEXT: setb %cl
+; AVX1-NEXT: xorl %r11d, %r11d
+; AVX1-NEXT: addq %r10, %rdx
+; AVX1-NEXT: setb %r11b
+; AVX1-NEXT: xorl %r10d, %r10d
+; AVX1-NEXT: addq %r9, %rsi
+; AVX1-NEXT: setb %r10b
+; AVX1-NEXT: xorl %r9d, %r9d
+; AVX1-NEXT: addq %rdi, %r8
+; AVX1-NEXT: setb %r9b
+; AVX1-NEXT: shldq $63, %r8, %r9
+; AVX1-NEXT: shldq $63, %rsi, %r10
+; AVX1-NEXT: shldq $63, %rdx, %r11
+; AVX1-NEXT: shldq $63, %rax, %rcx
+; AVX1-NEXT: vmovq %rcx, %xmm0
+; AVX1-NEXT: vmovq %r11, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vmovq %r10, %xmm1
+; AVX1-NEXT: vmovq %r9, %xmm2
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovq %xmm0, %rdi
+; AVX2-NEXT: vpextrq $1, %xmm0, %r9
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %r10
+; AVX2-NEXT: vpextrq $1, %xmm0, %r11
+; AVX2-NEXT: vmovq %xmm1, %r8
+; AVX2-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rdx
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: addq %r11, %rax
+; AVX2-NEXT: setb %cl
+; AVX2-NEXT: xorl %r11d, %r11d
+; AVX2-NEXT: addq %r10, %rdx
+; AVX2-NEXT: setb %r11b
+; AVX2-NEXT: xorl %r10d, %r10d
+; AVX2-NEXT: addq %r9, %rsi
+; AVX2-NEXT: setb %r10b
+; AVX2-NEXT: xorl %r9d, %r9d
+; AVX2-NEXT: addq %rdi, %r8
+; AVX2-NEXT: setb %r9b
+; AVX2-NEXT: shldq $63, %r8, %r9
+; AVX2-NEXT: shldq $63, %rsi, %r10
+; AVX2-NEXT: shldq $63, %rdx, %r11
+; AVX2-NEXT: shldq $63, %rax, %rcx
+; AVX2-NEXT: vmovq %rcx, %xmm0
+; AVX2-NEXT: vmovq %r11, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vmovq %r10, %xmm1
+; AVX2-NEXT: vmovq %r9, %xmm2
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovq %xmm0, %rsi
+; AVX512-NEXT: vpextrq $1, %xmm0, %r9
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %r10
+; AVX512-NEXT: vpextrq $1, %xmm0, %r11
+; AVX512-NEXT: vmovq %xmm1, %rdi
+; AVX512-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX512-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512-NEXT: vmovq %xmm0, %r8
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: addq %r11, %rax
+; AVX512-NEXT: setb %dl
+; AVX512-NEXT: xorl %r11d, %r11d
+; AVX512-NEXT: addq %r10, %r8
+; AVX512-NEXT: setb %r11b
+; AVX512-NEXT: xorl %r10d, %r10d
+; AVX512-NEXT: addq %r9, %rcx
+; AVX512-NEXT: setb %r10b
+; AVX512-NEXT: xorl %r9d, %r9d
+; AVX512-NEXT: addq %rsi, %rdi
+; AVX512-NEXT: setb %r9b
+; AVX512-NEXT: shldq $63, %rdi, %r9
+; AVX512-NEXT: shldq $63, %rcx, %r10
+; AVX512-NEXT: shldq $63, %r8, %r11
+; AVX512-NEXT: shldq $63, %rax, %rdx
+; AVX512-NEXT: vmovq %rdx, %xmm0
+; AVX512-NEXT: vmovq %r11, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vmovq %r10, %xmm1
+; AVX512-NEXT: vmovq %r9, %xmm2
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+ %x0 = zext <4 x i64> %a0 to <4 x i128>
+ %x1 = zext <4 x i64> %a1 to <4 x i128>
+ %sum = add <4 x i128> %x0, %x1
+ %shift = lshr <4 x i128> %sum, <i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <4 x i128> %shift to <4 x i64>
+ ret <4 x i64> %res
+}
+
+;
+; 512-bit vectors
+;
+
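+; The test_fixed_* cases below use the bitwise floor-average identity
+; avg(a, b) = (a & b) + ((a ^ b) >> 1); the matching test_ext_* cases compute
+; the same value by zero-extending to twice the element width, adding,
+; shifting right by one, and truncating back to the original element type.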
+define <64 x i8> @test_fixed_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE-LABEL: test_fixed_v64i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm9
+; SSE-NEXT: pand %xmm7, %xmm9
+; SSE-NEXT: movdqa %xmm2, %xmm10
+; SSE-NEXT: pand %xmm6, %xmm10
+; SSE-NEXT: movdqa %xmm1, %xmm11
+; SSE-NEXT: pand %xmm5, %xmm11
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrlw $1, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: paddb %xmm9, %xmm3
+; SSE-NEXT: psrlw $1, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: paddb %xmm10, %xmm2
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: paddb %xmm11, %xmm1
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: paddb %xmm8, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm6
+; AVX1-NEXT: vpand %xmm3, %xmm6, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
+; AVX1-NEXT: vpaddb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm6, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: vpaddb %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %and = and <64 x i8> %a0, %a1
+ %xor = xor <64 x i8> %a0, %a1
+ %shift = lshr <64 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %res = add <64 x i8> %and, %shift
+ ret <64 x i8> %res
+}
+
+define <64 x i8> @test_ext_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
+; SSE2-LABEL: test_ext_v64i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: movdqa %xmm3, %xmm10
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
+; SSE2-NEXT: movdqa %xmm2, %xmm11
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm8[8],xmm11[9],xmm8[9],xmm11[10],xmm8[10],xmm11[11],xmm8[11],xmm11[12],xmm8[12],xmm11[13],xmm8[13],xmm11[14],xmm8[14],xmm11[15],xmm8[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm12
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm8[8],xmm12[9],xmm8[9],xmm12[10],xmm8[10],xmm12[11],xmm8[11],xmm12[12],xmm8[12],xmm12[13],xmm8[13],xmm12[14],xmm8[14],xmm12[15],xmm8[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm13
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm8[8],xmm13[9],xmm8[9],xmm13[10],xmm8[10],xmm13[11],xmm8[11],xmm13[12],xmm8[12],xmm13[13],xmm8[13],xmm13[14],xmm8[14],xmm13[15],xmm8[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; SSE2-NEXT: movdqa %xmm7, %xmm9
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm8[8],xmm9[9],xmm8[9],xmm9[10],xmm8[10],xmm9[11],xmm8[11],xmm9[12],xmm8[12],xmm9[13],xmm8[13],xmm9[14],xmm8[14],xmm9[15],xmm8[15]
+; SSE2-NEXT: paddw %xmm10, %xmm9
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; SSE2-NEXT: paddw %xmm7, %xmm3
+; SSE2-NEXT: movdqa %xmm6, %xmm7
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm8[8],xmm7[9],xmm8[9],xmm7[10],xmm8[10],xmm7[11],xmm8[11],xmm7[12],xmm8[12],xmm7[13],xmm8[13],xmm7[14],xmm8[14],xmm7[15],xmm8[15]
+; SSE2-NEXT: paddw %xmm11, %xmm7
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
+; SSE2-NEXT: paddw %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15]
+; SSE2-NEXT: paddw %xmm12, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3],xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
+; SSE2-NEXT: paddw %xmm5, %xmm1
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15]
+; SSE2-NEXT: paddw %xmm13, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
+; SSE2-NEXT: paddw %xmm4, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm9
+; SSE2-NEXT: psrlw $1, %xmm3
+; SSE2-NEXT: packuswb %xmm9, %xmm3
+; SSE2-NEXT: psrlw $1, %xmm7
+; SSE2-NEXT: psrlw $1, %xmm2
+; SSE2-NEXT: packuswb %xmm7, %xmm2
+; SSE2-NEXT: psrlw $1, %xmm6
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: packuswb %xmm6, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm5
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: packuswb %xmm5, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v64i8:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm3, %xmm9
+; SSE4-NEXT: movdqa %xmm2, %xmm10
+; SSE4-NEXT: movdqa %xmm1, %xmm11
+; SSE4-NEXT: movdqa %xmm0, %xmm8
+; SSE4-NEXT: pxor %xmm13, %xmm13
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm13[8],xmm9[9],xmm13[9],xmm9[10],xmm13[10],xmm9[11],xmm13[11],xmm9[12],xmm13[12],xmm9[13],xmm13[13],xmm9[14],xmm13[14],xmm9[15],xmm13[15]
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm14 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm13[8],xmm10[9],xmm13[9],xmm10[10],xmm13[10],xmm10[11],xmm13[11],xmm10[12],xmm13[12],xmm10[13],xmm13[13],xmm10[14],xmm13[14],xmm10[15],xmm13[15]
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm15 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm13[8],xmm11[9],xmm13[9],xmm11[10],xmm13[10],xmm11[11],xmm13[11],xmm11[12],xmm13[12],xmm11[13],xmm13[13],xmm11[14],xmm13[14],xmm11[15],xmm13[15]
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm12 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero,xmm8[4],zero,xmm8[5],zero,xmm8[6],zero,xmm8[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm13[8],xmm8[9],xmm13[9],xmm8[10],xmm13[10],xmm8[11],xmm13[11],xmm8[12],xmm13[12],xmm8[13],xmm13[13],xmm8[14],xmm13[14],xmm8[15],xmm13[15]
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm13[8],xmm7[9],xmm13[9],xmm7[10],xmm13[10],xmm7[11],xmm13[11],xmm7[12],xmm13[12],xmm7[13],xmm13[13],xmm7[14],xmm13[14],xmm7[15],xmm13[15]
+; SSE4-NEXT: paddw %xmm9, %xmm7
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm13[8],xmm6[9],xmm13[9],xmm6[10],xmm13[10],xmm6[11],xmm13[11],xmm6[12],xmm13[12],xmm6[13],xmm13[13],xmm6[14],xmm13[14],xmm6[15],xmm13[15]
+; SSE4-NEXT: paddw %xmm10, %xmm6
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm13[8],xmm5[9],xmm13[9],xmm5[10],xmm13[10],xmm5[11],xmm13[11],xmm5[12],xmm13[12],xmm5[13],xmm13[13],xmm5[14],xmm13[14],xmm5[15],xmm13[15]
+; SSE4-NEXT: paddw %xmm11, %xmm5
+; SSE4-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; SSE4-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm13[8],xmm4[9],xmm13[9],xmm4[10],xmm13[10],xmm4[11],xmm13[11],xmm4[12],xmm13[12],xmm4[13],xmm13[13],xmm4[14],xmm13[14],xmm4[15],xmm13[15]
+; SSE4-NEXT: paddw %xmm8, %xmm4
+; SSE4-NEXT: paddw {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE4-NEXT: paddw %xmm14, %xmm2
+; SSE4-NEXT: paddw %xmm15, %xmm1
+; SSE4-NEXT: paddw %xmm12, %xmm0
+; SSE4-NEXT: psrlw $1, %xmm7
+; SSE4-NEXT: psrlw $1, %xmm6
+; SSE4-NEXT: psrlw $1, %xmm5
+; SSE4-NEXT: psrlw $1, %xmm4
+; SSE4-NEXT: psrlw $1, %xmm3
+; SSE4-NEXT: packuswb %xmm7, %xmm3
+; SSE4-NEXT: psrlw $1, %xmm2
+; SSE4-NEXT: packuswb %xmm6, %xmm2
+; SSE4-NEXT: psrlw $1, %xmm1
+; SSE4-NEXT: packuswb %xmm5, %xmm1
+; SSE4-NEXT: psrlw $1, %xmm0
+; SSE4-NEXT: packuswb %xmm4, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm8 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm9
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm9[8],xmm4[8],xmm9[9],xmm4[9],xmm9[10],xmm4[10],xmm9[11],xmm4[11],xmm9[12],xmm4[12],xmm9[13],xmm4[13],xmm9[14],xmm4[14],xmm9[15],xmm4[15]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm9 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero,xmm9[4],zero,xmm9[5],zero,xmm9[6],zero,xmm9[7],zero
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; AVX1-NEXT: vpaddw %xmm5, %xmm11, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm11
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm11[8],xmm4[8],xmm11[9],xmm4[9],xmm11[10],xmm4[10],xmm11[11],xmm4[11],xmm11[12],xmm4[12],xmm11[13],xmm4[13],xmm11[14],xmm4[14],xmm11[15],xmm4[15]
+; AVX1-NEXT: vpaddw %xmm7, %xmm12, %xmm7
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
+; AVX1-NEXT: vpaddw %xmm12, %xmm8, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm12
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm12[8],xmm4[8],xmm12[9],xmm4[9],xmm12[10],xmm4[10],xmm12[11],xmm4[11],xmm12[12],xmm4[12],xmm12[13],xmm4[13],xmm12[14],xmm4[14],xmm12[15],xmm4[15]
+; AVX1-NEXT: vpaddw %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm11[0],zero,xmm11[1],zero,xmm11[2],zero,xmm11[3],zero,xmm11[4],zero,xmm11[5],zero,xmm11[6],zero,xmm11[7],zero
+; AVX1-NEXT: vpaddw %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm12[0],zero,xmm12[1],zero,xmm12[2],zero,xmm12[3],zero,xmm12[4],zero,xmm12[5],zero,xmm12[6],zero,xmm12[7],zero
+; AVX1-NEXT: vpaddw %xmm2, %xmm9, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrlw $1, %xmm7, %xmm6
+; AVX1-NEXT: vpsrlw $1, %xmm8, %xmm7
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpackuswb %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v64i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero,xmm6[8],zero,xmm6[9],zero,xmm6[10],zero,xmm6[11],zero,xmm6[12],zero,xmm6[13],zero,xmm6[14],zero,xmm6[15],zero
+; AVX2-NEXT: vpaddw %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX2-NEXT: vpaddw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX2-NEXT: vpaddw %ymm3, %ymm5, %ymm3
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm4, %ymm2
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm3, %ymm2
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v64i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512-NEXT: vpaddw %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm2, %zmm1
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = zext <64 x i8> %a0 to <64 x i16>
+ %x1 = zext <64 x i8> %a1 to <64 x i16>
+ %sum = add <64 x i16> %x0, %x1
+ %shift = lshr <64 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = trunc <64 x i16> %shift to <64 x i8>
+ ret <64 x i8> %res
+}
+
+define <32 x i16> @test_fixed_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE-LABEL: test_fixed_v32i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: pand %xmm7, %xmm8
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pand %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: pand %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrlw $1, %xmm3
+; SSE-NEXT: paddw %xmm8, %xmm3
+; SSE-NEXT: psrlw $1, %xmm2
+; SSE-NEXT: paddw %xmm9, %xmm2
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: paddw %xmm10, %xmm1
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: paddw %xmm11, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpaddw %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpaddw %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddw %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %and = and <32 x i16> %a0, %a1
+ %xor = xor <32 x i16> %a1, %a0
+ %shift = lshr <32 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <32 x i16> %and, %shift
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_ext_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
+; SSE2-LABEL: test_ext_v32i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: movdqa %xmm0, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm11
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm8[4],xmm11[5],xmm8[5],xmm11[6],xmm8[6],xmm11[7],xmm8[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
+; SSE2-NEXT: movdqa %xmm2, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm8[4],xmm12[5],xmm8[5],xmm12[6],xmm8[6],xmm12[7],xmm8[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; SSE2-NEXT: movdqa %xmm3, %xmm13
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; SSE2-NEXT: movdqa %xmm4, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
+; SSE2-NEXT: paddd %xmm9, %xmm10
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
+; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm5, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; SSE2-NEXT: paddd %xmm11, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3]
+; SSE2-NEXT: paddd %xmm5, %xmm1
+; SSE2-NEXT: movdqa %xmm6, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
+; SSE2-NEXT: paddd %xmm12, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3]
+; SSE2-NEXT: paddd %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm7, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
+; SSE2-NEXT: paddd %xmm13, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; SSE2-NEXT: paddd %xmm7, %xmm3
+; SSE2-NEXT: pslld $15, %xmm10
+; SSE2-NEXT: psrad $16, %xmm10
+; SSE2-NEXT: pslld $15, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm10, %xmm0
+; SSE2-NEXT: pslld $15, %xmm9
+; SSE2-NEXT: psrad $16, %xmm9
+; SSE2-NEXT: pslld $15, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm9, %xmm1
+; SSE2-NEXT: pslld $15, %xmm5
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: pslld $15, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: packssdw %xmm5, %xmm2
+; SSE2-NEXT: pslld $15, %xmm4
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: pslld $15, %xmm3
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: packssdw %xmm4, %xmm3
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v32i16:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm3, %xmm9
+; SSE4-NEXT: movdqa %xmm2, %xmm10
+; SSE4-NEXT: movdqa %xmm1, %xmm11
+; SSE4-NEXT: movdqa %xmm0, %xmm8
+; SSE4-NEXT: pxor %xmm13, %xmm13
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm14 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm13[4],xmm10[5],xmm13[5],xmm10[6],xmm13[6],xmm10[7],xmm13[7]
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm15 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm13[4],xmm11[5],xmm13[5],xmm11[6],xmm13[6],xmm11[7],xmm13[7]
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm12 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm13[4],xmm8[5],xmm13[5],xmm8[6],xmm13[6],xmm8[7],xmm13[7]
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
+; SSE4-NEXT: paddd %xmm9, %xmm7
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
+; SSE4-NEXT: paddd %xmm10, %xmm6
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm13[4],xmm5[5],xmm13[5],xmm5[6],xmm13[6],xmm5[7],xmm13[7]
+; SSE4-NEXT: paddd %xmm11, %xmm5
+; SSE4-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; SSE4-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
+; SSE4-NEXT: paddd %xmm8, %xmm4
+; SSE4-NEXT: paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE4-NEXT: paddd %xmm14, %xmm2
+; SSE4-NEXT: paddd %xmm15, %xmm1
+; SSE4-NEXT: paddd %xmm12, %xmm0
+; SSE4-NEXT: psrld $1, %xmm7
+; SSE4-NEXT: psrld $1, %xmm6
+; SSE4-NEXT: psrld $1, %xmm5
+; SSE4-NEXT: psrld $1, %xmm4
+; SSE4-NEXT: psrld $1, %xmm3
+; SSE4-NEXT: packusdw %xmm7, %xmm3
+; SSE4-NEXT: psrld $1, %xmm2
+; SSE4-NEXT: packusdw %xmm6, %xmm2
+; SSE4-NEXT: psrld $1, %xmm1
+; SSE4-NEXT: packusdw %xmm5, %xmm1
+; SSE4-NEXT: psrld $1, %xmm0
+; SSE4-NEXT: packusdw %xmm4, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v32i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm9
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm10 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm9 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm11 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; AVX1-NEXT: vpaddd %xmm5, %xmm11, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm11
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
+; AVX1-NEXT: vpaddd %xmm7, %xmm12, %xmm7
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX1-NEXT: vpaddd %xmm12, %xmm8, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm12
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; AVX1-NEXT: vpaddd %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm11[0],zero,xmm11[1],zero,xmm11[2],zero,xmm11[3],zero
+; AVX1-NEXT: vpaddd %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm12[0],zero,xmm12[1],zero,xmm12[2],zero,xmm12[3],zero
+; AVX1-NEXT: vpaddd %xmm2, %xmm9, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrld $1, %xmm7, %xmm6
+; AVX1-NEXT: vpsrld $1, %xmm8, %xmm7
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpackusdw %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v32i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; AVX2-NEXT: vpaddd %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX2-NEXT: vpaddd %ymm3, %ymm5, %ymm3
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm4, %ymm2
+; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm3, %ymm2
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpackusdw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v32i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
+; AVX512-NEXT: vpaddd %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm2, %zmm1
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = zext <32 x i16> %a0 to <32 x i32>
+ %x1 = zext <32 x i16> %a1 to <32 x i32>
+ %sum = add <32 x i32> %x0, %x1
+ %shift = lshr <32 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = trunc <32 x i32> %shift to <32 x i16>
+ ret <32 x i16> %res
+}
+
+define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE-LABEL: test_fixed_v16i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: pand %xmm7, %xmm8
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pand %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: pand %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrld $1, %xmm3
+; SSE-NEXT: paddd %xmm8, %xmm3
+; SSE-NEXT: psrld $1, %xmm2
+; SSE-NEXT: paddd %xmm9, %xmm2
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: paddd %xmm10, %xmm1
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: paddd %xmm11, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpaddd %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpaddd %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxord %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddd %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %and = and <16 x i32> %a0, %a1
+ %xor = xor <16 x i32> %a1, %a0
+ %shift = lshr <16 x i32> %xor, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %res = add <16 x i32> %and, %shift
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) {
+; SSE2-LABEL: test_ext_v16i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: movdqa %xmm3, %xmm10
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm10 = xmm10[2],xmm8[2],xmm10[3],xmm8[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
+; SSE2-NEXT: movdqa %xmm2, %xmm11
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm11 = xmm11[2],xmm8[2],xmm11[3],xmm8[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
+; SSE2-NEXT: movdqa %xmm1, %xmm12
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm12 = xmm12[2],xmm8[2],xmm12[3],xmm8[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm13
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm13 = xmm13[2],xmm8[2],xmm13[3],xmm8[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
+; SSE2-NEXT: movdqa %xmm7, %xmm9
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
+; SSE2-NEXT: paddq %xmm10, %xmm9
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
+; SSE2-NEXT: paddq %xmm7, %xmm3
+; SSE2-NEXT: movdqa %xmm6, %xmm7
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; SSE2-NEXT: paddq %xmm11, %xmm7
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
+; SSE2-NEXT: paddq %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm8[2],xmm6[3],xmm8[3]
+; SSE2-NEXT: paddq %xmm12, %xmm6
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
+; SSE2-NEXT: paddq %xmm5, %xmm1
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm8[2],xmm5[3],xmm8[3]
+; SSE2-NEXT: paddq %xmm13, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
+; SSE2-NEXT: paddq %xmm4, %xmm0
+; SSE2-NEXT: psrlq $1, %xmm9
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm9[0,2]
+; SSE2-NEXT: psrlq $1, %xmm7
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm7[0,2]
+; SSE2-NEXT: psrlq $1, %xmm6
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm6[0,2]
+; SSE2-NEXT: psrlq $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v16i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm3, %xmm9
+; SSE4-NEXT: movdqa %xmm2, %xmm10
+; SSE4-NEXT: movdqa %xmm1, %xmm11
+; SSE4-NEXT: movdqa %xmm0, %xmm8
+; SSE4-NEXT: pxor %xmm13, %xmm13
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero
+; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm13[2],xmm9[3],xmm13[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm14 = xmm2[0],zero,xmm2[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm10 = xmm10[2],xmm13[2],xmm10[3],xmm13[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm15 = xmm1[0],zero,xmm1[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm11 = xmm11[2],xmm13[2],xmm11[3],xmm13[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm12 = xmm8[0],zero,xmm8[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm13[2],xmm8[3],xmm13[3]
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm7[0],zero,xmm7[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm13[2],xmm7[3],xmm13[3]
+; SSE4-NEXT: paddq %xmm9, %xmm7
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm6[0],zero,xmm6[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm13[2],xmm6[3],xmm13[3]
+; SSE4-NEXT: paddq %xmm10, %xmm6
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm13[2],xmm5[3],xmm13[3]
+; SSE4-NEXT: paddq %xmm11, %xmm5
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero
+; SSE4-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm13[2],xmm4[3],xmm13[3]
+; SSE4-NEXT: paddq %xmm8, %xmm4
+; SSE4-NEXT: paddq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE4-NEXT: paddq %xmm14, %xmm2
+; SSE4-NEXT: paddq %xmm15, %xmm1
+; SSE4-NEXT: paddq %xmm12, %xmm0
+; SSE4-NEXT: psrlq $1, %xmm7
+; SSE4-NEXT: psrlq $1, %xmm6
+; SSE4-NEXT: psrlq $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm3
+; SSE4-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm7[0,2]
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[0,2]
+; SSE4-NEXT: psrlq $1, %xmm1
+; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
+; SSE4-NEXT: psrlq $1, %xmm0
+; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v16i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm8
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm9 = xmm8[2],xmm5[2],xmm8[3],xmm5[3]
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm10 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm8 = xmm8[0],zero,xmm8[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm11
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm12 = xmm11[2],xmm5[2],xmm11[3],xmm5[3]
+; AVX1-NEXT: vpaddq %xmm6, %xmm12, %xmm6
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm12 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX1-NEXT: vpaddq %xmm7, %xmm12, %xmm7
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm12
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm13 = xmm12[2],xmm5[2],xmm12[3],xmm5[3]
+; AVX1-NEXT: vpaddq %xmm13, %xmm9, %xmm9
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; AVX1-NEXT: vpaddq %xmm5, %xmm10, %xmm5
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm11[0],zero,xmm11[1],zero
+; AVX1-NEXT: vpaddq %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm12[0],zero,xmm12[1],zero
+; AVX1-NEXT: vpaddq %xmm3, %xmm8, %xmm3
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm6, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm7, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm9, %xmm7
+; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm6, %ymm2
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX2-NEXT: vpaddq %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm6
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX2-NEXT: vpaddq %ymm6, %ymm5, %ymm5
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm5[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],ymm4[2,3]
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero
+; AVX512-NEXT: vpaddq %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm2, %zmm1
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x0 = zext <16 x i32> %a0 to <16 x i64>
+ %x1 = zext <16 x i32> %a1 to <16 x i64>
+ %sum = add <16 x i64> %x0, %x1
+ %shift = lshr <16 x i64> %sum, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = trunc <16 x i64> %shift to <16 x i32>
+ ret <16 x i32> %res
+}
+
+define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE-LABEL: test_fixed_v8i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: pand %xmm7, %xmm8
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pand %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: pand %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrlq $1, %xmm3
+; SSE-NEXT: paddq %xmm8, %xmm3
+; SSE-NEXT: psrlq $1, %xmm2
+; SSE-NEXT: paddq %xmm9, %xmm2
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: paddq %xmm10, %xmm1
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: paddq %xmm11, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_fixed_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpaddq %xmm0, %xmm6, %xmm0
+; AVX1-NEXT: vpaddq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_fixed_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_fixed_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddq %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %and = and <8 x i64> %a0, %a1
+ %xor = xor <8 x i64> %a1, %a0
+ %shift = lshr <8 x i64> %xor, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %res = add <8 x i64> %and, %shift
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_ext_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
+; SSE2-LABEL: test_ext_v8i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 56
+; SSE2-NEXT: .cfi_offset %rbx, -56
+; SSE2-NEXT: .cfi_offset %r12, -48
+; SSE2-NEXT: .cfi_offset %r13, -40
+; SSE2-NEXT: .cfi_offset %r14, -32
+; SSE2-NEXT: .cfi_offset %r15, -24
+; SSE2-NEXT: .cfi_offset %rbp, -16
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm3, %rbx
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm3, %r12
+; SSE2-NEXT: movq %xmm2, %rbp
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %r13
+; SSE2-NEXT: movq %xmm1, %r15
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %r14
+; SSE2-NEXT: movq %xmm0, %r11
+; SSE2-NEXT: movq %xmm7, %r10
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %r9
+; SSE2-NEXT: movq %xmm6, %r8
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdi
+; SSE2-NEXT: movq %xmm5, %rsi
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rdx
+; SSE2-NEXT: movq %xmm4, %rax
+; SSE2-NEXT: xorl %ecx, %ecx
+; SSE2-NEXT: addq %r11, %rax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: setb %cl
+; SSE2-NEXT: xorl %r11d, %r11d
+; SSE2-NEXT: addq %r14, %rdx
+; SSE2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: setb %r11b
+; SSE2-NEXT: xorl %r14d, %r14d
+; SSE2-NEXT: addq %r15, %rsi
+; SSE2-NEXT: setb %r14b
+; SSE2-NEXT: xorl %r15d, %r15d
+; SSE2-NEXT: addq %r13, %rdi
+; SSE2-NEXT: setb %r15b
+; SSE2-NEXT: xorl %r13d, %r13d
+; SSE2-NEXT: addq %rbp, %r8
+; SSE2-NEXT: setb %r13b
+; SSE2-NEXT: xorl %ebp, %ebp
+; SSE2-NEXT: addq %r12, %r9
+; SSE2-NEXT: setb %bpl
+; SSE2-NEXT: xorl %r12d, %r12d
+; SSE2-NEXT: addq %rbx, %r10
+; SSE2-NEXT: movq %xmm8, %rdx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
+; SSE2-NEXT: setb %r12b
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: xorl %ebx, %ebx
+; SSE2-NEXT: addq %rdx, %rax
+; SSE2-NEXT: setb %bl
+; SSE2-NEXT: shldq $63, %rax, %rbx
+; SSE2-NEXT: shldq $63, %r10, %r12
+; SSE2-NEXT: shldq $63, %r9, %rbp
+; SSE2-NEXT: shldq $63, %r8, %r13
+; SSE2-NEXT: shldq $63, %rdi, %r15
+; SSE2-NEXT: shldq $63, %rsi, %r14
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE2-NEXT: shldq $63, %rax, %r11
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE2-NEXT: shldq $63, %rax, %rcx
+; SSE2-NEXT: movq %rcx, %xmm0
+; SSE2-NEXT: movq %r11, %xmm4
+; SSE2-NEXT: movq %r14, %xmm1
+; SSE2-NEXT: movq %r15, %xmm5
+; SSE2-NEXT: movq %r13, %xmm2
+; SSE2-NEXT: movq %rbp, %xmm6
+; SSE2-NEXT: movq %r12, %xmm3
+; SSE2-NEXT: movq %rbx, %xmm7
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: .cfi_def_cfa_offset 48
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: .cfi_def_cfa_offset 40
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: .cfi_def_cfa_offset 32
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: test_ext_v8i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pushq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: pushq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: pushq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: pushq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: pushq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: pushq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 56
+; SSE4-NEXT: .cfi_offset %rbx, -56
+; SSE4-NEXT: .cfi_offset %r12, -48
+; SSE4-NEXT: .cfi_offset %r13, -40
+; SSE4-NEXT: .cfi_offset %r14, -32
+; SSE4-NEXT: .cfi_offset %r15, -24
+; SSE4-NEXT: .cfi_offset %rbp, -16
+; SSE4-NEXT: pextrq $1, %xmm3, %r14
+; SSE4-NEXT: movq %xmm2, %r13
+; SSE4-NEXT: pextrq $1, %xmm2, %rbp
+; SSE4-NEXT: movq %xmm1, %r12
+; SSE4-NEXT: pextrq $1, %xmm1, %r15
+; SSE4-NEXT: movq %xmm0, %rbx
+; SSE4-NEXT: pextrq $1, %xmm0, %r11
+; SSE4-NEXT: pextrq $1, %xmm7, %r10
+; SSE4-NEXT: movq %xmm6, %r9
+; SSE4-NEXT: pextrq $1, %xmm6, %r8
+; SSE4-NEXT: movq %xmm5, %rdi
+; SSE4-NEXT: pextrq $1, %xmm5, %rsi
+; SSE4-NEXT: movq %xmm4, %rdx
+; SSE4-NEXT: pextrq $1, %xmm4, %rax
+; SSE4-NEXT: xorl %ecx, %ecx
+; SSE4-NEXT: addq %r11, %rax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: setb %cl
+; SSE4-NEXT: xorl %r11d, %r11d
+; SSE4-NEXT: addq %rbx, %rdx
+; SSE4-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: setb %r11b
+; SSE4-NEXT: xorl %ebx, %ebx
+; SSE4-NEXT: addq %r15, %rsi
+; SSE4-NEXT: setb %bl
+; SSE4-NEXT: xorl %r15d, %r15d
+; SSE4-NEXT: addq %r12, %rdi
+; SSE4-NEXT: setb %r15b
+; SSE4-NEXT: xorl %r12d, %r12d
+; SSE4-NEXT: addq %rbp, %r8
+; SSE4-NEXT: setb %r12b
+; SSE4-NEXT: xorl %ebp, %ebp
+; SSE4-NEXT: addq %r13, %r9
+; SSE4-NEXT: setb %bpl
+; SSE4-NEXT: xorl %r13d, %r13d
+; SSE4-NEXT: addq %r14, %r10
+; SSE4-NEXT: movq %xmm3, %rdx
+; SSE4-NEXT: setb %r13b
+; SSE4-NEXT: movq %xmm7, %rax
+; SSE4-NEXT: xorl %r14d, %r14d
+; SSE4-NEXT: addq %rdx, %rax
+; SSE4-NEXT: setb %r14b
+; SSE4-NEXT: shldq $63, %rax, %r14
+; SSE4-NEXT: shldq $63, %r10, %r13
+; SSE4-NEXT: shldq $63, %r9, %rbp
+; SSE4-NEXT: shldq $63, %r8, %r12
+; SSE4-NEXT: shldq $63, %rdi, %r15
+; SSE4-NEXT: shldq $63, %rsi, %rbx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE4-NEXT: shldq $63, %rax, %r11
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE4-NEXT: shldq $63, %rax, %rcx
+; SSE4-NEXT: movq %rcx, %xmm4
+; SSE4-NEXT: movq %r11, %xmm0
+; SSE4-NEXT: movq %rbx, %xmm5
+; SSE4-NEXT: movq %r15, %xmm1
+; SSE4-NEXT: movq %r12, %xmm6
+; SSE4-NEXT: movq %rbp, %xmm2
+; SSE4-NEXT: movq %r13, %xmm7
+; SSE4-NEXT: movq %r14, %xmm3
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE4-NEXT: popq %rbx
+; SSE4-NEXT: .cfi_def_cfa_offset 48
+; SSE4-NEXT: popq %r12
+; SSE4-NEXT: .cfi_def_cfa_offset 40
+; SSE4-NEXT: popq %r13
+; SSE4-NEXT: .cfi_def_cfa_offset 32
+; SSE4-NEXT: popq %r14
+; SSE4-NEXT: .cfi_def_cfa_offset 24
+; SSE4-NEXT: popq %r15
+; SSE4-NEXT: .cfi_def_cfa_offset 16
+; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: .cfi_def_cfa_offset 8
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: test_ext_v8i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: vpextrq $1, %xmm1, %rbx
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vmovq %xmm4, %r15
+; AVX1-NEXT: vpextrq $1, %xmm4, %rbp
+; AVX1-NEXT: vmovq %xmm0, %r13
+; AVX1-NEXT: vpextrq $1, %xmm0, %r12
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %r14
+; AVX1-NEXT: vpextrq $1, %xmm0, %r11
+; AVX1-NEXT: vpextrq $1, %xmm3, %r10
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %r9
+; AVX1-NEXT: vpextrq $1, %xmm0, %r8
+; AVX1-NEXT: vmovq %xmm2, %rdi
+; AVX1-NEXT: vpextrq $1, %xmm2, %rsi
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %rdx
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: addq %r11, %rax
+; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: setb %cl
+; AVX1-NEXT: xorl %r11d, %r11d
+; AVX1-NEXT: addq %r14, %rdx
+; AVX1-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT: setb %r11b
+; AVX1-NEXT: xorl %r14d, %r14d
+; AVX1-NEXT: addq %r12, %rsi
+; AVX1-NEXT: setb %r14b
+; AVX1-NEXT: xorl %r12d, %r12d
+; AVX1-NEXT: addq %r13, %rdi
+; AVX1-NEXT: setb %r12b
+; AVX1-NEXT: xorl %r13d, %r13d
+; AVX1-NEXT: addq %rbp, %r8
+; AVX1-NEXT: setb %r13b
+; AVX1-NEXT: xorl %ebp, %ebp
+; AVX1-NEXT: addq %r15, %r9
+; AVX1-NEXT: setb %bpl
+; AVX1-NEXT: xorl %r15d, %r15d
+; AVX1-NEXT: addq %rbx, %r10
+; AVX1-NEXT: vmovq %xmm1, %rdx
+; AVX1-NEXT: setb %r15b
+; AVX1-NEXT: vmovq %xmm3, %rax
+; AVX1-NEXT: xorl %ebx, %ebx
+; AVX1-NEXT: addq %rdx, %rax
+; AVX1-NEXT: setb %bl
+; AVX1-NEXT: shldq $63, %rax, %rbx
+; AVX1-NEXT: shldq $63, %r10, %r15
+; AVX1-NEXT: shldq $63, %r9, %rbp
+; AVX1-NEXT: shldq $63, %r8, %r13
+; AVX1-NEXT: shldq $63, %rdi, %r12
+; AVX1-NEXT: shldq $63, %rsi, %r14
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX1-NEXT: shldq $63, %rax, %r11
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX1-NEXT: shldq $63, %rax, %rcx
+; AVX1-NEXT: vmovq %rcx, %xmm0
+; AVX1-NEXT: vmovq %r11, %xmm1
+; AVX1-NEXT: vmovq %r14, %xmm2
+; AVX1-NEXT: vmovq %r12, %xmm3
+; AVX1-NEXT: vmovq %r13, %xmm4
+; AVX1-NEXT: vmovq %rbp, %xmm5
+; AVX1-NEXT: vmovq %r15, %xmm6
+; AVX1-NEXT: vmovq %rbx, %xmm7
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: .cfi_def_cfa_offset 8
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: vpextrq $1, %xmm1, %rbx
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-NEXT: vmovq %xmm4, %r15
+; AVX2-NEXT: vpextrq $1, %xmm4, %rbp
+; AVX2-NEXT: vmovq %xmm0, %r13
+; AVX2-NEXT: vpextrq $1, %xmm0, %r12
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %r14
+; AVX2-NEXT: vpextrq $1, %xmm0, %r11
+; AVX2-NEXT: vpextrq $1, %xmm3, %r10
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %r9
+; AVX2-NEXT: vpextrq $1, %xmm0, %r8
+; AVX2-NEXT: vmovq %xmm2, %rdi
+; AVX2-NEXT: vpextrq $1, %xmm2, %rsi
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rdx
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: addq %r11, %rax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: setb %cl
+; AVX2-NEXT: xorl %r11d, %r11d
+; AVX2-NEXT: addq %r14, %rdx
+; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: setb %r11b
+; AVX2-NEXT: xorl %r14d, %r14d
+; AVX2-NEXT: addq %r12, %rsi
+; AVX2-NEXT: setb %r14b
+; AVX2-NEXT: xorl %r12d, %r12d
+; AVX2-NEXT: addq %r13, %rdi
+; AVX2-NEXT: setb %r12b
+; AVX2-NEXT: xorl %r13d, %r13d
+; AVX2-NEXT: addq %rbp, %r8
+; AVX2-NEXT: setb %r13b
+; AVX2-NEXT: xorl %ebp, %ebp
+; AVX2-NEXT: addq %r15, %r9
+; AVX2-NEXT: setb %bpl
+; AVX2-NEXT: xorl %r15d, %r15d
+; AVX2-NEXT: addq %rbx, %r10
+; AVX2-NEXT: vmovq %xmm1, %rdx
+; AVX2-NEXT: setb %r15b
+; AVX2-NEXT: vmovq %xmm3, %rax
+; AVX2-NEXT: xorl %ebx, %ebx
+; AVX2-NEXT: addq %rdx, %rax
+; AVX2-NEXT: setb %bl
+; AVX2-NEXT: shldq $63, %rax, %rbx
+; AVX2-NEXT: shldq $63, %r10, %r15
+; AVX2-NEXT: shldq $63, %r9, %rbp
+; AVX2-NEXT: shldq $63, %r8, %r13
+; AVX2-NEXT: shldq $63, %rdi, %r12
+; AVX2-NEXT: shldq $63, %rsi, %r14
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT: shldq $63, %rax, %r11
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT: shldq $63, %rax, %rcx
+; AVX2-NEXT: vmovq %rcx, %xmm0
+; AVX2-NEXT: vmovq %r11, %xmm1
+; AVX2-NEXT: vmovq %r14, %xmm2
+; AVX2-NEXT: vmovq %r12, %xmm3
+; AVX2-NEXT: vmovq %r13, %xmm4
+; AVX2-NEXT: vmovq %rbp, %xmm5
+; AVX2-NEXT: vmovq %r15, %xmm6
+; AVX2-NEXT: vmovq %rbx, %xmm7
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: .cfi_def_cfa_offset 8
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v8i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: .cfi_offset %rbx, -56
+; AVX512-NEXT: .cfi_offset %r12, -48
+; AVX512-NEXT: .cfi_offset %r13, -40
+; AVX512-NEXT: .cfi_offset %r14, -32
+; AVX512-NEXT: .cfi_offset %r15, -24
+; AVX512-NEXT: .cfi_offset %rbp, -16
+; AVX512-NEXT: vpextrq $1, %xmm0, %r10
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512-NEXT: vpextrq $1, %xmm2, %r13
+; AVX512-NEXT: vmovq %xmm2, %r15
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vmovq %xmm2, %rbp
+; AVX512-NEXT: vpextrq $1, %xmm2, %r12
+; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %r14
+; AVX512-NEXT: vpextrq $1, %xmm2, %rbx
+; AVX512-NEXT: vpextrq $1, %xmm1, %r9
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %r8
+; AVX512-NEXT: vpextrq $1, %xmm2, %rdx
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512-NEXT: vpextrq $1, %xmm2, %rcx
+; AVX512-NEXT: vmovq %xmm2, %r11
+; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %rdi
+; AVX512-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512-NEXT: xorl %esi, %esi
+; AVX512-NEXT: addq %rbx, %rax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: setb %sil
+; AVX512-NEXT: xorl %ebx, %ebx
+; AVX512-NEXT: addq %r14, %rdi
+; AVX512-NEXT: setb %bl
+; AVX512-NEXT: xorl %r14d, %r14d
+; AVX512-NEXT: addq %r12, %rcx
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: setb %r14b
+; AVX512-NEXT: xorl %r12d, %r12d
+; AVX512-NEXT: addq %rbp, %r11
+; AVX512-NEXT: setb %r12b
+; AVX512-NEXT: xorl %ebp, %ebp
+; AVX512-NEXT: addq %r13, %rdx
+; AVX512-NEXT: setb %bpl
+; AVX512-NEXT: xorl %r13d, %r13d
+; AVX512-NEXT: addq %r15, %r8
+; AVX512-NEXT: setb %r13b
+; AVX512-NEXT: xorl %r15d, %r15d
+; AVX512-NEXT: addq %r10, %r9
+; AVX512-NEXT: vmovq %xmm0, %rcx
+; AVX512-NEXT: setb %r15b
+; AVX512-NEXT: vmovq %xmm1, %rax
+; AVX512-NEXT: xorl %r10d, %r10d
+; AVX512-NEXT: addq %rcx, %rax
+; AVX512-NEXT: setb %r10b
+; AVX512-NEXT: shldq $63, %rax, %r10
+; AVX512-NEXT: shldq $63, %r9, %r15
+; AVX512-NEXT: shldq $63, %r8, %r13
+; AVX512-NEXT: shldq $63, %rdx, %rbp
+; AVX512-NEXT: shldq $63, %r11, %r12
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: shldq $63, %rax, %r14
+; AVX512-NEXT: shldq $63, %rdi, %rbx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: shldq $63, %rax, %rsi
+; AVX512-NEXT: vmovq %rsi, %xmm0
+; AVX512-NEXT: vmovq %rbx, %xmm1
+; AVX512-NEXT: vmovq %r14, %xmm2
+; AVX512-NEXT: vmovq %r12, %xmm3
+; AVX512-NEXT: vmovq %rbp, %xmm4
+; AVX512-NEXT: vmovq %r13, %xmm5
+; AVX512-NEXT: vmovq %r15, %xmm6
+; AVX512-NEXT: vmovq %r10, %xmm7
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: .cfi_def_cfa_offset 8
+; AVX512-NEXT: retq
+ %x0 = zext <8 x i64> %a0 to <8 x i128>
+ %x1 = zext <8 x i64> %a1 to <8 x i128>
+ %sum = add <8 x i128> %x0, %x1
+ %shift = lshr <8 x i128> %sum, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1>
+ %res = trunc <8 x i128> %shift to <8 x i64>
+ ret <8 x i64> %res
+}
+
diff --git a/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll b/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
index 34ef23d..234c7a0a 100644
--- a/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
+++ b/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
@@ -553,8 +553,8 @@ define i8 @v8i32_or_select(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm2, %ymm3, %ymm1
-; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vorps %ymm0, %ymm3, %ymm0
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vmovmskps %ymm0, %eax
; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
@@ -571,8 +571,8 @@ define i8 @v8i32_or_select(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm0, %ymm2
; AVX2-NEXT: .LBB7_3:
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm1
-; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpor %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/callbr-asm-kill.mir b/llvm/test/CodeGen/X86/callbr-asm-kill.mir
index 86c58c4..0dded37 100644
--- a/llvm/test/CodeGen/X86/callbr-asm-kill.mir
+++ b/llvm/test/CodeGen/X86/callbr-asm-kill.mir
@@ -45,6 +45,7 @@ liveins:
- { reg: '$rsi', virtual-reg: '%3' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/CodeGen/X86/cmov.ll b/llvm/test/CodeGen/X86/cmov.ll
index 374e759..a8c068f 100644
--- a/llvm/test/CodeGen/X86/cmov.ll
+++ b/llvm/test/CodeGen/X86/cmov.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -disable-cgp-select2branch -x86-cmov-converter=false | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -disable-cgp-select2branch -x86-cmov-converter=false -mattr=+ndd --show-mc-encoding | FileCheck %s --check-prefix=NDD
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
define i32 @test1(i32 %x, i32 %n, i32 %w, ptr %vp) nounwind readnone {
@@ -9,6 +10,13 @@ define i32 @test1(i32 %x, i32 %n, i32 %w, ptr %vp) nounwind readnone {
; CHECK-NEXT: movl $12, %eax
; CHECK-NEXT: cmovael (%rcx), %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test1:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: btl %esi, %edi # encoding: [0x0f,0xa3,0xf7]
+; NDD-NEXT: movl $12, %eax # encoding: [0xb8,0x0c,0x00,0x00,0x00]
+; NDD-NEXT: cmovael (%rcx), %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0x01]
+; NDD-NEXT: retq # encoding: [0xc3]
entry:
%0 = lshr i32 %x, %n
%1 = and i32 %0, 1
@@ -25,6 +33,13 @@ define i32 @test2(i32 %x, i32 %n, i32 %w, ptr %vp) nounwind readnone {
; CHECK-NEXT: movl $12, %eax
; CHECK-NEXT: cmovbl (%rcx), %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test2:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: btl %esi, %edi # encoding: [0x0f,0xa3,0xf7]
+; NDD-NEXT: movl $12, %eax # encoding: [0xb8,0x0c,0x00,0x00,0x00]
+; NDD-NEXT: cmovbl (%rcx), %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x42,0x01]
+; NDD-NEXT: retq # encoding: [0xc3]
entry:
%0 = lshr i32 %x, %n
%1 = and i32 %0, 1
@@ -50,6 +65,16 @@ define void @test3(i64 %a, i64 %b, i1 %p) nounwind {
; CHECK-NEXT: callq bar@PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test3:
+; NDD: # %bb.0:
+; NDD-NEXT: pushq %rax # encoding: [0x50]
+; NDD-NEXT: testb $1, %dl # encoding: [0xf6,0xc2,0x01]
+; NDD-NEXT: cmovel %esi, %edi # EVEX TO LEGACY Compression encoding: [0x0f,0x44,0xfe]
+; NDD-NEXT: callq bar@PLT # encoding: [0xe8,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 1, value: bar@PLT-4, kind: FK_PCRel_4
+; NDD-NEXT: popq %rax # encoding: [0x58]
+; NDD-NEXT: retq # encoding: [0xc3]
%c = trunc i64 %a to i32
%d = trunc i64 %b to i32
%e = select i1 %p, i32 %c, i32 %d
@@ -114,6 +139,54 @@ define i1 @test4() nounwind {
; CHECK-NEXT: movl %ebx, %eax
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test4:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movsbl g_3(%rip), %eax # encoding: [0x0f,0xbe,0x05,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 3, value: g_3-4, kind: reloc_riprel_4byte
+; NDD-NEXT: movzbl %al, %ecx # encoding: [0x0f,0xb6,0xc8]
+; NDD-NEXT: shrl $7, %ecx # EVEX TO LEGACY Compression encoding: [0xc1,0xe9,0x07]
+; NDD-NEXT: xorb $1, %cl # EVEX TO LEGACY Compression encoding: [0x80,0xf1,0x01]
+; NDD-NEXT: sarl %cl, %eax, %ecx # encoding: [0x62,0xf4,0x74,0x18,0xd3,0xf8]
+; NDD-NEXT: movzbl g_96(%rip), %eax # encoding: [0x0f,0xb6,0x05,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 3, value: g_96-4, kind: reloc_riprel_4byte
+; NDD-NEXT: testb %al, %al # encoding: [0x84,0xc0]
+; NDD-NEXT: je .LBB3_2 # encoding: [0x74,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB3_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %bb.i.i.i
+; NDD-NEXT: movzbl g_100(%rip), %edx # encoding: [0x0f,0xb6,0x15,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 3, value: g_100-4, kind: reloc_riprel_4byte
+; NDD-NEXT: .LBB3_2: # %func_4.exit.i
+; NDD-NEXT: pushq %rbx # encoding: [0x53]
+; NDD-NEXT: xorl %edx, %edx # encoding: [0x31,0xd2]
+; NDD-NEXT: testb %cl, %cl # encoding: [0x84,0xc9]
+; NDD-NEXT: setne %bl # encoding: [0x0f,0x95,0xc3]
+; NDD-NEXT: movzbl %al, %ecx # encoding: [0x0f,0xb6,0xc8]
+; NDD-NEXT: cmovnel %edx, %ecx # EVEX TO LEGACY Compression encoding: [0x0f,0x45,0xca]
+; NDD-NEXT: testb %al, %al # encoding: [0x84,0xc0]
+; NDD-NEXT: je .LBB3_5 # encoding: [0x74,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB3_5-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.3: # %func_4.exit.i
+; NDD-NEXT: testb %bl, %bl # encoding: [0x84,0xdb]
+; NDD-NEXT: jne .LBB3_5 # encoding: [0x75,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB3_5-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.4: # %bb.i.i
+; NDD-NEXT: movzbl g_100(%rip), %ecx # encoding: [0x0f,0xb6,0x0d,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 3, value: g_100-4, kind: reloc_riprel_4byte
+; NDD-NEXT: xorl %ebx, %ebx # encoding: [0x31,0xdb]
+; NDD-NEXT: movl %eax, %ecx # encoding: [0x89,0xc1]
+; NDD-NEXT: .LBB3_5: # %func_1.exit
+; NDD-NEXT: movb %cl, g_96(%rip) # encoding: [0x88,0x0d,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 2, value: g_96-4, kind: reloc_riprel_4byte
+; NDD-NEXT: movzbl %cl, %esi # encoding: [0x0f,0xb6,0xf1]
+; NDD-NEXT: movl $_2E_str, %edi # encoding: [0xbf,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 1, value: _2E_str, kind: FK_Data_4
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: callq printf@PLT # encoding: [0xe8,A,A,A,A]
+; NDD-NEXT: # fixup A - offset: 1, value: printf@PLT-4, kind: FK_PCRel_4
+; NDD-NEXT: movl %ebx, %eax # encoding: [0x89,0xd8]
+; NDD-NEXT: popq %rbx # encoding: [0x5b]
+; NDD-NEXT: retq # encoding: [0xc3]
entry:
%0 = load i8, ptr @g_3, align 1
%1 = sext i8 %0 to i32
@@ -163,6 +236,14 @@ define i32 @test5(ptr nocapture %P) nounwind readonly {
; CHECK-NEXT: setge %al
; CHECK-NEXT: orl $-2, %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test5:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: cmpl $42, (%rdi) # encoding: [0x83,0x3f,0x2a]
+; NDD-NEXT: setge %al # encoding: [0x0f,0x9d,0xc0]
+; NDD-NEXT: orl $-2, %eax # EVEX TO LEGACY Compression encoding: [0x83,0xc8,0xfe]
+; NDD-NEXT: retq # encoding: [0xc3]
entry:
%0 = load i32, ptr %P, align 4
%1 = icmp sgt i32 %0, 41
@@ -178,6 +259,14 @@ define i32 @test6(ptr nocapture %P) nounwind readonly {
; CHECK-NEXT: setl %al
; CHECK-NEXT: leal 4(%rax,%rax,8), %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test6:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: cmpl $42, (%rdi) # encoding: [0x83,0x3f,0x2a]
+; NDD-NEXT: setl %al # encoding: [0x0f,0x9c,0xc0]
+; NDD-NEXT: leal 4(%rax,%rax,8), %eax # encoding: [0x8d,0x44,0xc0,0x04]
+; NDD-NEXT: retq # encoding: [0xc3]
entry:
%0 = load i32, ptr %P, align 4
%1 = icmp sgt i32 %0, 41
@@ -194,6 +283,13 @@ define i8 @test7(i1 inreg %c, i8 inreg %a, i8 inreg %b) nounwind {
; CHECK-NEXT: cmovel %edx, %eax
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test7:
+; NDD: # %bb.0:
+; NDD-NEXT: testb $1, %dil # encoding: [0x40,0xf6,0xc7,0x01]
+; NDD-NEXT: cmovnel %esi, %edx, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x45,0xd6]
+; NDD-NEXT: # kill: def $al killed $al killed $eax
+; NDD-NEXT: retq # encoding: [0xc3]
%d = select i1 %c, i8 %a, i8 %b
ret i8 %d
}
@@ -205,6 +301,13 @@ define i64 @test8(i64 %0, i64 %1, i64 %2) {
; CHECK-NEXT: cmpq $-2147483648, %rdi # imm = 0x80000000
; CHECK-NEXT: cmovlq %rdx, %rax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: test8:
+; NDD: # %bb.0:
+; NDD-NEXT: cmpq $-2147483648, %rdi # encoding: [0x48,0x81,0xff,0x00,0x00,0x00,0x80]
+; NDD-NEXT: # imm = 0x80000000
+; NDD-NEXT: cmovgeq %rsi, %rdx, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x4d,0xd6]
+; NDD-NEXT: retq # encoding: [0xc3]
%4 = icmp sgt i64 %0, -2147483649
%5 = select i1 %4, i64 %1, i64 %2
ret i64 %5
@@ -218,6 +321,14 @@ define i32 @smin(i32 %x) {
; CHECK-NEXT: movl $-1, %eax
; CHECK-NEXT: cmovnsl %edi, %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: smin:
+; NDD: # %bb.0:
+; NDD-NEXT: notl %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0xf7,0xd7]
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: movl $-1, %ecx # encoding: [0xb9,0xff,0xff,0xff,0xff]
+; NDD-NEXT: cmovsl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x48,0xc1]
+; NDD-NEXT: retq # encoding: [0xc3]
%not_x = xor i32 %x, -1
%1 = icmp slt i32 %not_x, -1
%sel = select i1 %1, i32 %not_x, i32 -1
@@ -231,6 +342,13 @@ define i32 @pr47049_1(i32 %0) {
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: cmovlel %edi, %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: pr47049_1:
+; NDD: # %bb.0:
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: movl $1, %eax # encoding: [0xb8,0x01,0x00,0x00,0x00]
+; NDD-NEXT: cmovlel %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x4e,0xc7]
+; NDD-NEXT: retq # encoding: [0xc3]
%2 = icmp slt i32 %0, 1
%3 = select i1 %2, i32 %0, i32 1
ret i32 %3
@@ -243,6 +361,13 @@ define i32 @pr47049_2(i32 %0) {
; CHECK-NEXT: movl $-1, %eax
; CHECK-NEXT: cmovnsl %edi, %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: pr47049_2:
+; NDD: # %bb.0:
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: movl $-1, %eax # encoding: [0xb8,0xff,0xff,0xff,0xff]
+; NDD-NEXT: cmovnsl %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x49,0xc7]
+; NDD-NEXT: retq # encoding: [0xc3]
%2 = icmp sgt i32 %0, -1
%3 = select i1 %2, i32 %0, i32 -1
ret i32 %3
@@ -255,6 +380,13 @@ define i32 @pr47049_3(i32 %0) {
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: cmovgl %edi, %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: pr47049_3:
+; NDD: # %bb.0:
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: movl $1, %eax # encoding: [0xb8,0x01,0x00,0x00,0x00]
+; NDD-NEXT: cmovgl %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x4f,0xc7]
+; NDD-NEXT: retq # encoding: [0xc3]
%2 = icmp sgt i32 %0, 1
%3 = select i1 %2, i32 %0, i32 1
ret i32 %3
@@ -267,6 +399,13 @@ define i32 @pr47049_4(i32 %0) {
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: cmovnel %edi, %eax
; CHECK-NEXT: retq
+;
+; NDD-LABEL: pr47049_4:
+; NDD: # %bb.0:
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: movl $1, %eax # encoding: [0xb8,0x01,0x00,0x00,0x00]
+; NDD-NEXT: cmovnel %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x45,0xc7]
+; NDD-NEXT: retq # encoding: [0xc3]
%2 = icmp ugt i32 %0, 1
%3 = select i1 %2, i32 %0, i32 1
ret i32 %3
diff --git a/llvm/test/CodeGen/X86/cmp.ll b/llvm/test/CodeGen/X86/cmp.ll
index cd1953b..30e52f0 100644
--- a/llvm/test/CodeGen/X86/cmp.ll
+++ b/llvm/test/CodeGen/X86/cmp.ll
@@ -416,9 +416,8 @@ define i32 @test13(i32 %mask, i32 %base, i32 %intra) {
;
; NDD-LABEL: test13:
; NDD: # %bb.0:
-; NDD-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; NDD-NEXT: testb $8, %dil # encoding: [0x40,0xf6,0xc7,0x08]
-; NDD-NEXT: cmovnel %edx, %eax # encoding: [0x0f,0x45,0xc2]
+; NDD-NEXT: cmovnel %edx, %esi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x45,0xf2]
; NDD-NEXT: retq # encoding: [0xc3]
%and = and i32 %mask, 8
%tobool = icmp ne i32 %and, 0
@@ -436,9 +435,8 @@ define i32 @test14(i32 %mask, i32 %base, i32 %intra) {
;
; NDD-LABEL: test14:
; NDD: # %bb.0:
-; NDD-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
-; NDD-NEXT: shrl $7, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0xc1,0xef,0x07]
-; NDD-NEXT: cmovnsl %edx, %eax # encoding: [0x0f,0x49,0xc2]
+; NDD-NEXT: shrl $7, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0xc1,0xef,0x07]
+; NDD-NEXT: cmovnsl %edx, %esi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x49,0xf2]
; NDD-NEXT: retq # encoding: [0xc3]
%s = lshr i32 %mask, 7
%tobool = icmp sgt i32 %s, -1
@@ -1100,9 +1098,8 @@ define { i64, i64 } @pr39968(i64, i64, i32) {
; NDD: # %bb.0:
; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; NDD-NEXT: testb $64, %dl # encoding: [0xf6,0xc2,0x40]
-; NDD-NEXT: cmovneq %rdi, %rsi # encoding: [0x48,0x0f,0x45,0xf7]
-; NDD-NEXT: cmovneq %rdi, %rax # encoding: [0x48,0x0f,0x45,0xc7]
-; NDD-NEXT: movq %rsi, %rdx # encoding: [0x48,0x89,0xf2]
+; NDD-NEXT: cmovneq %rdi, %rsi, %rdx # encoding: [0x62,0xf4,0xec,0x18,0x45,0xf7]
+; NDD-NEXT: cmovneq %rdi, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x45,0xc7]
; NDD-NEXT: retq # encoding: [0xc3]
%4 = and i32 %2, 64
%5 = icmp ne i32 %4, 0
diff --git a/llvm/test/CodeGen/X86/combine-pavg.ll b/llvm/test/CodeGen/X86/combine-pavg.ll
index 0743592..7a8ddf5 100644
--- a/llvm/test/CodeGen/X86/combine-pavg.ll
+++ b/llvm/test/CodeGen/X86/combine-pavg.ll
@@ -18,6 +18,22 @@ define <16 x i8> @combine_pavgb_self(<16 x i8> %a0) {
ret <16 x i8> %1
}
+define <16 x i8> @combine_pavgb_zero(<16 x i8> %a0) {
+; SSE-LABEL: combine_pavgb_zero:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pavgb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pavgb_zero:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> zeroinitializer, <16 x i8> %a0)
+ ret <16 x i8> %1
+}
+
define <16 x i8> @combine_pavgw_knownbits(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
; SSE-LABEL: combine_pavgw_knownbits:
; SSE: # %bb.0:
@@ -64,3 +80,33 @@ define <16 x i8> @combine_pavgw_knownbits(<8 x i16> %a0, <8 x i16> %a1, <8 x i16
%trunc = trunc <16 x i16> %shuffle to <16 x i8>
ret <16 x i8> %trunc
}
+
+define <8 x i16> @combine_pavgw_demandedelts(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: combine_pavgw_demandedelts:
+; SSE: # %bb.0:
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,8,9,8,9,12,13,12,13]
+; SSE-NEXT: pavgw %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: combine_pavgw_demandedelts:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,8,9,8,9,12,13,12,13]
+; AVX1-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_pavgw_demandedelts:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
+; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
+; AVX2-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+ %s0 = shufflevector <8 x i16> %a0, <8 x i16> poison, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+ %avg = tail call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %s0, <8 x i16> %a1)
+ %shuffle = shufflevector <8 x i16> %avg, <8 x i16> poison, <8 x i32> zeroinitializer
+ ret <8 x i16> %shuffle
+}
+
diff --git a/llvm/test/CodeGen/X86/combine-sra.ll b/llvm/test/CodeGen/X86/combine-sra.ll
index 0675ced..7eee418 100644
--- a/llvm/test/CodeGen/X86/combine-sra.ll
+++ b/llvm/test/CodeGen/X86/combine-sra.ll
@@ -521,3 +521,276 @@ define <4 x i32> @combine_vec_ashr_positive_splat(<4 x i32> %x, <4 x i32> %y) {
%2 = ashr <4 x i32> %1, <i32 10, i32 10, i32 10, i32 10>
ret <4 x i32> %2
}
+
+define <8 x i16> @combine_vec8i16_ashr_clamped(<8 x i16> %x, <8 x i16> %y) {
+; SSE2-LABEL: combine_vec8i16_ashr_clamped:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psubusw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE2-NEXT: psubw %xmm2, %xmm1
+; SSE2-NEXT: psllw $12, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psraw $15, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm0, %xmm3
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: paddw %xmm1, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psraw $15, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm0, %xmm3
+; SSE2-NEXT: psraw $4, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: paddw %xmm1, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psraw $15, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm0, %xmm3
+; SSE2-NEXT: psraw $2, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: paddw %xmm1, %xmm1
+; SSE2-NEXT: psraw $15, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm0, %xmm2
+; SSE2-NEXT: psraw $1, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec8i16_ashr_clamped:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psllw $12, %xmm0
+; SSE41-NEXT: psllw $4, %xmm1
+; SSE41-NEXT: por %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: paddw %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: psraw $8, %xmm3
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: psraw $4, %xmm3
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: psraw $2, %xmm3
+; SSE41-NEXT: paddw %xmm1, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: psraw $1, %xmm3
+; SSE41-NEXT: paddw %xmm1, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX2-LABEL: combine_vec8i16_ashr_clamped:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: combine_vec8i16_ashr_clamped:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsravw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %1 = tail call <8 x i16> @llvm.umin.v8i16(<8 x i16> %y, <8 x i16> <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>)
+ %2 = ashr <8 x i16> %x, %1
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @combine_vec4i32_ashr_clamped(<4 x i32> %x, <4 x i32> %y) {
+; SSE2-LABEL: combine_vec4i32_ashr_clamped:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm1, %xmm3
+; SSE2-NEXT: psrld $27, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrad %xmm1, %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad %xmm4, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrad %xmm3, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: psrad %xmm2, %xmm0
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec4i32_ashr_clamped:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrad %xmm2, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: psrad %xmm4, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrad %xmm1, %xmm3
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: psrad %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: combine_vec4i32_ashr_clamped:
+; AVX: # %bb.0:
+; AVX-NEXT: vpsravd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %y, <4 x i32> <i32 31, i32 31, i32 31, i32 31>)
+ %2 = ashr <4 x i32> %x, %1
+ ret <4 x i32> %2
+}
+
+define <4 x i64> @combine_vec4i64_ashr_clamped(<4 x i64> %x, <4 x i64> %y) {
+; SSE2-LABEL: combine_vec4i64_ashr_clamped:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm5, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483711,2147483711,2147483711,2147483711]
+; SSE2-NEXT: movdqa %xmm7, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm8
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: pcmpeqd %xmm5, %xmm4
+; SSE2-NEXT: pand %xmm8, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [63,63]
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pandn %xmm6, %xmm4
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm5, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: pcmpeqd %xmm5, %xmm3
+; SSE2-NEXT: pand %xmm7, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pandn %xmm6, %xmm3
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: psrlq %xmm3, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[2,3,2,3]
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: psrlq %xmm6, %xmm7
+; SSE2-NEXT: movsd {{.*#+}} xmm7 = xmm5[0],xmm7[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: psrlq %xmm3, %xmm5
+; SSE2-NEXT: psrlq %xmm6, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
+; SSE2-NEXT: xorpd %xmm7, %xmm0
+; SSE2-NEXT: psubq %xmm7, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: psrlq %xmm4, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
+; SSE2-NEXT: psrlq %xmm5, %xmm2
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: psrlq %xmm4, %xmm3
+; SSE2-NEXT: psrlq %xmm5, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
+; SSE2-NEXT: xorpd %xmm2, %xmm1
+; SSE2-NEXT: psubq %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec4i64_ashr_clamped:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm7, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259519,9223372039002259519]
+; SSE41-NEXT: movdqa %xmm8, %xmm6
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483711,2147483711,2147483711,2147483711]
+; SSE41-NEXT: movdqa %xmm5, %xmm0
+; SSE41-NEXT: pcmpgtd %xmm9, %xmm0
+; SSE41-NEXT: pand %xmm6, %xmm0
+; SSE41-NEXT: movapd {{.*#+}} xmm9 = [63,63]
+; SSE41-NEXT: movapd %xmm9, %xmm6
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm6
+; SSE41-NEXT: pxor %xmm2, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm7, %xmm8
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,2,2]
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
+; SSE41-NEXT: pand %xmm8, %xmm5
+; SSE41-NEXT: movdqa %xmm5, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm9
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrlq %xmm9, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm9[2,3,2,3]
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: psrlq %xmm3, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: movdqa %xmm4, %xmm2
+; SSE41-NEXT: psrlq %xmm9, %xmm2
+; SSE41-NEXT: psrlq %xmm3, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1,2,3],xmm4[4,5,6,7]
+; SSE41-NEXT: pxor %xmm5, %xmm4
+; SSE41-NEXT: psubq %xmm5, %xmm4
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrlq %xmm6, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
+; SSE41-NEXT: psrlq %xmm3, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psrlq %xmm6, %xmm2
+; SSE41-NEXT: psrlq %xmm3, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: psubq %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm4, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX2-LABEL: combine_vec4i64_ashr_clamped:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [9223372036854775870,9223372036854775870,9223372036854775870,9223372036854775870]
+; AVX2-NEXT: vpcmpgtq %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm4 = [63,63,63,63]
+; AVX2-NEXT: vblendvpd %ymm3, %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: combine_vec4i64_ashr_clamped:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsravq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %1 = tail call <4 x i64> @llvm.umin.v4i64(<4 x i64> %y, <4 x i64> <i64 63, i64 63, i64 63, i64 63>)
+ %2 = ashr <4 x i64> %x, %1
+ ret <4 x i64> %2
+}
diff --git a/llvm/test/CodeGen/X86/dagcombine-shifts.ll b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
index 42b325d..734abfe 100644
--- a/llvm/test/CodeGen/X86/dagcombine-shifts.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
@@ -322,5 +322,132 @@ define void @g(i32 %a) nounwind {
ret void
}
+define i32 @shift_zext_shl(i8 zeroext %x) {
+; X86-LABEL: shift_zext_shl:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $64, %eax
+; X86-NEXT: shll $9, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shift_zext_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $64, %eax
+; X64-NEXT: shll $9, %eax
+; X64-NEXT: retq
+ %a = and i8 %x, 64
+ %b = zext i8 %a to i16
+ %c = shl i16 %b, 9
+ %d = zext i16 %c to i32
+ ret i32 %d
+}
+
+define i32 @shift_zext_shl2(i8 zeroext %x) {
+; X86-LABEL: shift_zext_shl2:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $64, %eax
+; X86-NEXT: shll $9, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shift_zext_shl2:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $64, %eax
+; X64-NEXT: shll $9, %eax
+; X64-NEXT: retq
+ %a = and i8 %x, 64
+ %b = zext i8 %a to i32
+ %c = shl i32 %b, 9
+ ret i32 %c
+}
+
+define <4 x i32> @shift_zext_shl_vec(<4 x i8> %x) nounwind {
+; X86-LABEL: shift_zext_shl_vec:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl $64, %ecx
+; X86-NEXT: shll $9, %ecx
+; X86-NEXT: andl $63, %edx
+; X86-NEXT: shll $8, %edx
+; X86-NEXT: andl $31, %esi
+; X86-NEXT: shll $7, %esi
+; X86-NEXT: andl $23, %edi
+; X86-NEXT: shll $6, %edi
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: movl %esi, 8(%eax)
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+;
+; X64-LABEL: shift_zext_shl_vec:
+; X64: # %bb.0:
+; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: pxor %xmm1, %xmm1
+; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X64-NEXT: retq
+ %a = and <4 x i8> %x, <i8 64, i8 63, i8 31, i8 23>
+ %b = zext <4 x i8> %a to <4 x i16>
+ %c = shl <4 x i16> %b, <i16 9, i16 8, i16 7, i16 6>
+ %d = zext <4 x i16> %c to <4 x i32>
+ ret <4 x i32> %d
+}
+
+define <4 x i32> @shift_zext_shl2_vec(<4 x i8> %x) nounwind {
+; X86-LABEL: shift_zext_shl2_vec:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: andl $23, %edi
+; X86-NEXT: andl $31, %esi
+; X86-NEXT: andl $63, %edx
+; X86-NEXT: andl $64, %ecx
+; X86-NEXT: shll $9, %ecx
+; X86-NEXT: shll $8, %edx
+; X86-NEXT: shll $7, %esi
+; X86-NEXT: shll $6, %edi
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: movl %esi, 8(%eax)
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+;
+; X64-LABEL: shift_zext_shl2_vec:
+; X64: # %bb.0:
+; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: pxor %xmm1, %xmm1
+; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-NEXT: retq
+ %a = and <4 x i8> %x, <i8 64, i8 63, i8 31, i8 23>
+ %b = zext <4 x i8> %a to <4 x i32>
+ %c = shl <4 x i32> %b, <i32 9, i32 8, i32 7, i32 6>
+ ret <4 x i32> %c
+}
+
declare dso_local void @f(i64)
diff --git a/llvm/test/CodeGen/X86/extractelement-load.ll b/llvm/test/CodeGen/X86/extractelement-load.ll
index 9d573ef..022b25a 100644
--- a/llvm/test/CodeGen/X86/extractelement-load.ll
+++ b/llvm/test/CodeGen/X86/extractelement-load.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=X64,X64-SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX2
@@ -7,23 +7,16 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define i32 @t(ptr %val) nounwind {
-; X32-SSE2-LABEL: t:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,2,3]
-; X32-SSE2-NEXT: movd %xmm0, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl 8(%eax), %eax
+; X86-SSE2-NEXT: retl
;
-; X64-SSSE3-LABEL: t:
-; X64-SSSE3: # %bb.0:
-; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,2,3]
-; X64-SSSE3-NEXT: movd %xmm0, %eax
-; X64-SSSE3-NEXT: retq
-;
-; X64-AVX-LABEL: t:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: movl 8(%rdi), %eax
-; X64-AVX-NEXT: retq
+; X64-LABEL: t:
+; X64: # %bb.0:
+; X64-NEXT: movl 8(%rdi), %eax
+; X64-NEXT: retq
%tmp2 = load <2 x i64>, ptr %val, align 16 ; <<2 x i64>> [#uses=1]
%tmp3 = bitcast <2 x i64> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp3, i32 2 ; <i32> [#uses=1]
@@ -33,9 +26,9 @@ define i32 @t(ptr %val) nounwind {
; Case where extractelement of load ends up as undef.
; (Making sure this doesn't crash.)
define i32 @t2(ptr %xp) {
-; X32-SSE2-LABEL: t2:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t2:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: retl
;
; X64-LABEL: t2:
; X64: # %bb.0:
@@ -51,12 +44,12 @@ define i32 @t2(ptr %xp) {
; narrow load.
define void @t3(ptr %a0) {
-; X32-SSE2-LABEL: t3:
-; X32-SSE2: # %bb.0: # %bb
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movups (%eax), %xmm0
-; X32-SSE2-NEXT: movhps %xmm0, (%eax)
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t3:
+; X86-SSE2: # %bb.0: # %bb
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movups (%eax), %xmm0
+; X86-SSE2-NEXT: movhps %xmm0, (%eax)
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t3:
; X64-SSSE3: # %bb.0: # %bb
@@ -81,14 +74,12 @@ bb:
; This is testing for an assertion - the extraction was assuming that the undef
; second shuffle operand was a post-bitcast type instead of a pre-bitcast type.
define i64 @t4(ptr %a) {
-; X32-SSE2-LABEL: t4:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movdqa (%eax), %xmm0
-; X32-SSE2-NEXT: movd %xmm0, %eax
-; X32-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X32-SSE2-NEXT: movd %xmm0, %edx
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t4:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl (%ecx), %eax
+; X86-SSE2-NEXT: movl 4(%ecx), %edx
+; X86-SSE2-NEXT: retl
;
; X64-LABEL: t4:
; X64: # %bb.0:
@@ -103,13 +94,13 @@ define i64 @t4(ptr %a) {
; Don't extract from a volatile.
define void @t5(ptr%a0, ptr%a1) {
-; X32-SSE2-LABEL: t5:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movaps (%ecx), %xmm0
-; X32-SSE2-NEXT: movhps %xmm0, (%eax)
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t5:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movaps (%ecx), %xmm0
+; X86-SSE2-NEXT: movhps %xmm0, (%eax)
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t5:
; X64-SSSE3: # %bb.0:
@@ -130,24 +121,24 @@ define void @t5(ptr%a0, ptr%a1) {
; Check for multiuse.
define float @t6(ptr%a0) {
-; X32-SSE2-LABEL: t6:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pushl %eax
-; X32-SSE2-NEXT: .cfi_def_cfa_offset 8
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movaps (%eax), %xmm0
-; X32-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X32-SSE2-NEXT: xorps %xmm1, %xmm1
-; X32-SSE2-NEXT: cmpeqss %xmm0, %xmm1
-; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
-; X32-SSE2-NEXT: andps %xmm1, %xmm2
-; X32-SSE2-NEXT: andnps %xmm0, %xmm1
-; X32-SSE2-NEXT: orps %xmm2, %xmm1
-; X32-SSE2-NEXT: movss %xmm1, (%esp)
-; X32-SSE2-NEXT: flds (%esp)
-; X32-SSE2-NEXT: popl %eax
-; X32-SSE2-NEXT: .cfi_def_cfa_offset 4
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t6:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: .cfi_def_cfa_offset 8
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movaps (%eax), %xmm0
+; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE2-NEXT: xorps %xmm1, %xmm1
+; X86-SSE2-NEXT: cmpeqss %xmm0, %xmm1
+; X86-SSE2-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-SSE2-NEXT: andps %xmm1, %xmm2
+; X86-SSE2-NEXT: andnps %xmm0, %xmm1
+; X86-SSE2-NEXT: orps %xmm2, %xmm1
+; X86-SSE2-NEXT: movss %xmm1, (%esp)
+; X86-SSE2-NEXT: flds (%esp)
+; X86-SSE2-NEXT: popl %eax
+; X86-SSE2-NEXT: .cfi_def_cfa_offset 4
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t6:
; X64-SSSE3: # %bb.0:
@@ -184,20 +175,20 @@ define float @t6(ptr%a0) {
}
define void @PR43971(ptr%a0, ptr%a1) {
-; X32-SSE2-LABEL: PR43971:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movaps 16(%ecx), %xmm0
-; X32-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; X32-SSE2-NEXT: xorps %xmm1, %xmm1
-; X32-SSE2-NEXT: cmpltss %xmm0, %xmm1
-; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-SSE2-NEXT: andps %xmm1, %xmm2
-; X32-SSE2-NEXT: andnps %xmm0, %xmm1
-; X32-SSE2-NEXT: orps %xmm2, %xmm1
-; X32-SSE2-NEXT: movss %xmm1, (%eax)
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: PR43971:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movaps 16(%ecx), %xmm0
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X86-SSE2-NEXT: xorps %xmm1, %xmm1
+; X86-SSE2-NEXT: cmpltss %xmm0, %xmm1
+; X86-SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: andps %xmm1, %xmm2
+; X86-SSE2-NEXT: andnps %xmm0, %xmm1
+; X86-SSE2-NEXT: orps %xmm2, %xmm1
+; X86-SSE2-NEXT: movss %xmm1, (%eax)
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: PR43971:
; X64-SSSE3: # %bb.0: # %entry
@@ -231,22 +222,22 @@ entry:
}
define float @PR43971_1(ptr%a0) nounwind {
-; X32-SSE2-LABEL: PR43971_1:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: pushl %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movaps (%eax), %xmm0
-; X32-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X32-SSE2-NEXT: xorps %xmm1, %xmm1
-; X32-SSE2-NEXT: cmpeqss %xmm0, %xmm1
-; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
-; X32-SSE2-NEXT: andps %xmm1, %xmm2
-; X32-SSE2-NEXT: andnps %xmm0, %xmm1
-; X32-SSE2-NEXT: orps %xmm2, %xmm1
-; X32-SSE2-NEXT: movss %xmm1, (%esp)
-; X32-SSE2-NEXT: flds (%esp)
-; X32-SSE2-NEXT: popl %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: PR43971_1:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movaps (%eax), %xmm0
+; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE2-NEXT: xorps %xmm1, %xmm1
+; X86-SSE2-NEXT: cmpeqss %xmm0, %xmm1
+; X86-SSE2-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-SSE2-NEXT: andps %xmm1, %xmm2
+; X86-SSE2-NEXT: andnps %xmm0, %xmm1
+; X86-SSE2-NEXT: orps %xmm2, %xmm1
+; X86-SSE2-NEXT: movss %xmm1, (%esp)
+; X86-SSE2-NEXT: flds (%esp)
+; X86-SSE2-NEXT: popl %eax
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: PR43971_1:
; X64-SSSE3: # %bb.0: # %entry
@@ -283,17 +274,48 @@ entry:
ret float %cond
}
+define i32 @PR85419(ptr %p0) {
+; X86-SSE2-LABEL: PR85419:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl (%ecx), %edx
+; X86-SSE2-NEXT: xorl %eax, %eax
+; X86-SSE2-NEXT: orl 4(%ecx), %edx
+; X86-SSE2-NEXT: je .LBB8_2
+; X86-SSE2-NEXT: # %bb.1:
+; X86-SSE2-NEXT: movl 8(%ecx), %eax
+; X86-SSE2-NEXT: .LBB8_2:
+; X86-SSE2-NEXT: retl
+;
+; X64-LABEL: PR85419:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq $0, (%rdi)
+; X64-NEXT: je .LBB8_2
+; X64-NEXT: # %bb.1:
+; X64-NEXT: movl 8(%rdi), %eax
+; X64-NEXT: .LBB8_2:
+; X64-NEXT: retq
+ %load = load <2 x i64>, ptr %p0, align 16
+ %vecext.i = extractelement <2 x i64> %load, i64 0
+ %cmp = icmp eq i64 %vecext.i, 0
+ %.cast = bitcast <2 x i64> %load to <4 x i32>
+ %vecext.i2 = extractelement <4 x i32> %.cast, i64 2
+ %retval.0 = select i1 %cmp, i32 0, i32 %vecext.i2
+ ret i32 %retval.0
+}
+
; Test for bad extractions from a VBROADCAST_LOAD of the <2 x i16> non-uniform constant bitcast as <4 x i32>.
define void @subextract_broadcast_load_constant(ptr nocapture %0, ptr nocapture %1, ptr nocapture %2) nounwind {
-; X32-SSE2-LABEL: subextract_broadcast_load_constant:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-SSE2-NEXT: movl $-1583308898, (%edx) # imm = 0xA1A09F9E
-; X32-SSE2-NEXT: movw $-24674, (%ecx) # imm = 0x9F9E
-; X32-SSE2-NEXT: movw $-24160, (%eax) # imm = 0xA1A0
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: subextract_broadcast_load_constant:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: movl $-1583308898, (%edx) # imm = 0xA1A09F9E
+; X86-SSE2-NEXT: movw $-24674, (%ecx) # imm = 0x9F9E
+; X86-SSE2-NEXT: movw $-24160, (%eax) # imm = 0xA1A0
+; X86-SSE2-NEXT: retl
;
; X64-LABEL: subextract_broadcast_load_constant:
; X64: # %bb.0:
@@ -319,15 +341,15 @@ define void @subextract_broadcast_load_constant(ptr nocapture %0, ptr nocapture
; A scalar load is favored over an XMM->GPR register transfer in this example.
define i32 @multi_use_load_scalarization(ptr %p) nounwind {
-; X32-SSE2-LABEL: multi_use_load_scalarization:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl (%ecx), %eax
-; X32-SSE2-NEXT: movdqu (%ecx), %xmm0
-; X32-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
-; X32-SSE2-NEXT: psubd %xmm1, %xmm0
-; X32-SSE2-NEXT: movdqa %xmm0, (%ecx)
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: multi_use_load_scalarization:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl (%ecx), %eax
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-SSE2-NEXT: psubd %xmm1, %xmm0
+; X86-SSE2-NEXT: movdqa %xmm0, (%ecx)
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: multi_use_load_scalarization:
; X64-SSSE3: # %bb.0:
@@ -354,15 +376,15 @@ define i32 @multi_use_load_scalarization(ptr %p) nounwind {
}
define i32 @multi_use_volatile_load_scalarization(ptr %p) nounwind {
-; X32-SSE2-LABEL: multi_use_volatile_load_scalarization:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movdqu (%ecx), %xmm0
-; X32-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
-; X32-SSE2-NEXT: movd %xmm0, %eax
-; X32-SSE2-NEXT: psubd %xmm1, %xmm0
-; X32-SSE2-NEXT: movdqa %xmm0, (%ecx)
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: multi_use_volatile_load_scalarization:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-SSE2-NEXT: movd %xmm0, %eax
+; X86-SSE2-NEXT: psubd %xmm1, %xmm0
+; X86-SSE2-NEXT: movdqa %xmm0, (%ecx)
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: multi_use_volatile_load_scalarization:
; X64-SSSE3: # %bb.0:
@@ -398,41 +420,41 @@ define i32 @multi_use_volatile_load_scalarization(ptr %p) nounwind {
@zero = internal unnamed_addr global <8 x i32> zeroinitializer, align 32
define i32 @main() nounwind {
-; X32-SSE2-LABEL: main:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pushl %ebp
-; X32-SSE2-NEXT: movl %esp, %ebp
-; X32-SSE2-NEXT: pushl %esi
-; X32-SSE2-NEXT: andl $-32, %esp
-; X32-SSE2-NEXT: subl $64, %esp
-; X32-SSE2-NEXT: movdqa zero, %xmm0
-; X32-SSE2-NEXT: movaps n1+16, %xmm1
-; X32-SSE2-NEXT: movaps n1, %xmm2
-; X32-SSE2-NEXT: movaps %xmm2, zero
-; X32-SSE2-NEXT: movaps %xmm1, zero+16
-; X32-SSE2-NEXT: movaps {{.*#+}} xmm1 = [2,2,2,2]
-; X32-SSE2-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
-; X32-SSE2-NEXT: movaps %xmm1, (%esp)
-; X32-SSE2-NEXT: movdqa (%esp), %xmm1
-; X32-SSE2-NEXT: movaps {{[0-9]+}}(%esp), %xmm2
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X32-SSE2-NEXT: movd %xmm2, %eax
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; X32-SSE2-NEXT: movd %xmm2, %ecx
-; X32-SSE2-NEXT: xorl %edx, %edx
-; X32-SSE2-NEXT: divl %ecx
-; X32-SSE2-NEXT: movl %eax, %ecx
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X32-SSE2-NEXT: movd %xmm0, %eax
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; X32-SSE2-NEXT: movd %xmm0, %esi
-; X32-SSE2-NEXT: xorl %edx, %edx
-; X32-SSE2-NEXT: divl %esi
-; X32-SSE2-NEXT: addl %ecx, %eax
-; X32-SSE2-NEXT: leal -4(%ebp), %esp
-; X32-SSE2-NEXT: popl %esi
-; X32-SSE2-NEXT: popl %ebp
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: main:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: pushl %edi
+; X86-SSE2-NEXT: pushl %esi
+; X86-SSE2-NEXT: andl $-32, %esp
+; X86-SSE2-NEXT: subl $64, %esp
+; X86-SSE2-NEXT: movaps n1+16, %xmm0
+; X86-SSE2-NEXT: movaps n1, %xmm1
+; X86-SSE2-NEXT: movl zero+4, %ecx
+; X86-SSE2-NEXT: movl zero+8, %eax
+; X86-SSE2-NEXT: movaps %xmm1, zero
+; X86-SSE2-NEXT: movaps %xmm0, zero+16
+; X86-SSE2-NEXT: movaps {{.*#+}} xmm0 = [2,2,2,2]
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movaps %xmm0, (%esp)
+; X86-SSE2-NEXT: movdqa (%esp), %xmm0
+; X86-SSE2-NEXT: movaps {{[0-9]+}}(%esp), %xmm1
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; X86-SSE2-NEXT: movd %xmm1, %esi
+; X86-SSE2-NEXT: xorl %edx, %edx
+; X86-SSE2-NEXT: divl %esi
+; X86-SSE2-NEXT: movl %eax, %esi
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE2-NEXT: movd %xmm0, %edi
+; X86-SSE2-NEXT: movl %ecx, %eax
+; X86-SSE2-NEXT: xorl %edx, %edx
+; X86-SSE2-NEXT: divl %edi
+; X86-SSE2-NEXT: addl %esi, %eax
+; X86-SSE2-NEXT: leal -8(%ebp), %esp
+; X86-SSE2-NEXT: popl %esi
+; X86-SSE2-NEXT: popl %edi
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: main:
; X64-SSSE3: # %bb.0:
@@ -440,31 +462,29 @@ define i32 @main() nounwind {
; X64-SSSE3-NEXT: movq %rsp, %rbp
; X64-SSSE3-NEXT: andq $-32, %rsp
; X64-SSSE3-NEXT: subq $64, %rsp
-; X64-SSSE3-NEXT: movdqa zero(%rip), %xmm0
; X64-SSSE3-NEXT: movq n1@GOTPCREL(%rip), %rax
-; X64-SSSE3-NEXT: movaps (%rax), %xmm1
-; X64-SSSE3-NEXT: movaps 16(%rax), %xmm2
-; X64-SSSE3-NEXT: movaps %xmm1, zero(%rip)
-; X64-SSSE3-NEXT: movaps %xmm2, zero+16(%rip)
-; X64-SSSE3-NEXT: movaps {{.*#+}} xmm1 = [2,2,2,2]
-; X64-SSSE3-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
-; X64-SSSE3-NEXT: movaps %xmm1, (%rsp)
-; X64-SSSE3-NEXT: movdqa (%rsp), %xmm1
-; X64-SSSE3-NEXT: movaps {{[0-9]+}}(%rsp), %xmm2
-; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-SSSE3-NEXT: movd %xmm2, %eax
-; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; X64-SSSE3-NEXT: movd %xmm2, %ecx
+; X64-SSSE3-NEXT: movaps (%rax), %xmm0
+; X64-SSSE3-NEXT: movaps 16(%rax), %xmm1
+; X64-SSSE3-NEXT: movl zero+4(%rip), %ecx
+; X64-SSSE3-NEXT: movl zero+8(%rip), %eax
+; X64-SSSE3-NEXT: movaps %xmm0, zero(%rip)
+; X64-SSSE3-NEXT: movaps %xmm1, zero+16(%rip)
+; X64-SSSE3-NEXT: movaps {{.*#+}} xmm0 = [2,2,2,2]
+; X64-SSSE3-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; X64-SSSE3-NEXT: movaps %xmm0, (%rsp)
+; X64-SSSE3-NEXT: movdqa (%rsp), %xmm0
+; X64-SSSE3-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
+; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; X64-SSSE3-NEXT: movd %xmm1, %esi
; X64-SSSE3-NEXT: xorl %edx, %edx
-; X64-SSSE3-NEXT: divl %ecx
-; X64-SSSE3-NEXT: movl %eax, %ecx
+; X64-SSSE3-NEXT: divl %esi
+; X64-SSSE3-NEXT: movl %eax, %esi
; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X64-SSSE3-NEXT: movd %xmm0, %eax
-; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; X64-SSSE3-NEXT: movd %xmm0, %esi
+; X64-SSSE3-NEXT: movd %xmm0, %edi
+; X64-SSSE3-NEXT: movl %ecx, %eax
; X64-SSSE3-NEXT: xorl %edx, %edx
-; X64-SSSE3-NEXT: divl %esi
-; X64-SSSE3-NEXT: addl %ecx, %eax
+; X64-SSSE3-NEXT: divl %edi
+; X64-SSSE3-NEXT: addl %esi, %eax
; X64-SSSE3-NEXT: movq %rbp, %rsp
; X64-SSSE3-NEXT: popq %rbp
; X64-SSSE3-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir b/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir
index 56cbe3f..37a90a2 100644
--- a/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir
+++ b/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir
@@ -119,6 +119,7 @@
name: foo
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
stack:
- { id: 0, name: a.addr, size: 4, alignment: 4, debug-info-variable: '!11',
diff --git a/llvm/test/CodeGen/X86/heap-alloc-markers.mir b/llvm/test/CodeGen/X86/heap-alloc-markers.mir
index 0bf8365..6e0dc50 100644
--- a/llvm/test/CodeGen/X86/heap-alloc-markers.mir
+++ b/llvm/test/CodeGen/X86/heap-alloc-markers.mir
@@ -34,6 +34,7 @@ name: test
# CHECK-LABEL: {{^}}test:
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.entry:
diff --git a/llvm/test/CodeGen/X86/huge-stack-offset.ll b/llvm/test/CodeGen/X86/huge-stack-offset.ll
index 68dcfa7..e825328 100644
--- a/llvm/test/CodeGen/X86/huge-stack-offset.ll
+++ b/llvm/test/CodeGen/X86/huge-stack-offset.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=x86_64-linux-unknown | FileCheck %s --check-prefix=CHECK-64
-; RUN: llc < %s -mtriple=i386-linux-unknown | FileCheck %s --check-prefix=CHECK-32
+; RUN: llc < %s -mtriple=x86_64-linux-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-64
+; RUN: llc < %s -mtriple=i386-linux-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-32
; Test that a large stack offset uses a single add/sub instruction to
; adjust the stack pointer.
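For reference, a minimal sketch (not part of the patch; the function, callee name, and frame size are illustrative) of the kind of IR this test exercises, with a frame far too large for a small immediate offset:

    declare void @use(ptr)

    define void @huge_frame() nounwind {
      %buf = alloca [4294967296 x i8], align 16   ; ~4 GiB frame, illustrative size
      call void @use(ptr %buf)
      ret void
    }

The added -verify-machineinstrs flag runs the machine IR verifier over the generated code, which covers the prologue/epilogue sequences that materialize this stack adjustment.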
diff --git a/llvm/test/CodeGen/X86/huge-stack-offset2.ll b/llvm/test/CodeGen/X86/huge-stack-offset2.ll
index 3bf0260..053643eb 100644
--- a/llvm/test/CodeGen/X86/huge-stack-offset2.ll
+++ b/llvm/test/CodeGen/X86/huge-stack-offset2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=CHECK
+; RUN: llc < %s -mtriple=x86_64-linux -verify-machineinstrs | FileCheck %s --check-prefix=CHECK
; Test how we handle pathologically large stack frames when RAX is live through
; the prologue and epilogue.
diff --git a/llvm/test/CodeGen/X86/insertelement-var-index.ll b/llvm/test/CodeGen/X86/insertelement-var-index.ll
index 8ed8495..5420e6b 100644
--- a/llvm/test/CodeGen/X86/insertelement-var-index.ll
+++ b/llvm/test/CodeGen/X86/insertelement-var-index.ll
@@ -1009,18 +1009,19 @@ define <2 x i64> @arg_i64_v2i64(<2 x i64> %v, i64 %x, i32 %y) nounwind {
; X86AVX2-NEXT: pushl %esi
; X86AVX2-NEXT: andl $-16, %esp
; X86AVX2-NEXT: subl $48, %esp
-; X86AVX2-NEXT: movl 8(%ebp), %eax
-; X86AVX2-NEXT: movl 12(%ebp), %ecx
-; X86AVX2-NEXT: movl 16(%ebp), %edx
+; X86AVX2-NEXT: movl 8(%ebp), %edx
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: movl 16(%ebp), %ecx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
-; X86AVX2-NEXT: leal (%edx,%edx), %esi
+; X86AVX2-NEXT: addl %ecx, %ecx
+; X86AVX2-NEXT: movl %ecx, %esi
; X86AVX2-NEXT: andl $3, %esi
-; X86AVX2-NEXT: movl %eax, (%esp,%esi,4)
+; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: leal 1(%edx,%edx), %eax
-; X86AVX2-NEXT: andl $3, %eax
-; X86AVX2-NEXT: movl %ecx, 16(%esp,%eax,4)
+; X86AVX2-NEXT: incl %ecx
+; X86AVX2-NEXT: andl $3, %ecx
+; X86AVX2-NEXT: movl %eax, 16(%esp,%ecx,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %xmm0
; X86AVX2-NEXT: leal -4(%ebp), %esp
; X86AVX2-NEXT: popl %esi
@@ -1362,12 +1363,13 @@ define <2 x i64> @load_i64_v2i64(<2 x i64> %v, ptr %p, i32 %y) nounwind {
; X86AVX2-NEXT: movl (%ecx), %edx
; X86AVX2-NEXT: movl 4(%ecx), %ecx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
-; X86AVX2-NEXT: leal (%eax,%eax), %esi
+; X86AVX2-NEXT: addl %eax, %eax
+; X86AVX2-NEXT: movl %eax, %esi
; X86AVX2-NEXT: andl $3, %esi
; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: leal 1(%eax,%eax), %eax
+; X86AVX2-NEXT: incl %eax
; X86AVX2-NEXT: andl $3, %eax
; X86AVX2-NEXT: movl %ecx, 16(%esp,%eax,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %xmm0
@@ -1742,18 +1744,19 @@ define <4 x i64> @arg_i64_v4i64(<4 x i64> %v, i64 %x, i32 %y) nounwind {
; X86AVX2-NEXT: pushl %esi
; X86AVX2-NEXT: andl $-32, %esp
; X86AVX2-NEXT: subl $96, %esp
-; X86AVX2-NEXT: movl 8(%ebp), %eax
-; X86AVX2-NEXT: movl 12(%ebp), %ecx
-; X86AVX2-NEXT: movl 16(%ebp), %edx
+; X86AVX2-NEXT: movl 8(%ebp), %edx
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: movl 16(%ebp), %ecx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
-; X86AVX2-NEXT: leal (%edx,%edx), %esi
+; X86AVX2-NEXT: addl %ecx, %ecx
+; X86AVX2-NEXT: movl %ecx, %esi
; X86AVX2-NEXT: andl $7, %esi
-; X86AVX2-NEXT: movl %eax, (%esp,%esi,4)
+; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: leal 1(%edx,%edx), %eax
-; X86AVX2-NEXT: andl $7, %eax
-; X86AVX2-NEXT: movl %ecx, 32(%esp,%eax,4)
+; X86AVX2-NEXT: incl %ecx
+; X86AVX2-NEXT: andl $7, %ecx
+; X86AVX2-NEXT: movl %eax, 32(%esp,%ecx,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %ymm0
; X86AVX2-NEXT: leal -4(%ebp), %esp
; X86AVX2-NEXT: popl %esi
@@ -2128,12 +2131,13 @@ define <4 x i64> @load_i64_v4i64(<4 x i64> %v, ptr %p, i32 %y) nounwind {
; X86AVX2-NEXT: movl (%ecx), %edx
; X86AVX2-NEXT: movl 4(%ecx), %ecx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
-; X86AVX2-NEXT: leal (%eax,%eax), %esi
+; X86AVX2-NEXT: addl %eax, %eax
+; X86AVX2-NEXT: movl %eax, %esi
; X86AVX2-NEXT: andl $7, %esi
; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: leal 1(%eax,%eax), %eax
+; X86AVX2-NEXT: incl %eax
; X86AVX2-NEXT: andl $7, %eax
; X86AVX2-NEXT: movl %ecx, 32(%esp,%eax,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %ymm0
diff --git a/llvm/test/CodeGen/X86/instr-symbols.mir b/llvm/test/CodeGen/X86/instr-symbols.mir
index a900288..7af6ca8 100644
--- a/llvm/test/CodeGen/X86/instr-symbols.mir
+++ b/llvm/test/CodeGen/X86/instr-symbols.mir
@@ -23,6 +23,7 @@ name: test
# CHECK-LABEL: {{^}}test:
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.entry:
diff --git a/llvm/test/CodeGen/X86/int-to-fp-demanded.ll b/llvm/test/CodeGen/X86/int-to-fp-demanded.ll
new file mode 100644
index 0000000..cdde03f
--- /dev/null
+++ b/llvm/test/CodeGen/X86/int-to-fp-demanded.ll
@@ -0,0 +1,382 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
+
+declare void @use.i1(i1)
+declare void @use.i32(i32)
+define i32 @sitofp_signbit_only(i32 %i_in) nounwind {
+; X86-LABEL: sitofp_signbit_only:
+; X86: # %bb.0:
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: fildl (%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: movl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_signbit_only:
+; X64: # %bb.0:
+; X64-NEXT: cvtsi2ss %edi, %xmm0
+; X64-NEXT: movmskps %xmm0, %eax
+; X64-NEXT: shll $31, %eax
+; X64-NEXT: retq
+ %f = sitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define i32 @sitofp_signbit_only_okay_width(i16 %i_in) nounwind {
+; X86-LABEL: sitofp_signbit_only_okay_width:
+; X86: # %bb.0:
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movw %ax, {{[0-9]+}}(%esp)
+; X86-NEXT: filds {{[0-9]+}}(%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: movl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_signbit_only_okay_width:
+; X64: # %bb.0:
+; X64-NEXT: movswl %di, %eax
+; X64-NEXT: cvtsi2ss %eax, %xmm0
+; X64-NEXT: movmskps %xmm0, %eax
+; X64-NEXT: shll $31, %eax
+; X64-NEXT: retq
+ %f = sitofp i16 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define i32 @sitofp_signbit_only_fail_bad_width1(i64 %i_in) nounwind {
+; X86-LABEL: sitofp_signbit_only_fail_bad_width1:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: movl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT: andl (%esp), %eax
+; X86-NEXT: popl %ecx
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_signbit_only_fail_bad_width1:
+; X64: # %bb.0:
+; X64-NEXT: cvtsi2ss %rdi, %xmm0
+; X64-NEXT: movmskps %xmm0, %eax
+; X64-NEXT: shll $31, %eax
+; X64-NEXT: retq
+ %f = sitofp i64 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define <2 x i16> @sitofp_signbit_only_fail_bad_width2(i32 %i_in) nounwind {
+; X86-LABEL: sitofp_signbit_only_fail_bad_width2:
+; X86: # %bb.0:
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: fildl (%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: shrl $16, %edx
+; X86-NEXT: andl $32768, %eax # imm = 0x8000
+; X86-NEXT: andl $32768, %edx # imm = 0x8000
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NEXT: # kill: def $dx killed $dx killed $edx
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_signbit_only_fail_bad_width2:
+; X64: # %bb.0:
+; X64-NEXT: cvtsi2ss %edi, %xmm0
+; X64-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: retq
+ %f = sitofp i32 %i_in to float
+ %i2xi16 = bitcast float %f to <2 x i16>
+ %r = and <2 x i16> %i2xi16, <i16 32768, i16 32768>
+ ret <2 x i16> %r
+}
+
+define i32 @sitofp_many_bits_fail(i32 %i_in) nounwind {
+; X86-LABEL: sitofp_many_bits_fail:
+; X86: # %bb.0:
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: fildl (%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: movl $-2147483647, %eax # imm = 0x80000001
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_many_bits_fail:
+; X64: # %bb.0:
+; X64-NEXT: cvtsi2ss %edi, %xmm0
+; X64-NEXT: movd %xmm0, %eax
+; X64-NEXT: andl $-2147483647, %eax # imm = 0x80000001
+; X64-NEXT: retq
+ %f = sitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483649
+ ret i32 %r
+}
+
+define i32 @sitofp_multiuse_fail(i32 %i_in) nounwind {
+; X86-LABEL: sitofp_multiuse_fail:
+; X86: # %bb.0:
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: fildl {{[0-9]+}}(%esp)
+; X86-NEXT: fsts {{[0-9]+}}(%esp)
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll use.i32@PLT
+; X86-NEXT: movl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_multiuse_fail:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rbx
+; X64-NEXT: cvtsi2ss %edi, %xmm0
+; X64-NEXT: movd %xmm0, %ebx
+; X64-NEXT: movl %ebx, %edi
+; X64-NEXT: callq use.i32@PLT
+; X64-NEXT: andl $-2147483648, %ebx # imm = 0x80000000
+; X64-NEXT: movl %ebx, %eax
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+ %f = sitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ call void @use.i32(i32 %i)
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define i32 @sitofp_multiuse_okay(i32 %i_in) nounwind {
+; X86-LABEL: sitofp_multiuse_okay:
+; X86: # %bb.0:
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: fildl {{[0-9]+}}(%esp)
+; X86-NEXT: fsts {{[0-9]+}}(%esp)
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll use.i1@PLT
+; X86-NEXT: movl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_multiuse_okay:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rbx
+; X64-NEXT: cvtsi2ss %edi, %xmm0
+; X64-NEXT: movd %xmm0, %ebx
+; X64-NEXT: movl %ebx, %edi
+; X64-NEXT: callq use.i1@PLT
+; X64-NEXT: andl $-2147483648, %ebx # imm = 0x80000000
+; X64-NEXT: movl %ebx, %eax
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+ %f = sitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ %cmp = icmp slt i32 %i, 0
+ call void @use.i1(i32 %i)
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define i32 @uitofp_signbit_only(i32 %i_in) nounwind {
+; X86-LABEL: uitofp_signbit_only:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: uitofp_signbit_only:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
+ %f = uitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define i32 @uitofp_signbit_only_okay_width(i16 %i_in) nounwind {
+; X86-LABEL: uitofp_signbit_only_okay_width:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: uitofp_signbit_only_okay_width:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
+ %f = uitofp i16 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define i32 @uitofp_signbit_only_okay_width1(i64 %i_in) nounwind {
+; X86-LABEL: uitofp_signbit_only_okay_width1:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: uitofp_signbit_only_okay_width1:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
+ %f = uitofp i64 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define <2 x i16> @uitofp_signbit_only_fail_bad_width2(i32 %i_in) nounwind {
+; X86-LABEL: uitofp_signbit_only_fail_bad_width2:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: movl $32768, %eax # imm = 0x8000
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: uitofp_signbit_only_fail_bad_width2:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cvtsi2ss %rax, %xmm0
+; X64-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: retq
+ %f = uitofp i32 %i_in to float
+ %i2xi16 = bitcast float %f to <2 x i16>
+ %r = and <2 x i16> %i2xi16, <i16 32768, i16 32768>
+ ret <2 x i16> %r
+}
+
+define i32 @uitofp_many_bits_fail(i32 %i_in) nounwind {
+; X86-LABEL: uitofp_many_bits_fail:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $1, %eax
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: uitofp_many_bits_fail:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cvtsi2ss %rax, %xmm0
+; X64-NEXT: movd %xmm0, %eax
+; X64-NEXT: andl $1, %eax
+; X64-NEXT: retq
+ %f = uitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ %r = and i32 %i, 2147483649
+ ret i32 %r
+}
+
+define i32 @uitofp_multiuse_fail(i32 %i_in) nounwind {
+; X86-LABEL: uitofp_multiuse_fail:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll use.i32@PLT
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: uitofp_multiuse_fail:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cvtsi2ss %rax, %xmm0
+; X64-NEXT: movd %xmm0, %edi
+; X64-NEXT: callq use.i32@PLT
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %f = uitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ call void @use.i32(i32 %i)
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
+
+define i32 @uitofp_multiuse_okay(i32 %i_in) nounwind {
+; X86-LABEL: uitofp_multiuse_okay:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll use.i1@PLT
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: uitofp_multiuse_okay:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cvtsi2ss %rax, %xmm0
+; X64-NEXT: movd %xmm0, %edi
+; X64-NEXT: callq use.i1@PLT
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %f = uitofp i32 %i_in to float
+ %i = bitcast float %f to i32
+ %cmp = icmp slt i32 %i, 0
+ call void @use.i1(i32 %i)
+ %r = and i32 %i, 2147483648
+ ret i32 %r
+}
diff --git a/llvm/test/CodeGen/X86/isel-select-cmov.ll b/llvm/test/CodeGen/X86/isel-select-cmov.ll
index 0e5293c..39a20bf 100644
--- a/llvm/test/CodeGen/X86/isel-select-cmov.ll
+++ b/llvm/test/CodeGen/X86/isel-select-cmov.ll
@@ -13,6 +13,8 @@
; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=i686-apple-darwin10 -verify-machineinstrs | FileCheck %s --check-prefix=GISEL-X86
; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=i686-apple-darwin10 -verify-machineinstrs -mattr=+cmov | FileCheck %s --check-prefix=GISEL-X86-CMOV
+; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=x86_64-apple-darwin10 -verify-machineinstrs -mattr=+ndd | FileCheck %s --check-prefix=NDD
+
; Test conditional move for the supported types (i16, i32, and i64) and
; condition input (argument or cmp).
; When cmov is not available (i8 type or X86), the branch is expected.
@@ -114,6 +116,16 @@ define zeroext i8 @select_cmov_i8(i1 zeroext %cond, i8 zeroext %a, i8 zeroext %b
; GISEL-X86-CMOV-NEXT: cmovnew %dx, %ax
; GISEL-X86-CMOV-NEXT: ## kill: def $al killed $al killed $eax
; GISEL-X86-CMOV-NEXT: retl
+;
+; NDD-LABEL: select_cmov_i8:
+; NDD: ## %bb.0:
+; NDD-NEXT: testb $1, %dil
+; NDD-NEXT: jne LBB0_2
+; NDD-NEXT: ## %bb.1:
+; NDD-NEXT: movl %edx, %esi
+; NDD-NEXT: LBB0_2:
+; NDD-NEXT: movzbl %sil, %eax
+; NDD-NEXT: retq
%1 = select i1 %cond, i8 %a, i8 %b
ret i8 %1
}
@@ -207,6 +219,13 @@ define zeroext i16 @select_cmov_i16(i1 zeroext %cond, i16 zeroext %a, i16 zeroex
; GISEL-X86-CMOV-NEXT: cmovnew %dx, %ax
; GISEL-X86-CMOV-NEXT: ## kill: def $ax killed $ax killed $eax
; GISEL-X86-CMOV-NEXT: retl
+;
+; NDD-LABEL: select_cmov_i16:
+; NDD: ## %bb.0:
+; NDD-NEXT: testb $1, %dil
+; NDD-NEXT: cmovnew %si, %dx, %ax
+; NDD-NEXT: movzwl %ax, %eax
+; NDD-NEXT: retq
%1 = select i1 %cond, i16 %a, i16 %b
ret i16 %1
}
@@ -305,6 +324,13 @@ define zeroext i16 @select_cmp_cmov_i16(i16 zeroext %a, i16 zeroext %b) {
; GISEL-X86-CMOV-NEXT: cmovew %cx, %ax
; GISEL-X86-CMOV-NEXT: ## kill: def $ax killed $ax killed $eax
; GISEL-X86-CMOV-NEXT: retl
+;
+; NDD-LABEL: select_cmp_cmov_i16:
+; NDD: ## %bb.0:
+; NDD-NEXT: cmpw %si, %di
+; NDD-NEXT: cmovbw %di, %si, %ax
+; NDD-NEXT: movzwl %ax, %eax
+; NDD-NEXT: retq
%1 = icmp ult i16 %a, %b
%2 = select i1 %1, i16 %a, i16 %b
ret i16 %2
@@ -391,6 +417,12 @@ define i32 @select_cmov_i32(i1 zeroext %cond, i32 %a, i32 %b) {
; GISEL-X86-CMOV-NEXT: testl %ecx, %ecx
; GISEL-X86-CMOV-NEXT: cmovnel {{[0-9]+}}(%esp), %eax
; GISEL-X86-CMOV-NEXT: retl
+;
+; NDD-LABEL: select_cmov_i32:
+; NDD: ## %bb.0:
+; NDD-NEXT: testb $1, %dil
+; NDD-NEXT: cmovnel %esi, %edx, %eax
+; NDD-NEXT: retq
%1 = select i1 %cond, i32 %a, i32 %b
ret i32 %1
}
@@ -482,6 +514,12 @@ define i32 @select_cmp_cmov_i32(i32 %a, i32 %b) {
; GISEL-X86-CMOV-NEXT: andl $1, %edx
; GISEL-X86-CMOV-NEXT: cmovel %ecx, %eax
; GISEL-X86-CMOV-NEXT: retl
+;
+; NDD-LABEL: select_cmp_cmov_i32:
+; NDD: ## %bb.0:
+; NDD-NEXT: cmpl %esi, %edi
+; NDD-NEXT: cmovbl %edi, %esi, %eax
+; NDD-NEXT: retq
%1 = icmp ult i32 %a, %b
%2 = select i1 %1, i32 %a, i32 %b
ret i32 %2
@@ -584,6 +622,12 @@ define i64 @select_cmov_i64(i1 zeroext %cond, i64 %a, i64 %b) {
; GISEL-X86-CMOV-NEXT: cmovnel {{[0-9]+}}(%esp), %eax
; GISEL-X86-CMOV-NEXT: cmovnel {{[0-9]+}}(%esp), %edx
; GISEL-X86-CMOV-NEXT: retl
+;
+; NDD-LABEL: select_cmov_i64:
+; NDD: ## %bb.0:
+; NDD-NEXT: testb $1, %dil
+; NDD-NEXT: cmovneq %rsi, %rdx, %rax
+; NDD-NEXT: retq
%1 = select i1 %cond, i64 %a, i64 %b
ret i64 %1
}
@@ -754,6 +798,12 @@ define i64 @select_cmp_cmov_i64(i64 %a, i64 %b) nounwind {
; GISEL-X86-CMOV-NEXT: popl %ebx
; GISEL-X86-CMOV-NEXT: popl %ebp
; GISEL-X86-CMOV-NEXT: retl
+;
+; NDD-LABEL: select_cmp_cmov_i64:
+; NDD: ## %bb.0:
+; NDD-NEXT: cmpq %rsi, %rdi
+; NDD-NEXT: cmovbq %rdi, %rsi, %rax
+; NDD-NEXT: retq
%1 = icmp ult i64 %a, %b
%2 = select i1 %1, i64 %a, i64 %b
ret i64 %2
diff --git a/llvm/test/CodeGen/X86/isel-traps.ll b/llvm/test/CodeGen/X86/isel-traps.ll
new file mode 100644
index 0000000..c207387
--- /dev/null
+++ b/llvm/test/CodeGen/X86/isel-traps.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=ALL,X64
+; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=ALL,X64
+; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=ALL,GISEL-X64
+; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=ALL,X86
+; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=ALL,X86
+; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=ALL,GISEL-X86
+
+declare void @llvm.trap()
+
+define void @test_trap() {
+; ALL-LABEL: test_trap:
+; ALL: # %bb.0:
+; ALL-NEXT: ud2
+; ALL-NEXT: ret{{[l|q]}}
+ tail call void @llvm.trap()
+ ret void
+}
+
+define void @test_debugtrap() {
+; ALL-LABEL: test_debugtrap:
+; ALL: # %bb.0:
+; ALL-NEXT: int3
+; ALL-NEXT: ret{{[l|q]}}
+ tail call void @llvm.debugtrap()
+ ret void
+}
+
+define void @test_ubsantrap() {
+; ALL-LABEL: test_ubsantrap:
+; ALL: # %bb.0:
+; ALL-NEXT: ud1l 12(%eax), %eax
+; ALL-NEXT: ret{{[l|q]}}
+ call void @llvm.ubsantrap(i8 12)
+ ret void
+}
+
+define void @test_ubsantrap_custom() nounwind {
+; X64-LABEL: test_ubsantrap_custom:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $42, %edi
+; X64-NEXT: callq guide@PLT
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+;
+; GISEL-X64-LABEL: test_ubsantrap_custom:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: pushq %rax
+; GISEL-X64-NEXT: movl $42, %edi
+; GISEL-X64-NEXT: callq guide
+; GISEL-X64-NEXT: popq %rax
+; GISEL-X64-NEXT: retq
+;
+; X86-LABEL: test_ubsantrap_custom:
+; X86: # %bb.0:
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: movl $42, (%esp)
+; X86-NEXT: calll guide
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: retl
+;
+; GISEL-X86-LABEL: test_ubsantrap_custom:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: subl $12, %esp
+; GISEL-X86-NEXT: movl $42, %eax
+; GISEL-X86-NEXT: movl %eax, (%esp)
+; GISEL-X86-NEXT: calll guide
+; GISEL-X86-NEXT: addl $12, %esp
+; GISEL-X86-NEXT: retl
+ call void @llvm.ubsantrap(i8 42) "trap-func-name"="guide"
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/known-never-zero.ll b/llvm/test/CodeGen/X86/known-never-zero.ll
index cc98627..39d02f9 100644
--- a/llvm/test/CodeGen/X86/known-never-zero.ll
+++ b/llvm/test/CodeGen/X86/known-never-zero.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=CHECK
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64
;; Use cttz to test if we properly prove never-zero. There is a very
;; simple transform from cttz -> cttz_zero_undef if its operand is
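For reference, a minimal sketch of the transform this comment describes (not part of the test file): once the operand is provably non-zero, a cttz whose zero-is-poison flag is false may be treated as if the flag were true, so no zero check is required:

    %z = or i32 %x, 1                              ; known non-zero
    %r = call i32 @llvm.cttz.i32(i32 %z, i1 false) ; foldable to (..., i1 true)

That is what lets the checks below use a bare rep bsfl with no branch guarding the zero case, while the *_maybe_zero tests keep the branch.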
@@ -9,50 +10,82 @@ declare i32 @llvm.uadd.sat.i32(i32, i32)
declare i32 @llvm.umax.i32(i32, i32)
declare i32 @llvm.umin.i32(i32, i32)
declare i32 @llvm.smin.i32(i32, i32)
+declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
declare i32 @llvm.smax.i32(i32, i32)
+declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
declare i32 @llvm.bswap.i32(i32)
declare i32 @llvm.bitreverse.i32(i32)
declare i32 @llvm.ctpop.i32(i32)
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>)
declare i32 @llvm.abs.i32(i32, i1)
declare i32 @llvm.fshl.i32(i32, i32, i32)
declare i32 @llvm.fshr.i32(i32, i32, i32)
define i32 @or_known_nonzero(i32 %x) {
-; CHECK-LABEL: or_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: or_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%z = or i32 %x, 1
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @or_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: or_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl %esi, %edi
-; CHECK-NEXT: je .LBB1_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB1_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: or_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: je .LBB1_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB1_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: orl %esi, %edi
+; X64-NEXT: je .LBB1_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB1_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = or i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @select_known_nonzero(i1 %c, i32 %x) {
-; CHECK-LABEL: select_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %esi
-; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: movl $122, %eax
-; CHECK-NEXT: cmovnel %esi, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: select_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $122, %ecx
+; X86-NEXT: cmovnel %eax, %ecx
+; X86-NEXT: rep bsfl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: select_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %esi
+; X64-NEXT: testb $1, %dil
+; X64-NEXT: movl $122, %eax
+; X64-NEXT: cmovnel %esi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%y = or i32 %x, 1
%z = select i1 %c, i32 %y, i32 122
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -60,20 +93,36 @@ define i32 @select_known_nonzero(i1 %c, i32 %x) {
}
define i32 @select_maybe_zero(i1 %c, i32 %x) {
-; CHECK-LABEL: select_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %esi
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: cmovnel %esi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB3_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB3_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: select_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: orl $1, %ecx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X86-NEXT: cmovnel %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB3_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB3_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: select_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %esi
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: testb $1, %dil
+; X64-NEXT: cmovnel %esi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB3_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB3_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %x, 1
%z = select i1 %c, i32 %y, i32 0
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -81,28 +130,45 @@ define i32 @select_maybe_zero(i1 %c, i32 %x) {
}
define i32 @shl_known_nonzero_1s_bit_set(i32 %x) {
-; CHECK-LABEL: shl_known_nonzero_1s_bit_set:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $123, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_known_nonzero_1s_bit_set:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $123, %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_known_nonzero_1s_bit_set:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $123, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = shl i32 123, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @shl_known_nonzero_nsw(i32 %x, i32 %yy) {
-; CHECK-LABEL: shl_known_nonzero_nsw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_known_nonzero_nsw:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_known_nonzero_nsw:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = shl nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -110,14 +176,23 @@ define i32 @shl_known_nonzero_nsw(i32 %x, i32 %yy) {
}
define i32 @shl_known_nonzero_nuw(i32 %x, i32 %yy) {
-; CHECK-LABEL: shl_known_nonzero_nuw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_known_nonzero_nuw:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_known_nonzero_nuw:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = shl nuw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -125,67 +200,116 @@ define i32 @shl_known_nonzero_nuw(i32 %x, i32 %yy) {
}
define i32 @shl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: shl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB7_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB7_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB7_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB7_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB7_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB7_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = shl nuw nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @uaddsat_known_nonzero(i32 %x) {
-; CHECK-LABEL: uaddsat_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: incl %edi
-; CHECK-NEXT: movl $-1, %eax
-; CHECK-NEXT: cmovnel %edi, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: uaddsat_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: incl %eax
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: cmovnel %eax, %ecx
+; X86-NEXT: rep bsfl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: uaddsat_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: incl %edi
+; X64-NEXT: movl $-1, %eax
+; X64-NEXT: cmovnel %edi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.uadd.sat.i32(i32 %x, i32 1)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @uaddsat_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: uaddsat_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addl %esi, %edi
-; CHECK-NEXT: movl $-1, %eax
-; CHECK-NEXT: cmovael %edi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB9_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB9_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: uaddsat_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $-1, %eax
+; X86-NEXT: cmovael %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB9_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB9_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: uaddsat_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: addl %esi, %edi
+; X64-NEXT: movl $-1, %eax
+; X64-NEXT: cmovael %edi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB9_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB9_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @umax_known_nonzero(i32 %x, i32 %y) {
-; CHECK-LABEL: umax_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: cmpl %eax, %edi
-; CHECK-NEXT: cmoval %edi, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umax_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: cmpl %edx, %eax
+; X86-NEXT: cmoval %eax, %edx
+; X86-NEXT: rep bsfl %edx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umax_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: cmpl %eax, %edi
+; X64-NEXT: cmoval %edi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%yy = shl nuw i32 4, %y
%z = call i32 @llvm.umax.i32(i32 %x, i32 %yy)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -193,35 +317,62 @@ define i32 @umax_known_nonzero(i32 %x, i32 %y) {
}
define i32 @umax_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: umax_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl %esi, %edi
-; CHECK-NEXT: cmoval %edi, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB11_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB11_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umax_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl %eax, %ecx
+; X86-NEXT: cmoval %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB11_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB11_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umax_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl %esi, %edi
+; X64-NEXT: cmoval %edi, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB11_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB11_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.umax.i32(i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @umin_known_nonzero(i32 %xx, i32 %yy) {
-; CHECK-LABEL: umin_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: addl $4, %esi
-; CHECK-NEXT: cmpl %esi, %eax
-; CHECK-NEXT: cmovbl %eax, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umin_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: addl $4, %eax
+; X86-NEXT: cmpl %eax, %edx
+; X86-NEXT: cmovbl %edx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umin_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: addl $4, %esi
+; X64-NEXT: cmpl %esi, %eax
+; X64-NEXT: cmovbl %eax, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%x = shl nuw i32 4, %xx
%y = add nuw nsw i32 %yy, 4
%z = call i32 @llvm.umin.i32(i32 %x, i32 %y)
@@ -230,36 +381,63 @@ define i32 @umin_known_nonzero(i32 %xx, i32 %yy) {
}
define i32 @umin_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: umin_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl $54, %edi
-; CHECK-NEXT: movl $54, %eax
-; CHECK-NEXT: cmovbl %edi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB13_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB13_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umin_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl $54, %ecx
+; X86-NEXT: movl $54, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB13_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB13_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umin_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $54, %edi
+; X64-NEXT: movl $54, %eax
+; X64-NEXT: cmovbl %edi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB13_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB13_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.umin.i32(i32 %x, i32 54)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @smin_known_nonzero(i32 %xx, i32 %yy) {
-; CHECK-LABEL: smin_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: addl $4, %esi
-; CHECK-NEXT: cmpl %esi, %eax
-; CHECK-NEXT: cmovll %eax, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smin_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: addl $4, %eax
+; X86-NEXT: cmpl %eax, %edx
+; X86-NEXT: cmovll %edx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smin_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: addl $4, %esi
+; X64-NEXT: cmpl %esi, %eax
+; X64-NEXT: cmovll %eax, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%x = shl nuw i32 4, %xx
%y = add nuw nsw i32 %yy, 4
%z = call i32 @llvm.smin.i32(i32 %x, i32 %y)
@@ -267,37 +445,120 @@ define i32 @smin_known_nonzero(i32 %xx, i32 %yy) {
ret i32 %r
}
+define i32 @smin_known_zero(i32 %x, i32 %y) {
+; X86-LABEL: smin_known_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $-54, %eax
+; X86-NEXT: movl $-54, %ecx
+; X86-NEXT: cmovll %eax, %ecx
+; X86-NEXT: rep bsfl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smin_known_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $-54, %edi
+; X64-NEXT: movl $-54, %eax
+; X64-NEXT: cmovll %edi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+ %z = call i32 @llvm.smin.i32(i32 %x, i32 -54)
+ %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
+ ret i32 %r
+}
+
+define <4 x i32> @smin_known_zero_vec(<4 x i32> %x, <4 x i32> %y) {
+; X86-LABEL: smin_known_zero_vec:
+; X86: # %bb.0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [4294967242,4294967273,4294967284,4294967295]
+; X86-NEXT: movdqa %xmm1, %xmm2
+; X86-NEXT: pcmpgtd %xmm0, %xmm2
+; X86-NEXT: pand %xmm2, %xmm0
+; X86-NEXT: pandn %xmm1, %xmm2
+; X86-NEXT: por %xmm2, %xmm0
+; X86-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-NEXT: paddd %xmm0, %xmm1
+; X86-NEXT: pand %xmm1, %xmm0
+; X86-NEXT: pxor %xmm1, %xmm1
+; X86-NEXT: pcmpeqd %xmm1, %xmm0
+; X86-NEXT: psrld $31, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: smin_known_zero_vec:
+; X64: # %bb.0:
+; X64-NEXT: vpminsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X64-NEXT: vpaddd %xmm1, %xmm0, %xmm1
+; X64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpsrld $31, %xmm0, %xmm0
+; X64-NEXT: retq
+ %z = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %x, <4 x i32> <i32 -54, i32 -23, i32 -12, i32 -1>)
+ %r = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %z)
+ %3 = icmp eq <4 x i32> %r, <i32 1, i32 1, i32 1, i32 1>
+ %ret = zext <4 x i1> %3 to <4 x i32>
+ ret <4 x i32> %ret
+}
+
define i32 @smin_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: smin_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl $54, %edi
-; CHECK-NEXT: movl $54, %eax
-; CHECK-NEXT: cmovll %edi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB15_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB15_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smin_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl $54, %ecx
+; X86-NEXT: movl $54, %eax
+; X86-NEXT: cmovll %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB17_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB17_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smin_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $54, %edi
+; X64-NEXT: movl $54, %eax
+; X64-NEXT: cmovll %edi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB17_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB17_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.smin.i32(i32 %x, i32 54)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @smax_known_nonzero(i32 %xx, i32 %yy) {
-; CHECK-LABEL: smax_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: addl $4, %esi
-; CHECK-NEXT: cmpl %esi, %eax
-; CHECK-NEXT: cmovgl %eax, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smax_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: addl $4, %eax
+; X86-NEXT: cmpl %eax, %edx
+; X86-NEXT: cmovgl %edx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smax_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: addl $4, %esi
+; X64-NEXT: cmpl %esi, %eax
+; X64-NEXT: cmovgl %eax, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%x = shl nuw i32 4, %xx
%y = add nuw nsw i32 %yy, 4
%z = call i32 @llvm.smax.i32(i32 %x, i32 %y)
@@ -306,35 +567,125 @@ define i32 @smax_known_nonzero(i32 %xx, i32 %yy) {
}
define i32 @smax_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: smax_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl $55, %edi
-; CHECK-NEXT: movl $54, %eax
-; CHECK-NEXT: cmovgel %edi, %eax
-; CHECK-NEXT: bsfl %eax, %ecx
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: cmovnel %ecx, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smax_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $55, %eax
+; X86-NEXT: movl $54, %ecx
+; X86-NEXT: cmovgel %eax, %ecx
+; X86-NEXT: rep bsfl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smax_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $55, %edi
+; X64-NEXT: movl $54, %eax
+; X64-NEXT: cmovgel %edi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.smax.i32(i32 %x, i32 54)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
+define <4 x i32> @smax_known_zero_vec(<4 x i32> %x, <4 x i32> %y) {
+; X86-LABEL: smax_known_zero_vec:
+; X86: # %bb.0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [54,23,12,1]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pcmpgtd %xmm1, %xmm2
+; X86-NEXT: pand %xmm2, %xmm0
+; X86-NEXT: pandn %xmm1, %xmm2
+; X86-NEXT: por %xmm2, %xmm0
+; X86-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-NEXT: paddd %xmm0, %xmm1
+; X86-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT: pxor %xmm1, %xmm0
+; X86-NEXT: pcmpgtd %xmm1, %xmm0
+; X86-NEXT: psrld $31, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: smax_known_zero_vec:
+; X64: # %bb.0:
+; X64-NEXT: vpmaxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X64-NEXT: vpaddd %xmm1, %xmm0, %xmm1
+; X64-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpminud %xmm1, %xmm0, %xmm1
+; X64-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: retq
+ %z = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %x, <4 x i32> <i32 54, i32 23, i32 12, i32 1>)
+ %r = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %z)
+ %3 = icmp eq <4 x i32> %r, <i32 1, i32 1, i32 1, i32 1>
+ %ret = zext <4 x i1> %3 to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+define i32 @smax_known_zero(i32 %x, i32 %y) {
+; X86-LABEL: smax_known_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: testl %ecx, %ecx
+; X86-NEXT: movl $-1, %eax
+; X86-NEXT: cmovnsl %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB21_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB21_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smax_known_zero:
+; X64: # %bb.0:
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: movl $-1, %eax
+; X64-NEXT: cmovnsl %edi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB21_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB21_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
+ %z = call i32 @llvm.smax.i32(i32 %x, i32 -1)
+ %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
+ ret i32 %r
+}
+
define i32 @rotr_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotr_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB18_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB18_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB22_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB22_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB22_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB22_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%shr = lshr i32 %x, %y
%sub = sub i32 32, %y
@@ -345,19 +696,33 @@ define i32 @rotr_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotr_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotr_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB19_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB19_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB23_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB23_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB23_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB23_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%shr = lshr i32 %x, %y
%sub = sub i32 32, %y
%shl = shl i32 %x, %sub
@@ -367,14 +732,23 @@ define i32 @rotr_maybe_zero(i32 %x, i32 %y) {
}
define i32 @rotr_with_fshr_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotr_with_fshr_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_with_fshr_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_with_fshr_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%z = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -382,39 +756,68 @@ define i32 @rotr_with_fshr_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotr_with_fshr_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotr_with_fshr_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB21_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB21_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_with_fshr_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB25_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB25_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_with_fshr_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB25_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB25_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @rotl_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotl_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB22_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB22_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB26_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB26_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB26_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB26_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%shl = shl i32 %x, %y
%sub = sub i32 32, %y
@@ -425,19 +828,33 @@ define i32 @rotl_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB23_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB23_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB27_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB27_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB27_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB27_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%shl = shl i32 %x, %y
%sub = sub i32 32, %y
%shr = lshr i32 %x, %sub
@@ -447,14 +864,23 @@ define i32 @rotl_maybe_zero(i32 %x, i32 %y) {
}
define i32 @rotl_with_fshl_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotl_with_fshl_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_with_fshl_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_with_fshl_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%z = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -462,47 +888,78 @@ define i32 @rotl_with_fshl_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotl_with_fshl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotl_with_fshl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB25_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB25_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_with_fshl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB29_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB29_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_with_fshl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB29_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB29_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sra_known_nonzero_sign_bit_set(i32 %x) {
-; CHECK-LABEL: sra_known_nonzero_sign_bit_set:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: sarl %cl, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sra_known_nonzero_sign_bit_set:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X86-NEXT: sarl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sra_known_nonzero_sign_bit_set:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: sarl %cl, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = ashr i32 2147606891, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sra_known_nonzero_exact(i32 %x, i32 %yy) {
-; CHECK-LABEL: sra_known_nonzero_exact:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: sarl %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sra_known_nonzero_exact:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: sarl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sra_known_nonzero_exact:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: sarl %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = ashr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -510,47 +967,78 @@ define i32 @sra_known_nonzero_exact(i32 %x, i32 %yy) {
}
define i32 @sra_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: sra_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: sarl %cl, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB28_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB28_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sra_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: sarl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB32_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB32_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sra_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: sarl %cl, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB32_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB32_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = ashr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @srl_known_nonzero_sign_bit_set(i32 %x) {
-; CHECK-LABEL: srl_known_nonzero_sign_bit_set:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shrl %cl, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: srl_known_nonzero_sign_bit_set:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: srl_known_nonzero_sign_bit_set:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shrl %cl, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = lshr i32 2147606891, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @srl_known_nonzero_exact(i32 %x, i32 %yy) {
-; CHECK-LABEL: srl_known_nonzero_exact:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shrl %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: srl_known_nonzero_exact:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: srl_known_nonzero_exact:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shrl %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = lshr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -558,33 +1046,56 @@ define i32 @srl_known_nonzero_exact(i32 %x, i32 %yy) {
}
define i32 @srl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: srl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shrl %cl, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB31_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB31_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: srl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB35_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB35_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: srl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shrl %cl, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB35_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB35_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = lshr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @udiv_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: udiv_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %esi
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: udiv_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $64, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: divl {{[0-9]+}}(%esp)
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: udiv_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %esi
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 64
%z = udiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -592,33 +1103,56 @@ define i32 @udiv_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @udiv_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: udiv_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %esi
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB33_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB33_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: udiv_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: divl {{[0-9]+}}(%esp)
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB37_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB37_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: udiv_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %esi
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB37_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB37_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = udiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sdiv_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: sdiv_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %esi
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sdiv_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $64, %eax
+; X86-NEXT: cltd
+; X86-NEXT: idivl {{[0-9]+}}(%esp)
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sdiv_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: cltd
+; X64-NEXT: idivl %esi
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 64
%z = sdiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -626,31 +1160,53 @@ define i32 @sdiv_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @sdiv_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: sdiv_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %esi
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB35_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB35_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sdiv_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cltd
+; X86-NEXT: idivl {{[0-9]+}}(%esp)
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB39_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB39_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sdiv_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cltd
+; X64-NEXT: idivl %esi
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB39_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB39_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = sdiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @add_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: add_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %edi
-; CHECK-NEXT: addl %esi, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: add_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: add_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %edi
+; X64-NEXT: addl %esi, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 1
%z = add nuw i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -658,17 +1214,30 @@ define i32 @add_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @add_maybe_zero(i32 %xx, i32 %y) {
-; CHECK-LABEL: add_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %edi
-; CHECK-NEXT: addl %esi, %edi
-; CHECK-NEXT: je .LBB37_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB37_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: add_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: je .LBB41_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB41_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: add_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %edi
+; X64-NEXT: addl %esi, %edi
+; X64-NEXT: je .LBB41_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB41_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 1
%z = add nsw i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -676,15 +1245,24 @@ define i32 @add_maybe_zero(i32 %xx, i32 %y) {
}
define i32 @sub_known_nonzero_neg_case(i32 %xx) {
-; CHECK-LABEL: sub_known_nonzero_neg_case:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $256, %eax # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: negl %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_known_nonzero_neg_case:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_known_nonzero_neg_case:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $256, %eax # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: negl %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = shl nuw nsw i32 256, %xx
%z = sub i32 0, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -692,14 +1270,24 @@ define i32 @sub_known_nonzero_neg_case(i32 %xx) {
}
define i32 @sub_known_nonzero_ne_case(i32 %xx, i32 %yy) {
-; CHECK-LABEL: sub_known_nonzero_ne_case:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: andl $-65, %edi
-; CHECK-NEXT: subl %eax, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_known_nonzero_ne_case:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: orl $64, %ecx
+; X86-NEXT: andl $-65, %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_known_nonzero_ne_case:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: andl $-65, %edi
+; X64-NEXT: subl %eax, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 64
%y = and i32 %xx, -65
%z = sub i32 %y, %x
@@ -708,18 +1296,32 @@ define i32 @sub_known_nonzero_ne_case(i32 %xx, i32 %yy) {
}
define i32 @sub_maybe_zero(i32 %x) {
-; CHECK-LABEL: sub_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: subl %edi, %eax
-; CHECK-NEXT: je .LBB40_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB40_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: orl $64, %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: je .LBB44_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB44_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: subl %edi, %eax
+; X64-NEXT: je .LBB44_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB44_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %x, 64
%z = sub i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -727,34 +1329,60 @@ define i32 @sub_maybe_zero(i32 %x) {
}
define i32 @sub_maybe_zero2(i32 %x) {
-; CHECK-LABEL: sub_maybe_zero2:
-; CHECK: # %bb.0:
-; CHECK-NEXT: negl %edi
-; CHECK-NEXT: je .LBB41_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB41_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_maybe_zero2:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: je .LBB45_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB45_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_maybe_zero2:
+; X64: # %bb.0:
+; X64-NEXT: negl %edi
+; X64-NEXT: je .LBB45_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB45_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = sub i32 0, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @mul_known_nonzero_nsw(i32 %x, i32 %yy) {
-; CHECK-LABEL: mul_known_nonzero_nsw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: imull %edi, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB42_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB42_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: mul_known_nonzero_nsw:
+; X86: # %bb.0:
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB46_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB46_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_known_nonzero_nsw:
+; X64: # %bb.0:
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: imull %edi, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB46_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB46_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = mul nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -762,18 +1390,32 @@ define i32 @mul_known_nonzero_nsw(i32 %x, i32 %yy) {
}
define i32 @mul_known_nonzero_nuw(i32 %x, i32 %yy) {
-; CHECK-LABEL: mul_known_nonzero_nuw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: imull %edi, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB43_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB43_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: mul_known_nonzero_nuw:
+; X86: # %bb.0:
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB47_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB47_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_known_nonzero_nuw:
+; X64: # %bb.0:
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: imull %edi, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB47_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB47_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = mul nuw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -781,36 +1423,63 @@ define i32 @mul_known_nonzero_nuw(i32 %x, i32 %yy) {
}
define i32 @mul_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: mul_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: imull %esi, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB44_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB44_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: mul_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB48_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB48_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: imull %esi, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB48_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB48_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = mul nuw nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @bitcast_known_nonzero(<2 x i16> %xx) {
-; CHECK-LABEL: bitcast_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; CHECK-NEXT: pslld $23, %xmm0
-; CHECK-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-NEXT: cvttps2dq %xmm0, %xmm0
-; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; CHECK-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: bsfl %eax, %ecx
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: cmovnel %ecx, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: bitcast_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; X86-NEXT: pslld $23, %xmm0
+; X86-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: cvttps2dq %xmm0, %xmm0
+; X86-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; X86-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: bsfl %eax, %ecx
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: cmovnel %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: bitcast_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; X64-NEXT: vpslld $23, %xmm0, %xmm0
+; X64-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vcvttps2dq %xmm0, %xmm0
+; X64-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; X64-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vmovd %xmm0, %eax
+; X64-NEXT: bsfl %eax, %ecx
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: cmovnel %ecx, %eax
+; X64-NEXT: retq
%x = shl nuw nsw <2 x i16> <i16 256, i16 256>, %xx
%z = bitcast <2 x i16> %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -818,49 +1487,83 @@ define i32 @bitcast_known_nonzero(<2 x i16> %xx) {
}
define i32 @bitcast_maybe_zero(<2 x i16> %x) {
-; CHECK-LABEL: bitcast_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB46_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB46_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: bitcast_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB50_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB50_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: bitcast_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: vmovd %xmm0, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB50_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB50_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = bitcast <2 x i16> %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @bitcast_from_float(float %x) {
-; CHECK-LABEL: bitcast_from_float:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB47_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB47_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: bitcast_from_float:
+; X86: # %bb.0:
+; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB51_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB51_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: bitcast_from_float:
+; X64: # %bb.0:
+; X64-NEXT: vmovd %xmm0, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB51_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB51_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = bitcast float %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @zext_known_nonzero(i16 %xx) {
-; CHECK-LABEL: zext_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $256, %eax # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: zext_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movzwl %ax, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: zext_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $256, %eax # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = shl nuw nsw i16 256, %xx
%z = zext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -868,32 +1571,54 @@ define i32 @zext_known_nonzero(i16 %xx) {
}
define i32 @zext_maybe_zero(i16 %x) {
-; CHECK-LABEL: zext_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: testw %di, %di
-; CHECK-NEXT: je .LBB49_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: movzwl %di, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB49_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: zext_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testw %ax, %ax
+; X86-NEXT: je .LBB53_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: movzwl %ax, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB53_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: zext_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: testw %di, %di
+; X64-NEXT: je .LBB53_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: movzwl %di, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB53_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = zext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sext_known_nonzero(i16 %xx) {
-; CHECK-LABEL: sext_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $256, %eax # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: cwtl
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sext_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: cwtl
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sext_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $256, %eax # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: cwtl
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = shl nuw nsw i16 256, %xx
%z = sext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -901,17 +1626,29 @@ define i32 @sext_known_nonzero(i16 %xx) {
}
define i32 @sext_maybe_zero(i16 %x) {
-; CHECK-LABEL: sext_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: testw %di, %di
-; CHECK-NEXT: je .LBB51_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: movswl %di, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB51_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sext_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB55_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB55_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sext_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: testw %di, %di
+; X64-NEXT: je .LBB55_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: movswl %di, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB55_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = sext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
diff --git a/llvm/test/CodeGen/X86/late-remat-update.mir b/llvm/test/CodeGen/X86/late-remat-update.mir
index 84a78f8..dd4e99c 100644
--- a/llvm/test/CodeGen/X86/late-remat-update.mir
+++ b/llvm/test/CodeGen/X86/late-remat-update.mir
@@ -66,6 +66,7 @@ registers:
liveins:
- { reg: '$edi', virtual-reg: '%0' }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.entry:
diff --git a/llvm/test/CodeGen/X86/limit-split-cost.mir b/llvm/test/CodeGen/X86/limit-split-cost.mir
index 6f5329e..7ec0404 100644
--- a/llvm/test/CodeGen/X86/limit-split-cost.mir
+++ b/llvm/test/CodeGen/X86/limit-split-cost.mir
@@ -86,6 +86,7 @@ registers:
liveins:
- { reg: '$edi', virtual-reg: '%0' }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.entry:
diff --git a/llvm/test/CodeGen/X86/masked_store.ll b/llvm/test/CodeGen/X86/masked_store.ll
index 898b34e..6aa0a81 100644
--- a/llvm/test/CodeGen/X86/masked_store.ll
+++ b/llvm/test/CodeGen/X86/masked_store.ll
@@ -12,7 +12,7 @@
; vXf64
;
-define void @store_v1f64_v1i64(<1 x i64> %trigger, ptr %addr, <1 x double> %val) {
+define void @store_v1f64_v1i64(<1 x i64> %trigger, ptr %addr, <1 x double> %val) nounwind {
; SSE-LABEL: store_v1f64_v1i64:
; SSE: ## %bb.0:
; SSE-NEXT: testq %rdi, %rdi
@@ -46,7 +46,7 @@ define void @store_v1f64_v1i64(<1 x i64> %trigger, ptr %addr, <1 x double> %val)
ret void
}
-define void @store_v2f64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x double> %val) {
+define void @store_v2f64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x double> %val) nounwind {
; SSE-LABEL: store_v2f64_v2i64:
; SSE: ## %bb.0:
; SSE-NEXT: movmskpd %xmm0, %eax
@@ -106,7 +106,7 @@ define void @store_v2f64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x double> %val)
ret void
}
-define void @store_v4f64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x double> %val) {
+define void @store_v4f64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x double> %val) nounwind {
; SSE2-LABEL: store_v4f64_v4i64:
; SSE2: ## %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -222,7 +222,7 @@ define void @store_v4f64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x double> %val)
; vXf32
;
-define void @store_v2f32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x float> %val) {
+define void @store_v2f32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x float> %val) nounwind {
; SSE2-LABEL: store_v2f32_v2i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -314,7 +314,7 @@ define void @store_v2f32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x float> %val)
ret void
}
-define void @store_v4f32_v4i32(<4 x float> %x, ptr %ptr, <4 x float> %y, <4 x i32> %mask) {
+define void @store_v4f32_v4i32(<4 x float> %x, ptr %ptr, <4 x float> %y, <4 x i32> %mask) nounwind {
; SSE2-LABEL: store_v4f32_v4i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: movmskps %xmm2, %eax
@@ -425,7 +425,7 @@ define void @store_v4f32_v4i32(<4 x float> %x, ptr %ptr, <4 x float> %y, <4 x i3
ret void
}
-define void @store_v8f32_v8i32(<8 x float> %x, ptr %ptr, <8 x float> %y, <8 x i32> %mask) {
+define void @store_v8f32_v8i32(<8 x float> %x, ptr %ptr, <8 x float> %y, <8 x i32> %mask) nounwind {
; SSE2-LABEL: store_v8f32_v8i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: packssdw %xmm5, %xmm4
@@ -605,7 +605,7 @@ define void @store_v8f32_v8i32(<8 x float> %x, ptr %ptr, <8 x float> %y, <8 x i3
ret void
}
-define void @store_v16f32_v16i32(<16 x float> %x, ptr %ptr, <16 x float> %y, <16 x i32> %mask) {
+define void @store_v16f32_v16i32(<16 x float> %x, ptr %ptr, <16 x float> %y, <16 x i32> %mask) nounwind {
; SSE2-LABEL: store_v16f32_v16i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4
@@ -914,7 +914,7 @@ define void @store_v16f32_v16i32(<16 x float> %x, ptr %ptr, <16 x float> %y, <16
; vXi64
;
-define void @store_v2i64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x i64> %val) {
+define void @store_v2i64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x i64> %val) nounwind {
; SSE2-LABEL: store_v2i64_v2i64:
; SSE2: ## %bb.0:
; SSE2-NEXT: movmskpd %xmm0, %eax
@@ -998,7 +998,7 @@ define void @store_v2i64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x i64> %val) {
ret void
}
-define void @store_v4i64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x i64> %val) {
+define void @store_v4i64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x i64> %val) nounwind {
; SSE2-LABEL: store_v4i64_v4i64:
; SSE2: ## %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -1122,7 +1122,7 @@ define void @store_v4i64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x i64> %val) {
; vXi32
;
-define void @store_v1i32_v1i32(<1 x i32> %trigger, ptr %addr, <1 x i32> %val) {
+define void @store_v1i32_v1i32(<1 x i32> %trigger, ptr %addr, <1 x i32> %val) nounwind {
; SSE-LABEL: store_v1i32_v1i32:
; SSE: ## %bb.0:
; SSE-NEXT: testl %edi, %edi
@@ -1156,7 +1156,7 @@ define void @store_v1i32_v1i32(<1 x i32> %trigger, ptr %addr, <1 x i32> %val) {
ret void
}
-define void @store_v2i32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x i32> %val) {
+define void @store_v2i32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x i32> %val) nounwind {
; SSE2-LABEL: store_v2i32_v2i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -1256,7 +1256,7 @@ define void @store_v2i32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x i32> %val) {
ret void
}
-define void @store_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %val) {
+define void @store_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %val) nounwind {
; SSE2-LABEL: store_v4i32_v4i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -1370,7 +1370,7 @@ define void @store_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %val) {
ret void
}
-define void @store_v8i32_v8i32(<8 x i32> %trigger, ptr %addr, <8 x i32> %val) {
+define void @store_v8i32_v8i32(<8 x i32> %trigger, ptr %addr, <8 x i32> %val) nounwind {
; SSE2-LABEL: store_v8i32_v8i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
@@ -1560,7 +1560,7 @@ define void @store_v8i32_v8i32(<8 x i32> %trigger, ptr %addr, <8 x i32> %val) {
; vXi16
;
-define void @store_v8i16_v8i16(<8 x i16> %trigger, ptr %addr, <8 x i16> %val) {
+define void @store_v8i16_v8i16(<8 x i16> %trigger, ptr %addr, <8 x i16> %val) nounwind {
; SSE2-LABEL: store_v8i16_v8i16:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -1907,7 +1907,7 @@ define void @store_v8i16_v8i16(<8 x i16> %trigger, ptr %addr, <8 x i16> %val) {
ret void
}
-define void @store_v16i16_v16i16(<16 x i16> %trigger, ptr %addr, <16 x i16> %val) {
+define void @store_v16i16_v16i16(<16 x i16> %trigger, ptr %addr, <16 x i16> %val) nounwind {
; SSE2-LABEL: store_v16i16_v16i16:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
@@ -2676,7 +2676,7 @@ define void @store_v16i16_v16i16(<16 x i16> %trigger, ptr %addr, <16 x i16> %val
; vXi8
;
-define void @store_v16i8_v16i8(<16 x i8> %trigger, ptr %addr, <16 x i8> %val) {
+define void @store_v16i8_v16i8(<16 x i8> %trigger, ptr %addr, <16 x i8> %val) nounwind {
; SSE2-LABEL: store_v16i8_v16i8:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -3273,7 +3273,7 @@ define void @store_v16i8_v16i8(<16 x i8> %trigger, ptr %addr, <16 x i8> %val) {
ret void
}
-define void @store_v32i8_v32i8(<32 x i8> %trigger, ptr %addr, <32 x i8> %val) {
+define void @store_v32i8_v32i8(<32 x i8> %trigger, ptr %addr, <32 x i8> %val) nounwind {
; SSE2-LABEL: store_v32i8_v32i8:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
@@ -4670,7 +4670,7 @@ define void @store_v32i8_v32i8(<32 x i8> %trigger, ptr %addr, <32 x i8> %val) {
;;; Stores with Constant Masks
-define void @mstore_constmask_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %val) {
+define void @mstore_constmask_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %val) nounwind {
; SSE-LABEL: mstore_constmask_v4i32_v4i32:
; SSE: ## %bb.0:
; SSE-NEXT: movups %xmm1, (%rdi)
@@ -4693,7 +4693,7 @@ define void @mstore_constmask_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i3
; Make sure we are able to detect all ones constant mask after type legalization
; to avoid masked stores.
-define void @mstore_constmask_allones_split(<16 x i64> %trigger, ptr %addr, <16 x i64> %val) {
+define void @mstore_constmask_allones_split(<16 x i64> %trigger, ptr %addr, <16 x i64> %val) nounwind {
; SSE2-LABEL: mstore_constmask_allones_split:
; SSE2: ## %bb.0:
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0
@@ -4810,7 +4810,7 @@ define void @mstore_constmask_allones_split(<16 x i64> %trigger, ptr %addr, <16
; When only one element of the mask is set, reduce to a scalar store.
-define void @one_mask_bit_set1(ptr %addr, <4 x i32> %val) {
+define void @one_mask_bit_set1(ptr %addr, <4 x i32> %val) nounwind {
; SSE-LABEL: one_mask_bit_set1:
; SSE: ## %bb.0:
; SSE-NEXT: movss %xmm0, (%rdi)
@@ -4832,7 +4832,7 @@ define void @one_mask_bit_set1(ptr %addr, <4 x i32> %val) {
; Choose a different element to show that the correct address offset is produced.
-define void @one_mask_bit_set2(ptr %addr, <4 x float> %val) {
+define void @one_mask_bit_set2(ptr %addr, <4 x float> %val) nounwind {
; SSE2-LABEL: one_mask_bit_set2:
; SSE2: ## %bb.0:
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -4860,7 +4860,7 @@ define void @one_mask_bit_set2(ptr %addr, <4 x float> %val) {
; Choose a different scalar type and a high element of a 256-bit vector because AVX doesn't support those evenly.
-define void @one_mask_bit_set3(ptr %addr, <4 x i64> %val) {
+define void @one_mask_bit_set3(ptr %addr, <4 x i64> %val) nounwind {
; SSE-LABEL: one_mask_bit_set3:
; SSE: ## %bb.0:
; SSE-NEXT: movlps %xmm1, 16(%rdi)
@@ -4886,7 +4886,7 @@ define void @one_mask_bit_set3(ptr %addr, <4 x i64> %val) {
; Choose a different scalar type and a high element of a 256-bit vector because AVX doesn't support those evenly.
-define void @one_mask_bit_set4(ptr %addr, <4 x double> %val) {
+define void @one_mask_bit_set4(ptr %addr, <4 x double> %val) nounwind {
; SSE-LABEL: one_mask_bit_set4:
; SSE: ## %bb.0:
; SSE-NEXT: movhps %xmm1, 24(%rdi)
@@ -4912,7 +4912,7 @@ define void @one_mask_bit_set4(ptr %addr, <4 x double> %val) {
; Try a 512-bit vector to make sure AVX doesn't die and AVX512 works as expected.
-define void @one_mask_bit_set5(ptr %addr, <8 x double> %val) {
+define void @one_mask_bit_set5(ptr %addr, <8 x double> %val) nounwind {
; SSE-LABEL: one_mask_bit_set5:
; SSE: ## %bb.0:
; SSE-NEXT: movlps %xmm3, 48(%rdi)
@@ -4944,7 +4944,7 @@ define void @one_mask_bit_set5(ptr %addr, <8 x double> %val) {
}
; Try one elt in each half of a vector that needs to split
-define void @one_mask_bit_set6(ptr %addr, <16 x i64> %val) {
+define void @one_mask_bit_set6(ptr %addr, <16 x i64> %val) nounwind {
; SSE2-LABEL: one_mask_bit_set6:
; SSE2: ## %bb.0:
; SSE2-NEXT: movlps %xmm3, 48(%rdi)
@@ -4999,7 +4999,7 @@ define void @one_mask_bit_set6(ptr %addr, <16 x i64> %val) {
ret void
}
-define void @top_bits_unset_stack() {
+define void @top_bits_unset_stack() nounwind {
; SSE-LABEL: top_bits_unset_stack:
; SSE: ## %bb.0: ## %entry
; SSE-NEXT: xorps %xmm0, %xmm0
@@ -5047,7 +5047,6 @@ define void @top_bits_unset_stack() {
; X86-AVX512-LABEL: top_bits_unset_stack:
; X86-AVX512: ## %bb.0: ## %entry
; X86-AVX512-NEXT: subl $76, %esp
-; X86-AVX512-NEXT: .cfi_def_cfa_offset 80
; X86-AVX512-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; X86-AVX512-NEXT: movb $63, %al
; X86-AVX512-NEXT: kmovd %eax, %k1
@@ -5064,7 +5063,7 @@ entry:
; SimplifyDemandedBits eliminates an ashr here.
-define void @masked_store_bool_mask_demand_trunc_sext(<4 x double> %x, ptr %p, <4 x i32> %masksrc) {
+define void @masked_store_bool_mask_demand_trunc_sext(<4 x double> %x, ptr %p, <4 x i32> %masksrc) nounwind {
; SSE-LABEL: masked_store_bool_mask_demand_trunc_sext:
; SSE: ## %bb.0:
; SSE-NEXT: pslld $31, %xmm2
@@ -5160,7 +5159,7 @@ define void @masked_store_bool_mask_demand_trunc_sext(<4 x double> %x, ptr %p, <
; PR26697
-define void @one_mask_bit_set1_variable(ptr %addr, <4 x float> %val, <4 x i32> %mask) {
+define void @one_mask_bit_set1_variable(ptr %addr, <4 x float> %val, <4 x i32> %mask) nounwind {
; SSE2-LABEL: one_mask_bit_set1_variable:
; SSE2: ## %bb.0:
; SSE2-NEXT: movmskps %xmm1, %eax
@@ -5267,7 +5266,7 @@ define void @one_mask_bit_set1_variable(ptr %addr, <4 x float> %val, <4 x i32> %
; This needs to be widened to v4i32.
; This used to assert in type legalization. PR38436
; FIXME: The codegen for AVX512 should use KSHIFT to zero the upper bits of the mask.
-define void @widen_masked_store(<3 x i32> %v, ptr %p, <3 x i1> %mask) {
+define void @widen_masked_store(<3 x i32> %v, ptr %p, <3 x i1> %mask) nounwind {
; SSE2-LABEL: widen_masked_store:
; SSE2: ## %bb.0:
; SSE2-NEXT: andb $1, %sil
@@ -5448,7 +5447,7 @@ define void @widen_masked_store(<3 x i32> %v, ptr %p, <3 x i1> %mask) {
ret void
}
-define void @zero_mask(ptr %addr, <2 x double> %val) {
+define void @zero_mask(ptr %addr, <2 x double> %val) nounwind {
; SSE-LABEL: zero_mask:
; SSE: ## %bb.0:
; SSE-NEXT: retq
@@ -5464,7 +5463,7 @@ define void @zero_mask(ptr %addr, <2 x double> %val) {
ret void
}
-define void @PR11210(<4 x float> %x, ptr %ptr, <4 x float> %y, <2 x i64> %mask) {
+define void @PR11210(<4 x float> %x, ptr %ptr, <4 x float> %y, <2 x i64> %mask) nounwind {
; SSE2-LABEL: PR11210:
; SSE2: ## %bb.0:
; SSE2-NEXT: movmskps %xmm2, %eax
@@ -5638,492 +5637,248 @@ define void @PR11210(<4 x float> %x, ptr %ptr, <4 x float> %y, <2 x i64> %mask)
ret void
}
-define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigger.ptr, ptr %val.ptr, ptr %dst) {
-; SSE2-LABEL: store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts:
-; SSE2: ## %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm6
-; SSE2-NEXT: movdqa 32(%rdi), %xmm7
-; SSE2-NEXT: movdqa 64(%rdi), %xmm8
-; SSE2-NEXT: movl 80(%rsi), %eax
-; SSE2-NEXT: movl 64(%rsi), %r8d
-; SSE2-NEXT: movl 48(%rsi), %r9d
-; SSE2-NEXT: movl 32(%rsi), %r10d
-; SSE2-NEXT: movl 16(%rsi), %r11d
-; SSE2-NEXT: movdqa 80(%rsi), %xmm0
-; SSE2-NEXT: movdqa 64(%rsi), %xmm1
-; SSE2-NEXT: movdqa 48(%rsi), %xmm2
-; SSE2-NEXT: movdqa 32(%rsi), %xmm3
-; SSE2-NEXT: movdqa 16(%rsi), %xmm4
-; SSE2-NEXT: movdqa (%rsi), %xmm5
-; SSE2-NEXT: packssdw 48(%rdi), %xmm7
-; SSE2-NEXT: packssdw 16(%rdi), %xmm6
-; SSE2-NEXT: packsswb %xmm7, %xmm6
-; SSE2-NEXT: packssdw 80(%rdi), %xmm8
-; SSE2-NEXT: packsswb %xmm8, %xmm8
-; SSE2-NEXT: pmovmskb %xmm6, %edi
-; SSE2-NEXT: andl $21845, %edi ## imm = 0x5555
-; SSE2-NEXT: pmovmskb %xmm8, %ecx
-; SSE2-NEXT: andl $85, %ecx
-; SSE2-NEXT: shll $16, %ecx
-; SSE2-NEXT: orl %edi, %ecx
-; SSE2-NEXT: testb $1, %cl
-; SSE2-NEXT: jne LBB31_1
-; SSE2-NEXT: ## %bb.2: ## %else
-; SSE2-NEXT: testb $2, %cl
-; SSE2-NEXT: jne LBB31_3
-; SSE2-NEXT: LBB31_4: ## %else2
-; SSE2-NEXT: testb $4, %cl
-; SSE2-NEXT: jne LBB31_5
-; SSE2-NEXT: LBB31_6: ## %else4
-; SSE2-NEXT: testb $8, %cl
-; SSE2-NEXT: jne LBB31_7
-; SSE2-NEXT: LBB31_8: ## %else6
-; SSE2-NEXT: testb $16, %cl
-; SSE2-NEXT: jne LBB31_9
-; SSE2-NEXT: LBB31_10: ## %else8
-; SSE2-NEXT: testb $32, %cl
-; SSE2-NEXT: jne LBB31_11
-; SSE2-NEXT: LBB31_12: ## %else10
-; SSE2-NEXT: testb $64, %cl
-; SSE2-NEXT: jne LBB31_13
-; SSE2-NEXT: LBB31_14: ## %else12
-; SSE2-NEXT: testb %cl, %cl
-; SSE2-NEXT: js LBB31_15
-; SSE2-NEXT: LBB31_16: ## %else14
-; SSE2-NEXT: testl $256, %ecx ## imm = 0x100
-; SSE2-NEXT: jne LBB31_17
-; SSE2-NEXT: LBB31_18: ## %else16
-; SSE2-NEXT: testl $512, %ecx ## imm = 0x200
-; SSE2-NEXT: jne LBB31_19
-; SSE2-NEXT: LBB31_20: ## %else18
-; SSE2-NEXT: testl $1024, %ecx ## imm = 0x400
-; SSE2-NEXT: jne LBB31_21
-; SSE2-NEXT: LBB31_22: ## %else20
-; SSE2-NEXT: testl $2048, %ecx ## imm = 0x800
-; SSE2-NEXT: jne LBB31_23
-; SSE2-NEXT: LBB31_24: ## %else22
-; SSE2-NEXT: testl $4096, %ecx ## imm = 0x1000
-; SSE2-NEXT: jne LBB31_25
-; SSE2-NEXT: LBB31_26: ## %else24
-; SSE2-NEXT: testl $8192, %ecx ## imm = 0x2000
-; SSE2-NEXT: jne LBB31_27
-; SSE2-NEXT: LBB31_28: ## %else26
-; SSE2-NEXT: testl $16384, %ecx ## imm = 0x4000
-; SSE2-NEXT: jne LBB31_29
-; SSE2-NEXT: LBB31_30: ## %else28
-; SSE2-NEXT: testw %cx, %cx
-; SSE2-NEXT: js LBB31_31
-; SSE2-NEXT: LBB31_32: ## %else30
-; SSE2-NEXT: testl $65536, %ecx ## imm = 0x10000
-; SSE2-NEXT: jne LBB31_33
-; SSE2-NEXT: LBB31_34: ## %else32
-; SSE2-NEXT: testl $131072, %ecx ## imm = 0x20000
-; SSE2-NEXT: jne LBB31_35
-; SSE2-NEXT: LBB31_36: ## %else34
-; SSE2-NEXT: testl $262144, %ecx ## imm = 0x40000
-; SSE2-NEXT: jne LBB31_37
-; SSE2-NEXT: LBB31_38: ## %else36
-; SSE2-NEXT: testl $524288, %ecx ## imm = 0x80000
-; SSE2-NEXT: jne LBB31_39
-; SSE2-NEXT: LBB31_40: ## %else38
-; SSE2-NEXT: testl $1048576, %ecx ## imm = 0x100000
-; SSE2-NEXT: jne LBB31_41
-; SSE2-NEXT: LBB31_42: ## %else40
-; SSE2-NEXT: testl $2097152, %ecx ## imm = 0x200000
-; SSE2-NEXT: jne LBB31_43
-; SSE2-NEXT: LBB31_44: ## %else42
-; SSE2-NEXT: testl $4194304, %ecx ## imm = 0x400000
-; SSE2-NEXT: je LBB31_46
-; SSE2-NEXT: LBB31_45: ## %cond.store43
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: movl %eax, 88(%rdx)
-; SSE2-NEXT: LBB31_46: ## %else44
-; SSE2-NEXT: movb $1, %al
-; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne LBB31_48
-; SSE2-NEXT: ## %bb.47: ## %cond.store45
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: movl %eax, 92(%rdx)
-; SSE2-NEXT: LBB31_48: ## %else46
-; SSE2-NEXT: retq
-; SSE2-NEXT: LBB31_1: ## %cond.store
-; SSE2-NEXT: movl (%rsi), %esi
-; SSE2-NEXT: movl %esi, (%rdx)
-; SSE2-NEXT: testb $2, %cl
-; SSE2-NEXT: je LBB31_4
-; SSE2-NEXT: LBB31_3: ## %cond.store1
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,1,1]
-; SSE2-NEXT: movd %xmm6, %esi
-; SSE2-NEXT: movl %esi, 4(%rdx)
-; SSE2-NEXT: testb $4, %cl
-; SSE2-NEXT: je LBB31_6
-; SSE2-NEXT: LBB31_5: ## %cond.store3
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,2,3]
-; SSE2-NEXT: movd %xmm6, %esi
-; SSE2-NEXT: movl %esi, 8(%rdx)
-; SSE2-NEXT: testb $8, %cl
-; SSE2-NEXT: je LBB31_8
-; SSE2-NEXT: LBB31_7: ## %cond.store5
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[3,3,3,3]
-; SSE2-NEXT: movd %xmm5, %esi
-; SSE2-NEXT: movl %esi, 12(%rdx)
-; SSE2-NEXT: testb $16, %cl
-; SSE2-NEXT: je LBB31_10
-; SSE2-NEXT: LBB31_9: ## %cond.store7
-; SSE2-NEXT: movl %r11d, 16(%rdx)
-; SSE2-NEXT: testb $32, %cl
-; SSE2-NEXT: je LBB31_12
-; SSE2-NEXT: LBB31_11: ## %cond.store9
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,1,1,1]
-; SSE2-NEXT: movd %xmm5, %esi
-; SSE2-NEXT: movl %esi, 20(%rdx)
-; SSE2-NEXT: testb $64, %cl
-; SSE2-NEXT: je LBB31_14
-; SSE2-NEXT: LBB31_13: ## %cond.store11
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
-; SSE2-NEXT: movd %xmm5, %esi
-; SSE2-NEXT: movl %esi, 24(%rdx)
-; SSE2-NEXT: testb %cl, %cl
-; SSE2-NEXT: jns LBB31_16
-; SSE2-NEXT: LBB31_15: ## %cond.store13
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[3,3,3,3]
-; SSE2-NEXT: movd %xmm4, %esi
-; SSE2-NEXT: movl %esi, 28(%rdx)
-; SSE2-NEXT: testl $256, %ecx ## imm = 0x100
-; SSE2-NEXT: je LBB31_18
-; SSE2-NEXT: LBB31_17: ## %cond.store15
-; SSE2-NEXT: movl %r10d, 32(%rdx)
-; SSE2-NEXT: testl $512, %ecx ## imm = 0x200
-; SSE2-NEXT: je LBB31_20
-; SSE2-NEXT: LBB31_19: ## %cond.store17
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,1,1]
-; SSE2-NEXT: movd %xmm4, %esi
-; SSE2-NEXT: movl %esi, 36(%rdx)
-; SSE2-NEXT: testl $1024, %ecx ## imm = 0x400
-; SSE2-NEXT: je LBB31_22
-; SSE2-NEXT: LBB31_21: ## %cond.store19
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
-; SSE2-NEXT: movd %xmm4, %esi
-; SSE2-NEXT: movl %esi, 40(%rdx)
-; SSE2-NEXT: testl $2048, %ecx ## imm = 0x800
-; SSE2-NEXT: je LBB31_24
-; SSE2-NEXT: LBB31_23: ## %cond.store21
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
-; SSE2-NEXT: movd %xmm3, %esi
-; SSE2-NEXT: movl %esi, 44(%rdx)
-; SSE2-NEXT: testl $4096, %ecx ## imm = 0x1000
-; SSE2-NEXT: je LBB31_26
-; SSE2-NEXT: LBB31_25: ## %cond.store23
-; SSE2-NEXT: movl %r9d, 48(%rdx)
-; SSE2-NEXT: testl $8192, %ecx ## imm = 0x2000
-; SSE2-NEXT: je LBB31_28
-; SSE2-NEXT: LBB31_27: ## %cond.store25
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,1,1]
-; SSE2-NEXT: movd %xmm3, %esi
-; SSE2-NEXT: movl %esi, 52(%rdx)
-; SSE2-NEXT: testl $16384, %ecx ## imm = 0x4000
-; SSE2-NEXT: je LBB31_30
-; SSE2-NEXT: LBB31_29: ## %cond.store27
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
-; SSE2-NEXT: movd %xmm3, %esi
-; SSE2-NEXT: movl %esi, 56(%rdx)
-; SSE2-NEXT: testw %cx, %cx
-; SSE2-NEXT: jns LBB31_32
-; SSE2-NEXT: LBB31_31: ## %cond.store29
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
-; SSE2-NEXT: movd %xmm2, %esi
-; SSE2-NEXT: movl %esi, 60(%rdx)
-; SSE2-NEXT: testl $65536, %ecx ## imm = 0x10000
-; SSE2-NEXT: je LBB31_34
-; SSE2-NEXT: LBB31_33: ## %cond.store31
-; SSE2-NEXT: movl %r8d, 64(%rdx)
-; SSE2-NEXT: testl $131072, %ecx ## imm = 0x20000
-; SSE2-NEXT: je LBB31_36
-; SSE2-NEXT: LBB31_35: ## %cond.store33
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
-; SSE2-NEXT: movd %xmm2, %esi
-; SSE2-NEXT: movl %esi, 68(%rdx)
-; SSE2-NEXT: testl $262144, %ecx ## imm = 0x40000
-; SSE2-NEXT: je LBB31_38
-; SSE2-NEXT: LBB31_37: ## %cond.store35
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; SSE2-NEXT: movd %xmm2, %esi
-; SSE2-NEXT: movl %esi, 72(%rdx)
-; SSE2-NEXT: testl $524288, %ecx ## imm = 0x80000
-; SSE2-NEXT: je LBB31_40
-; SSE2-NEXT: LBB31_39: ## %cond.store37
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
-; SSE2-NEXT: movd %xmm1, %esi
-; SSE2-NEXT: movl %esi, 76(%rdx)
-; SSE2-NEXT: testl $1048576, %ecx ## imm = 0x100000
-; SSE2-NEXT: je LBB31_42
-; SSE2-NEXT: LBB31_41: ## %cond.store39
-; SSE2-NEXT: movl %eax, 80(%rdx)
-; SSE2-NEXT: testl $2097152, %ecx ## imm = 0x200000
-; SSE2-NEXT: je LBB31_44
-; SSE2-NEXT: LBB31_43: ## %cond.store41
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: movl %eax, 84(%rdx)
-; SSE2-NEXT: testl $4194304, %ecx ## imm = 0x400000
-; SSE2-NEXT: jne LBB31_45
-; SSE2-NEXT: jmp LBB31_46
-;
-; SSE4-LABEL: store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts:
-; SSE4: ## %bb.0:
-; SSE4-NEXT: pushq %rbp
-; SSE4-NEXT: .cfi_def_cfa_offset 16
-; SSE4-NEXT: pushq %r15
-; SSE4-NEXT: .cfi_def_cfa_offset 24
-; SSE4-NEXT: pushq %r14
-; SSE4-NEXT: .cfi_def_cfa_offset 32
-; SSE4-NEXT: pushq %r13
-; SSE4-NEXT: .cfi_def_cfa_offset 40
-; SSE4-NEXT: pushq %r12
-; SSE4-NEXT: .cfi_def_cfa_offset 48
-; SSE4-NEXT: pushq %rbx
-; SSE4-NEXT: .cfi_def_cfa_offset 56
-; SSE4-NEXT: .cfi_offset %rbx, -56
-; SSE4-NEXT: .cfi_offset %r12, -48
-; SSE4-NEXT: .cfi_offset %r13, -40
-; SSE4-NEXT: .cfi_offset %r14, -32
-; SSE4-NEXT: .cfi_offset %r15, -24
-; SSE4-NEXT: .cfi_offset %rbp, -16
-; SSE4-NEXT: movdqa (%rdi), %xmm1
-; SSE4-NEXT: movdqa 32(%rdi), %xmm2
-; SSE4-NEXT: movdqa 64(%rdi), %xmm0
-; SSE4-NEXT: movl 92(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 88(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 84(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 80(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 76(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 72(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 68(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 64(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 60(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 56(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 52(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: packssdw 48(%rdi), %xmm2
-; SSE4-NEXT: packssdw 16(%rdi), %xmm1
-; SSE4-NEXT: packsswb %xmm2, %xmm1
-; SSE4-NEXT: packssdw 80(%rdi), %xmm0
-; SSE4-NEXT: packsswb %xmm0, %xmm0
-; SSE4-NEXT: pmovmskb %xmm1, %eax
-; SSE4-NEXT: andl $21845, %eax ## imm = 0x5555
-; SSE4-NEXT: pmovmskb %xmm0, %edi
-; SSE4-NEXT: andl $85, %edi
-; SSE4-NEXT: shll $16, %edi
-; SSE4-NEXT: orl %eax, %edi
-; SSE4-NEXT: movl 48(%rsi), %r13d
-; SSE4-NEXT: testb $1, %dil
-; SSE4-NEXT: movl 44(%rsi), %eax
-; SSE4-NEXT: movl 40(%rsi), %ecx
-; SSE4-NEXT: movl 36(%rsi), %r8d
-; SSE4-NEXT: movl 32(%rsi), %r9d
-; SSE4-NEXT: movl 28(%rsi), %r10d
-; SSE4-NEXT: movl 24(%rsi), %r11d
-; SSE4-NEXT: movl 20(%rsi), %ebx
-; SSE4-NEXT: movl 16(%rsi), %ebp
-; SSE4-NEXT: movl 12(%rsi), %r14d
-; SSE4-NEXT: movl 8(%rsi), %r15d
-; SSE4-NEXT: movl 4(%rsi), %r12d
-; SSE4-NEXT: jne LBB31_1
-; SSE4-NEXT: ## %bb.2: ## %else
-; SSE4-NEXT: testb $2, %dil
-; SSE4-NEXT: jne LBB31_3
-; SSE4-NEXT: LBB31_4: ## %else2
-; SSE4-NEXT: testb $4, %dil
-; SSE4-NEXT: jne LBB31_5
-; SSE4-NEXT: LBB31_6: ## %else4
-; SSE4-NEXT: testb $8, %dil
-; SSE4-NEXT: jne LBB31_7
-; SSE4-NEXT: LBB31_8: ## %else6
-; SSE4-NEXT: testb $16, %dil
-; SSE4-NEXT: jne LBB31_9
-; SSE4-NEXT: LBB31_10: ## %else8
-; SSE4-NEXT: testb $32, %dil
-; SSE4-NEXT: jne LBB31_11
-; SSE4-NEXT: LBB31_12: ## %else10
-; SSE4-NEXT: testb $64, %dil
-; SSE4-NEXT: jne LBB31_13
-; SSE4-NEXT: LBB31_14: ## %else12
-; SSE4-NEXT: testb %dil, %dil
-; SSE4-NEXT: js LBB31_15
-; SSE4-NEXT: LBB31_16: ## %else14
-; SSE4-NEXT: testl $256, %edi ## imm = 0x100
-; SSE4-NEXT: jne LBB31_17
-; SSE4-NEXT: LBB31_18: ## %else16
-; SSE4-NEXT: testl $512, %edi ## imm = 0x200
-; SSE4-NEXT: jne LBB31_19
-; SSE4-NEXT: LBB31_20: ## %else18
-; SSE4-NEXT: testl $1024, %edi ## imm = 0x400
-; SSE4-NEXT: jne LBB31_21
-; SSE4-NEXT: LBB31_22: ## %else20
-; SSE4-NEXT: testl $2048, %edi ## imm = 0x800
-; SSE4-NEXT: jne LBB31_23
-; SSE4-NEXT: LBB31_24: ## %else22
-; SSE4-NEXT: testl $4096, %edi ## imm = 0x1000
-; SSE4-NEXT: jne LBB31_25
-; SSE4-NEXT: LBB31_26: ## %else24
-; SSE4-NEXT: testl $8192, %edi ## imm = 0x2000
-; SSE4-NEXT: jne LBB31_27
-; SSE4-NEXT: LBB31_28: ## %else26
-; SSE4-NEXT: testl $16384, %edi ## imm = 0x4000
-; SSE4-NEXT: jne LBB31_29
-; SSE4-NEXT: LBB31_30: ## %else28
-; SSE4-NEXT: testw %di, %di
-; SSE4-NEXT: js LBB31_31
-; SSE4-NEXT: LBB31_32: ## %else30
-; SSE4-NEXT: testl $65536, %edi ## imm = 0x10000
-; SSE4-NEXT: jne LBB31_33
-; SSE4-NEXT: LBB31_34: ## %else32
-; SSE4-NEXT: testl $131072, %edi ## imm = 0x20000
-; SSE4-NEXT: jne LBB31_35
-; SSE4-NEXT: LBB31_36: ## %else34
-; SSE4-NEXT: testl $262144, %edi ## imm = 0x40000
-; SSE4-NEXT: jne LBB31_37
-; SSE4-NEXT: LBB31_38: ## %else36
-; SSE4-NEXT: testl $524288, %edi ## imm = 0x80000
-; SSE4-NEXT: jne LBB31_39
-; SSE4-NEXT: LBB31_40: ## %else38
-; SSE4-NEXT: testl $1048576, %edi ## imm = 0x100000
-; SSE4-NEXT: jne LBB31_41
-; SSE4-NEXT: LBB31_42: ## %else40
-; SSE4-NEXT: testl $2097152, %edi ## imm = 0x200000
-; SSE4-NEXT: jne LBB31_43
-; SSE4-NEXT: LBB31_44: ## %else42
-; SSE4-NEXT: testl $4194304, %edi ## imm = 0x400000
-; SSE4-NEXT: je LBB31_46
-; SSE4-NEXT: LBB31_45: ## %cond.store43
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 88(%rdx)
-; SSE4-NEXT: LBB31_46: ## %else44
-; SSE4-NEXT: movb $1, %al
-; SSE4-NEXT: testb %al, %al
-; SSE4-NEXT: jne LBB31_48
-; SSE4-NEXT: ## %bb.47: ## %cond.store45
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 92(%rdx)
-; SSE4-NEXT: LBB31_48: ## %else46
-; SSE4-NEXT: popq %rbx
-; SSE4-NEXT: popq %r12
-; SSE4-NEXT: popq %r13
-; SSE4-NEXT: popq %r14
-; SSE4-NEXT: popq %r15
-; SSE4-NEXT: popq %rbp
-; SSE4-NEXT: retq
-; SSE4-NEXT: LBB31_1: ## %cond.store
-; SSE4-NEXT: movl (%rsi), %esi
-; SSE4-NEXT: movl %esi, (%rdx)
-; SSE4-NEXT: testb $2, %dil
-; SSE4-NEXT: je LBB31_4
-; SSE4-NEXT: LBB31_3: ## %cond.store1
-; SSE4-NEXT: movl %r12d, 4(%rdx)
-; SSE4-NEXT: testb $4, %dil
-; SSE4-NEXT: je LBB31_6
-; SSE4-NEXT: LBB31_5: ## %cond.store3
-; SSE4-NEXT: movl %r15d, 8(%rdx)
-; SSE4-NEXT: testb $8, %dil
-; SSE4-NEXT: je LBB31_8
-; SSE4-NEXT: LBB31_7: ## %cond.store5
-; SSE4-NEXT: movl %r14d, 12(%rdx)
-; SSE4-NEXT: testb $16, %dil
-; SSE4-NEXT: je LBB31_10
-; SSE4-NEXT: LBB31_9: ## %cond.store7
-; SSE4-NEXT: movl %ebp, 16(%rdx)
-; SSE4-NEXT: testb $32, %dil
-; SSE4-NEXT: je LBB31_12
-; SSE4-NEXT: LBB31_11: ## %cond.store9
-; SSE4-NEXT: movl %ebx, 20(%rdx)
-; SSE4-NEXT: testb $64, %dil
-; SSE4-NEXT: je LBB31_14
-; SSE4-NEXT: LBB31_13: ## %cond.store11
-; SSE4-NEXT: movl %r11d, 24(%rdx)
-; SSE4-NEXT: testb %dil, %dil
-; SSE4-NEXT: jns LBB31_16
-; SSE4-NEXT: LBB31_15: ## %cond.store13
-; SSE4-NEXT: movl %r10d, 28(%rdx)
-; SSE4-NEXT: testl $256, %edi ## imm = 0x100
-; SSE4-NEXT: je LBB31_18
-; SSE4-NEXT: LBB31_17: ## %cond.store15
-; SSE4-NEXT: movl %r9d, 32(%rdx)
-; SSE4-NEXT: testl $512, %edi ## imm = 0x200
-; SSE4-NEXT: je LBB31_20
-; SSE4-NEXT: LBB31_19: ## %cond.store17
-; SSE4-NEXT: movl %r8d, 36(%rdx)
-; SSE4-NEXT: testl $1024, %edi ## imm = 0x400
-; SSE4-NEXT: je LBB31_22
-; SSE4-NEXT: LBB31_21: ## %cond.store19
-; SSE4-NEXT: movl %ecx, 40(%rdx)
-; SSE4-NEXT: testl $2048, %edi ## imm = 0x800
-; SSE4-NEXT: je LBB31_24
-; SSE4-NEXT: LBB31_23: ## %cond.store21
-; SSE4-NEXT: movl %eax, 44(%rdx)
-; SSE4-NEXT: testl $4096, %edi ## imm = 0x1000
-; SSE4-NEXT: je LBB31_26
-; SSE4-NEXT: LBB31_25: ## %cond.store23
-; SSE4-NEXT: movl %r13d, 48(%rdx)
-; SSE4-NEXT: testl $8192, %edi ## imm = 0x2000
-; SSE4-NEXT: je LBB31_28
-; SSE4-NEXT: LBB31_27: ## %cond.store25
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 52(%rdx)
-; SSE4-NEXT: testl $16384, %edi ## imm = 0x4000
-; SSE4-NEXT: je LBB31_30
-; SSE4-NEXT: LBB31_29: ## %cond.store27
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 56(%rdx)
-; SSE4-NEXT: testw %di, %di
-; SSE4-NEXT: jns LBB31_32
-; SSE4-NEXT: LBB31_31: ## %cond.store29
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 60(%rdx)
-; SSE4-NEXT: testl $65536, %edi ## imm = 0x10000
-; SSE4-NEXT: je LBB31_34
-; SSE4-NEXT: LBB31_33: ## %cond.store31
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 64(%rdx)
-; SSE4-NEXT: testl $131072, %edi ## imm = 0x20000
-; SSE4-NEXT: je LBB31_36
-; SSE4-NEXT: LBB31_35: ## %cond.store33
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 68(%rdx)
-; SSE4-NEXT: testl $262144, %edi ## imm = 0x40000
-; SSE4-NEXT: je LBB31_38
-; SSE4-NEXT: LBB31_37: ## %cond.store35
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 72(%rdx)
-; SSE4-NEXT: testl $524288, %edi ## imm = 0x80000
-; SSE4-NEXT: je LBB31_40
-; SSE4-NEXT: LBB31_39: ## %cond.store37
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 76(%rdx)
-; SSE4-NEXT: testl $1048576, %edi ## imm = 0x100000
-; SSE4-NEXT: je LBB31_42
-; SSE4-NEXT: LBB31_41: ## %cond.store39
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 80(%rdx)
-; SSE4-NEXT: testl $2097152, %edi ## imm = 0x200000
-; SSE4-NEXT: je LBB31_44
-; SSE4-NEXT: LBB31_43: ## %cond.store41
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 84(%rdx)
-; SSE4-NEXT: testl $4194304, %edi ## imm = 0x400000
-; SSE4-NEXT: jne LBB31_45
-; SSE4-NEXT: jmp LBB31_46
+define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigger.ptr, ptr %val.ptr, ptr %dst) nounwind {
+; SSE-LABEL: store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts:
+; SSE: ## %bb.0:
+; SSE-NEXT: pushq %rbp
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %r13
+; SSE-NEXT: pushq %r12
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movdqa (%rdi), %xmm1
+; SSE-NEXT: movdqa 32(%rdi), %xmm2
+; SSE-NEXT: movdqa 64(%rdi), %xmm0
+; SSE-NEXT: movl 92(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 88(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 84(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 80(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 76(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 72(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 68(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 64(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 60(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 56(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 52(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: packssdw 48(%rdi), %xmm2
+; SSE-NEXT: packssdw 16(%rdi), %xmm1
+; SSE-NEXT: packsswb %xmm2, %xmm1
+; SSE-NEXT: packssdw 80(%rdi), %xmm0
+; SSE-NEXT: packsswb %xmm0, %xmm0
+; SSE-NEXT: pmovmskb %xmm1, %eax
+; SSE-NEXT: andl $21845, %eax ## imm = 0x5555
+; SSE-NEXT: pmovmskb %xmm0, %edi
+; SSE-NEXT: andl $85, %edi
+; SSE-NEXT: shll $16, %edi
+; SSE-NEXT: orl %eax, %edi
+; SSE-NEXT: movl 48(%rsi), %r13d
+; SSE-NEXT: testb $1, %dil
+; SSE-NEXT: movl 44(%rsi), %eax
+; SSE-NEXT: movl 40(%rsi), %ecx
+; SSE-NEXT: movl 36(%rsi), %r8d
+; SSE-NEXT: movl 32(%rsi), %r9d
+; SSE-NEXT: movl 28(%rsi), %r10d
+; SSE-NEXT: movl 24(%rsi), %r11d
+; SSE-NEXT: movl 20(%rsi), %ebx
+; SSE-NEXT: movl 16(%rsi), %ebp
+; SSE-NEXT: movl 12(%rsi), %r14d
+; SSE-NEXT: movl 8(%rsi), %r15d
+; SSE-NEXT: movl 4(%rsi), %r12d
+; SSE-NEXT: jne LBB31_1
+; SSE-NEXT: ## %bb.2: ## %else
+; SSE-NEXT: testb $2, %dil
+; SSE-NEXT: jne LBB31_3
+; SSE-NEXT: LBB31_4: ## %else2
+; SSE-NEXT: testb $4, %dil
+; SSE-NEXT: jne LBB31_5
+; SSE-NEXT: LBB31_6: ## %else4
+; SSE-NEXT: testb $8, %dil
+; SSE-NEXT: jne LBB31_7
+; SSE-NEXT: LBB31_8: ## %else6
+; SSE-NEXT: testb $16, %dil
+; SSE-NEXT: jne LBB31_9
+; SSE-NEXT: LBB31_10: ## %else8
+; SSE-NEXT: testb $32, %dil
+; SSE-NEXT: jne LBB31_11
+; SSE-NEXT: LBB31_12: ## %else10
+; SSE-NEXT: testb $64, %dil
+; SSE-NEXT: jne LBB31_13
+; SSE-NEXT: LBB31_14: ## %else12
+; SSE-NEXT: testb %dil, %dil
+; SSE-NEXT: js LBB31_15
+; SSE-NEXT: LBB31_16: ## %else14
+; SSE-NEXT: testl $256, %edi ## imm = 0x100
+; SSE-NEXT: jne LBB31_17
+; SSE-NEXT: LBB31_18: ## %else16
+; SSE-NEXT: testl $512, %edi ## imm = 0x200
+; SSE-NEXT: jne LBB31_19
+; SSE-NEXT: LBB31_20: ## %else18
+; SSE-NEXT: testl $1024, %edi ## imm = 0x400
+; SSE-NEXT: jne LBB31_21
+; SSE-NEXT: LBB31_22: ## %else20
+; SSE-NEXT: testl $2048, %edi ## imm = 0x800
+; SSE-NEXT: jne LBB31_23
+; SSE-NEXT: LBB31_24: ## %else22
+; SSE-NEXT: testl $4096, %edi ## imm = 0x1000
+; SSE-NEXT: jne LBB31_25
+; SSE-NEXT: LBB31_26: ## %else24
+; SSE-NEXT: testl $8192, %edi ## imm = 0x2000
+; SSE-NEXT: jne LBB31_27
+; SSE-NEXT: LBB31_28: ## %else26
+; SSE-NEXT: testl $16384, %edi ## imm = 0x4000
+; SSE-NEXT: jne LBB31_29
+; SSE-NEXT: LBB31_30: ## %else28
+; SSE-NEXT: testw %di, %di
+; SSE-NEXT: js LBB31_31
+; SSE-NEXT: LBB31_32: ## %else30
+; SSE-NEXT: testl $65536, %edi ## imm = 0x10000
+; SSE-NEXT: jne LBB31_33
+; SSE-NEXT: LBB31_34: ## %else32
+; SSE-NEXT: testl $131072, %edi ## imm = 0x20000
+; SSE-NEXT: jne LBB31_35
+; SSE-NEXT: LBB31_36: ## %else34
+; SSE-NEXT: testl $262144, %edi ## imm = 0x40000
+; SSE-NEXT: jne LBB31_37
+; SSE-NEXT: LBB31_38: ## %else36
+; SSE-NEXT: testl $524288, %edi ## imm = 0x80000
+; SSE-NEXT: jne LBB31_39
+; SSE-NEXT: LBB31_40: ## %else38
+; SSE-NEXT: testl $1048576, %edi ## imm = 0x100000
+; SSE-NEXT: jne LBB31_41
+; SSE-NEXT: LBB31_42: ## %else40
+; SSE-NEXT: testl $2097152, %edi ## imm = 0x200000
+; SSE-NEXT: jne LBB31_43
+; SSE-NEXT: LBB31_44: ## %else42
+; SSE-NEXT: testl $4194304, %edi ## imm = 0x400000
+; SSE-NEXT: je LBB31_46
+; SSE-NEXT: LBB31_45: ## %cond.store43
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 88(%rdx)
+; SSE-NEXT: LBB31_46: ## %else44
+; SSE-NEXT: movb $1, %al
+; SSE-NEXT: testb %al, %al
+; SSE-NEXT: jne LBB31_48
+; SSE-NEXT: ## %bb.47: ## %cond.store45
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 92(%rdx)
+; SSE-NEXT: LBB31_48: ## %else46
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r12
+; SSE-NEXT: popq %r13
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: popq %rbp
+; SSE-NEXT: retq
+; SSE-NEXT: LBB31_1: ## %cond.store
+; SSE-NEXT: movl (%rsi), %esi
+; SSE-NEXT: movl %esi, (%rdx)
+; SSE-NEXT: testb $2, %dil
+; SSE-NEXT: je LBB31_4
+; SSE-NEXT: LBB31_3: ## %cond.store1
+; SSE-NEXT: movl %r12d, 4(%rdx)
+; SSE-NEXT: testb $4, %dil
+; SSE-NEXT: je LBB31_6
+; SSE-NEXT: LBB31_5: ## %cond.store3
+; SSE-NEXT: movl %r15d, 8(%rdx)
+; SSE-NEXT: testb $8, %dil
+; SSE-NEXT: je LBB31_8
+; SSE-NEXT: LBB31_7: ## %cond.store5
+; SSE-NEXT: movl %r14d, 12(%rdx)
+; SSE-NEXT: testb $16, %dil
+; SSE-NEXT: je LBB31_10
+; SSE-NEXT: LBB31_9: ## %cond.store7
+; SSE-NEXT: movl %ebp, 16(%rdx)
+; SSE-NEXT: testb $32, %dil
+; SSE-NEXT: je LBB31_12
+; SSE-NEXT: LBB31_11: ## %cond.store9
+; SSE-NEXT: movl %ebx, 20(%rdx)
+; SSE-NEXT: testb $64, %dil
+; SSE-NEXT: je LBB31_14
+; SSE-NEXT: LBB31_13: ## %cond.store11
+; SSE-NEXT: movl %r11d, 24(%rdx)
+; SSE-NEXT: testb %dil, %dil
+; SSE-NEXT: jns LBB31_16
+; SSE-NEXT: LBB31_15: ## %cond.store13
+; SSE-NEXT: movl %r10d, 28(%rdx)
+; SSE-NEXT: testl $256, %edi ## imm = 0x100
+; SSE-NEXT: je LBB31_18
+; SSE-NEXT: LBB31_17: ## %cond.store15
+; SSE-NEXT: movl %r9d, 32(%rdx)
+; SSE-NEXT: testl $512, %edi ## imm = 0x200
+; SSE-NEXT: je LBB31_20
+; SSE-NEXT: LBB31_19: ## %cond.store17
+; SSE-NEXT: movl %r8d, 36(%rdx)
+; SSE-NEXT: testl $1024, %edi ## imm = 0x400
+; SSE-NEXT: je LBB31_22
+; SSE-NEXT: LBB31_21: ## %cond.store19
+; SSE-NEXT: movl %ecx, 40(%rdx)
+; SSE-NEXT: testl $2048, %edi ## imm = 0x800
+; SSE-NEXT: je LBB31_24
+; SSE-NEXT: LBB31_23: ## %cond.store21
+; SSE-NEXT: movl %eax, 44(%rdx)
+; SSE-NEXT: testl $4096, %edi ## imm = 0x1000
+; SSE-NEXT: je LBB31_26
+; SSE-NEXT: LBB31_25: ## %cond.store23
+; SSE-NEXT: movl %r13d, 48(%rdx)
+; SSE-NEXT: testl $8192, %edi ## imm = 0x2000
+; SSE-NEXT: je LBB31_28
+; SSE-NEXT: LBB31_27: ## %cond.store25
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 52(%rdx)
+; SSE-NEXT: testl $16384, %edi ## imm = 0x4000
+; SSE-NEXT: je LBB31_30
+; SSE-NEXT: LBB31_29: ## %cond.store27
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 56(%rdx)
+; SSE-NEXT: testw %di, %di
+; SSE-NEXT: jns LBB31_32
+; SSE-NEXT: LBB31_31: ## %cond.store29
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 60(%rdx)
+; SSE-NEXT: testl $65536, %edi ## imm = 0x10000
+; SSE-NEXT: je LBB31_34
+; SSE-NEXT: LBB31_33: ## %cond.store31
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 64(%rdx)
+; SSE-NEXT: testl $131072, %edi ## imm = 0x20000
+; SSE-NEXT: je LBB31_36
+; SSE-NEXT: LBB31_35: ## %cond.store33
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 68(%rdx)
+; SSE-NEXT: testl $262144, %edi ## imm = 0x40000
+; SSE-NEXT: je LBB31_38
+; SSE-NEXT: LBB31_37: ## %cond.store35
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 72(%rdx)
+; SSE-NEXT: testl $524288, %edi ## imm = 0x80000
+; SSE-NEXT: je LBB31_40
+; SSE-NEXT: LBB31_39: ## %cond.store37
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 76(%rdx)
+; SSE-NEXT: testl $1048576, %edi ## imm = 0x100000
+; SSE-NEXT: je LBB31_42
+; SSE-NEXT: LBB31_41: ## %cond.store39
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 80(%rdx)
+; SSE-NEXT: testl $2097152, %edi ## imm = 0x200000
+; SSE-NEXT: je LBB31_44
+; SSE-NEXT: LBB31_43: ## %cond.store41
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 84(%rdx)
+; SSE-NEXT: testl $4194304, %edi ## imm = 0x400000
+; SSE-NEXT: jne LBB31_45
+; SSE-NEXT: jmp LBB31_46
;
; AVX1-LABEL: store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts:
; AVX1: ## %bb.0:
@@ -6266,7 +6021,7 @@ define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigge
}
; From https://reviews.llvm.org/rGf8d9097168b7#1165311
-define void @undefshuffle(<8 x i1> %i0, ptr %src, ptr %dst) #0 {
+define void @undefshuffle(<8 x i1> %i0, ptr %src, ptr %dst) nounwind {
; SSE2-LABEL: undefshuffle:
; SSE2: ## %bb.0: ## %else
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
diff --git a/llvm/test/CodeGen/X86/oddshuffles.ll b/llvm/test/CodeGen/X86/oddshuffles.ll
index 5da18ee..01056a8 100644
--- a/llvm/test/CodeGen/X86/oddshuffles.ll
+++ b/llvm/test/CodeGen/X86/oddshuffles.ll
@@ -2369,6 +2369,31 @@ define void @PR41097() {
ret void
}
+; FIXME - should use INSERTPS
+define <2 x float> @PR86068(<2 x float> %0, <2 x float> %1) {
+; SSE2-LABEL: PR86068:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,1]
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: PR86068:
+; SSE42: # %bb.0: # %entry
+; SSE42-NEXT: movshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE42-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; SSE42-NEXT: retq
+;
+; AVX-LABEL: PR86068:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; AVX-NEXT: retq
+entry:
+ %3 = shufflevector <2 x float> %1, <2 x float> poison, <2 x i32> <i32 1, i32 poison>
+ %4 = shufflevector <2 x float> %3, <2 x float> %0, <2 x i32> <i32 0, i32 3>
+ ret <2 x float> %4
+}
+
define void @D107009(ptr %input, ptr %output) {
; SSE-LABEL: D107009:
; SSE: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/optimize-max-0.ll b/llvm/test/CodeGen/X86/optimize-max-0.ll
index 1bd427c..81dafdf 100644
--- a/llvm/test/CodeGen/X86/optimize-max-0.ll
+++ b/llvm/test/CodeGen/X86/optimize-max-0.ll
@@ -489,7 +489,6 @@ define void @bar(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
; CHECK-NEXT: jb LBB1_4
; CHECK-NEXT: ## %bb.5: ## %bb9
; CHECK-NEXT: ## in Loop: Header=BB1_4 Depth=1
-; CHECK-NEXT: movl %edi, %ebx
; CHECK-NEXT: incl %ecx
; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: addl %edi, %edx
diff --git a/llvm/test/CodeGen/X86/pr45378.ll b/llvm/test/CodeGen/X86/pr45378.ll
index 426f4ee..6a5770a 100644
--- a/llvm/test/CodeGen/X86/pr45378.ll
+++ b/llvm/test/CodeGen/X86/pr45378.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefixes=CHECK,AVX
declare i64 @llvm.vector.reduce.or.v2i64(<2 x i64>)
@@ -71,28 +71,12 @@ define i1 @parseHeaders2_scalar_or(ptr %ptr) nounwind {
}
define i1 @parseHeaders2_scalar_and(ptr %ptr) nounwind {
-; SSE2-LABEL: parseHeaders2_scalar_and:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqu (%rdi), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: testq %rax, (%rdi)
-; SSE2-NEXT: sete %al
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: parseHeaders2_scalar_and:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movq (%rdi), %rax
-; SSE41-NEXT: testq %rax, 8(%rdi)
-; SSE41-NEXT: sete %al
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: parseHeaders2_scalar_and:
-; AVX: # %bb.0:
-; AVX-NEXT: movq (%rdi), %rax
-; AVX-NEXT: testq %rax, 8(%rdi)
-; AVX-NEXT: sete %al
-; AVX-NEXT: retq
+; CHECK-LABEL: parseHeaders2_scalar_and:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: testq %rax, 8(%rdi)
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: retq
%vload = load <2 x i64>, ptr %ptr, align 8
%v1 = extractelement <2 x i64> %vload, i32 0
%v2 = extractelement <2 x i64> %vload, i32 1
diff --git a/llvm/test/CodeGen/X86/pr85681.ll b/llvm/test/CodeGen/X86/pr85681.ll
new file mode 100644
index 0000000..3b27a02
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr85681.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=emeraldrapids | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v2 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v3 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s
+
+; PR85681 - shift i1/vXi1 X, Y -> X as only Y==0 is defined
+
+define i32 @shl(i32 %a0) {
+; CHECK-LABEL: shl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl $-1, %eax
+; CHECK-NEXT: retq
+ %v0 = bitcast i32 %a0 to <32 x i1>
+ %s = shl <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, %v0
+ %r = bitcast <32 x i1> %s to i32
+ ret i32 %r
+}
+
+define i32 @lshr(i32 %a0) {
+; CHECK-LABEL: lshr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl $-1, %eax
+; CHECK-NEXT: retq
+ %v0 = bitcast i32 %a0 to <32 x i1>
+ %s = lshr <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, %v0
+ %r = bitcast <32 x i1> %s to i32
+ ret i32 %r
+}
+
+define i32 @ashr(i32 %a0) {
+; CHECK-LABEL: ashr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl $-1, %eax
+; CHECK-NEXT: retq
+ %v0 = bitcast i32 %a0 to <32 x i1>
+ %s = ashr <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, %v0
+ %r = bitcast <32 x i1> %s to i32
+ ret i32 %r
+}
diff --git a/llvm/test/CodeGen/X86/pr86305.ll b/llvm/test/CodeGen/X86/pr86305.ll
new file mode 100644
index 0000000..79b42bb
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr86305.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16 | FileCheck %s
+
+define void @add(ptr %pa, ptr %pb, ptr %pc) nounwind {
+; CHECK-LABEL: add:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movq %rdx, %rbx
+; CHECK-NEXT: movzwl (%rsi), %eax
+; CHECK-NEXT: shll $16, %eax
+; CHECK-NEXT: vmovd %eax, %xmm0
+; CHECK-NEXT: movzwl (%rdi), %eax
+; CHECK-NEXT: shll $16, %eax
+; CHECK-NEXT: vmovd %eax, %xmm1
+; CHECK-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vpextrw $0, %xmm0, (%rbx)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+ %a = load bfloat, ptr %pa
+ %b = load bfloat, ptr %pb
+ %add = fadd bfloat %a, %b
+ store bfloat %add, ptr %pc
+ ret void
+}
+
+define <4 x bfloat> @fptrunc_v4f32(<4 x float> %a) nounwind {
+; CHECK-LABEL: fptrunc_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: subq $72, %rsp
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vpshufd $255, (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vpextrw $0, %xmm0, %ebx
+; CHECK-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vpextrw $0, %xmm0, %ebp
+; CHECK-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vpextrw $0, %xmm0, %r14d
+; CHECK-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vpextrw $0, %xmm0, %r15d
+; CHECK-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vpextrw $0, %xmm0, %eax
+; CHECK-NEXT: vmovd %r15d, %xmm0
+; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $2, %r14d, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $3, %ebp, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $4, %ebx, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $5, %ebx, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $6, %ebx, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $7, %ebx, %xmm0, %xmm0
+; CHECK-NEXT: addq $72, %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: retq
+ %b = fptrunc <4 x float> %a to <4 x bfloat>
+ ret <4 x bfloat> %b
+}
diff --git a/llvm/test/CodeGen/X86/pr86880.mir b/llvm/test/CodeGen/X86/pr86880.mir
new file mode 100644
index 0000000..92ebf9a
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr86880.mir
@@ -0,0 +1,21 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=x86_64-- -run-pass=machine-cp -o - %s | FileCheck %s
+
+---
+name: foo
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $eax
+
+ ; CHECK-LABEL: name: foo
+ ; CHECK: liveins: $eax
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: INLINEASM &"", 0 /* attdialect */, 10 /* regdef */, implicit-def dead $eax, 2686986 /* regdef:GR32_NOREX2 */, def renamable $r15d, 10 /* regdef */, implicit-def dead $ecx, 10 /* regdef */, implicit-def dead $edx, 2147483657 /* reguse tiedto:$0 */, $eax(tied-def 3)
+ ; CHECK-NEXT: renamable $ecx = COPY killed renamable $r15d
+ ; CHECK-NEXT: NOOP implicit $ecx
+ INLINEASM &"", 0 /* attdialect */, 10 /* regdef */, implicit-def dead $eax, 2686986 /* regdef:GR32_NOREX2 */, def renamable $r15d, 10 /* regdef */, implicit-def dead $ecx, 10 /* regdef */, implicit-def dead $edx, 2147483657 /* reguse tiedto:$0 */, $eax(tied-def 3)
+ renamable $ecx = COPY killed renamable $r15d
+ NOOP implicit $ecx
+
+...
diff --git a/llvm/test/CodeGen/X86/regalloc-copy-hints.mir b/llvm/test/CodeGen/X86/regalloc-copy-hints.mir
index 13b5a54..d09bcd6 100644
--- a/llvm/test/CodeGen/X86/regalloc-copy-hints.mir
+++ b/llvm/test/CodeGen/X86/regalloc-copy-hints.mir
@@ -103,6 +103,7 @@ registers:
- { id: 82, class: gr32 }
frameInfo:
maxAlignment: 4
+ adjustsStack: true
hasCalls: true
fixedStack:
- { id: 0, size: 4, alignment: 4, stack-id: default, isImmutable: true }
diff --git a/llvm/test/CodeGen/X86/sar_fold.ll b/llvm/test/CodeGen/X86/sar_fold.ll
index 21655e1..0f13969 100644
--- a/llvm/test/CodeGen/X86/sar_fold.ll
+++ b/llvm/test/CodeGen/X86/sar_fold.ll
@@ -44,3 +44,44 @@ define i32 @shl24sar25(i32 %a) #0 {
%2 = ashr exact i32 %1, 25
ret i32 %2
}
+
+define void @shl144sar48(ptr %p) #0 {
+; CHECK-LABEL: shl144sar48:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movswl (%eax), %ecx
+; CHECK-NEXT: movl %ecx, %edx
+; CHECK-NEXT: sarl $31, %edx
+; CHECK-NEXT: shldl $2, %ecx, %edx
+; CHECK-NEXT: shll $2, %ecx
+; CHECK-NEXT: movl %ecx, 12(%eax)
+; CHECK-NEXT: movl %edx, 16(%eax)
+; CHECK-NEXT: movl $0, 8(%eax)
+; CHECK-NEXT: movl $0, 4(%eax)
+; CHECK-NEXT: movl $0, (%eax)
+; CHECK-NEXT: retl
+ %a = load i160, ptr %p
+ %1 = shl i160 %a, 144
+ %2 = ashr exact i160 %1, 46
+ store i160 %2, ptr %p
+ ret void
+}
+
+define void @shl144sar2(ptr %p) #0 {
+; CHECK-LABEL: shl144sar2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movswl (%eax), %ecx
+; CHECK-NEXT: shll $14, %ecx
+; CHECK-NEXT: movl %ecx, 16(%eax)
+; CHECK-NEXT: movl $0, 8(%eax)
+; CHECK-NEXT: movl $0, 12(%eax)
+; CHECK-NEXT: movl $0, 4(%eax)
+; CHECK-NEXT: movl $0, (%eax)
+; CHECK-NEXT: retl
+ %a = load i160, ptr %p
+ %1 = shl i160 %a, 144
+ %2 = ashr exact i160 %1, 2
+ store i160 %2, ptr %p
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/setcc-non-simple-type.ll b/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
index 2187c65..97c3c204 100644
--- a/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
+++ b/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
@@ -60,36 +60,30 @@ define void @failing(ptr %0, ptr %1) nounwind {
; CHECK-NEXT: .LBB0_2: # %vector.body
; CHECK-NEXT: # Parent Loop BB0_1 Depth=1
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
-; CHECK-NEXT: movdqu 1024(%rdx,%rdi), %xmm5
-; CHECK-NEXT: movdqu 1040(%rdx,%rdi), %xmm6
-; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
-; CHECK-NEXT: movq %xmm5, %r8
-; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,2,3]
-; CHECK-NEXT: movq %xmm5, %r9
-; CHECK-NEXT: cmpq 1040(%rdx,%rdi), %rsi
-; CHECK-NEXT: movq %rcx, %r10
-; CHECK-NEXT: sbbq %r9, %r10
-; CHECK-NEXT: setge %r9b
-; CHECK-NEXT: movzbl %r9b, %r9d
-; CHECK-NEXT: andl $1, %r9d
-; CHECK-NEXT: negq %r9
-; CHECK-NEXT: movq %r9, %xmm5
; CHECK-NEXT: cmpq 1024(%rdx,%rdi), %rsi
-; CHECK-NEXT: movq %rcx, %r9
-; CHECK-NEXT: sbbq %r8, %r9
+; CHECK-NEXT: movq %rcx, %r8
+; CHECK-NEXT: sbbq 1032(%rdx,%rdi), %r8
+; CHECK-NEXT: setge %r8b
+; CHECK-NEXT: movzbl %r8b, %r8d
+; CHECK-NEXT: andl $1, %r8d
+; CHECK-NEXT: negq %r8
+; CHECK-NEXT: movq %r8, %xmm5
+; CHECK-NEXT: cmpq 1040(%rdx,%rdi), %rsi
+; CHECK-NEXT: movq %rcx, %r8
+; CHECK-NEXT: sbbq 1048(%rdx,%rdi), %r8
; CHECK-NEXT: setge %r8b
; CHECK-NEXT: movzbl %r8b, %r8d
; CHECK-NEXT: andl $1, %r8d
; CHECK-NEXT: negq %r8
; CHECK-NEXT: movq %r8, %xmm6
-; CHECK-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm5[0]
-; CHECK-NEXT: movdqa %xmm1, %xmm5
-; CHECK-NEXT: psllq %xmm4, %xmm5
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
+; CHECK-NEXT: movdqa %xmm1, %xmm6
+; CHECK-NEXT: psllq %xmm4, %xmm6
; CHECK-NEXT: pshufd {{.*#+}} xmm7 = xmm4[2,3,2,3]
; CHECK-NEXT: movdqa %xmm1, %xmm8
; CHECK-NEXT: psllq %xmm7, %xmm8
-; CHECK-NEXT: movsd {{.*#+}} xmm8 = xmm5[0],xmm8[1]
-; CHECK-NEXT: andpd %xmm6, %xmm8
+; CHECK-NEXT: movsd {{.*#+}} xmm8 = xmm6[0],xmm8[1]
+; CHECK-NEXT: andpd %xmm5, %xmm8
; CHECK-NEXT: orpd %xmm8, %xmm3
; CHECK-NEXT: paddq %xmm2, %xmm4
; CHECK-NEXT: addq $32, %rdi
diff --git a/llvm/test/CodeGen/X86/shrink_vmul.ll b/llvm/test/CodeGen/X86/shrink_vmul.ll
index 2610f43..62051d1 100644
--- a/llvm/test/CodeGen/X86/shrink_vmul.ll
+++ b/llvm/test/CodeGen/X86/shrink_vmul.ll
@@ -1983,91 +1983,75 @@ define void @PR34947(ptr %p0, ptr %p1) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movzwl 16(%eax), %edx
; X86-SSE-NEXT: movl %edx, (%esp) # 4-byte Spill
-; X86-SSE-NEXT: movdqa (%eax), %xmm3
-; X86-SSE-NEXT: movdqa (%ecx), %xmm0
-; X86-SSE-NEXT: movdqa 16(%ecx), %xmm1
-; X86-SSE-NEXT: pxor %xmm5, %xmm5
-; X86-SSE-NEXT: movdqa %xmm3, %xmm2
-; X86-SSE-NEXT: pextrw $7, %xmm3, %eax
-; X86-SSE-NEXT: pextrw $4, %xmm3, %edi
-; X86-SSE-NEXT: pextrw $0, %xmm3, %ebp
-; X86-SSE-NEXT: pextrw $1, %xmm3, %esi
-; X86-SSE-NEXT: pextrw $3, %xmm3, %ebx
-; X86-SSE-NEXT: movdqa %xmm3, %xmm4
-; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
-; X86-SSE-NEXT: movd %xmm3, %ecx
+; X86-SSE-NEXT: movdqa (%eax), %xmm2
+; X86-SSE-NEXT: pxor %xmm1, %xmm1
+; X86-SSE-NEXT: movdqa %xmm2, %xmm0
+; X86-SSE-NEXT: pextrw $7, %xmm2, %eax
+; X86-SSE-NEXT: pextrw $4, %xmm2, %esi
+; X86-SSE-NEXT: pextrw $1, %xmm2, %edi
+; X86-SSE-NEXT: pextrw $0, %xmm2, %ebx
+; X86-SSE-NEXT: pextrw $3, %xmm2, %ebp
+; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X86-SSE-NEXT: xorl %edx, %edx
+; X86-SSE-NEXT: divl 28(%ecx)
+; X86-SSE-NEXT: movd %edx, %xmm1
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; X86-SSE-NEXT: movd %xmm3, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
+; X86-SSE-NEXT: divl 24(%ecx)
; X86-SSE-NEXT: movd %edx, %xmm3
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
-; X86-SSE-NEXT: movd %xmm5, %eax
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
-; X86-SSE-NEXT: movd %xmm5, %ecx
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; X86-SSE-NEXT: movl %esi, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
-; X86-SSE-NEXT: movd %edx, %xmm5
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
+; X86-SSE-NEXT: divl 16(%ecx)
+; X86-SSE-NEXT: movd %edx, %xmm1
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE-NEXT: movd %xmm0, %eax
+; X86-SSE-NEXT: xorl %edx, %edx
+; X86-SSE-NEXT: divl 20(%ecx)
+; X86-SSE-NEXT: movd %edx, %xmm0
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; X86-SSE-NEXT: movl %edi, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-SSE-NEXT: divl 16(%edi)
+; X86-SSE-NEXT: divl 4(%ecx)
; X86-SSE-NEXT: movd %edx, %xmm3
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
-; X86-SSE-NEXT: movd %xmm2, %eax
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
-; X86-SSE-NEXT: movd %xmm1, %ecx
+; X86-SSE-NEXT: movl %ebx, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
-; X86-SSE-NEXT: movd %edx, %xmm1
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
+; X86-SSE-NEXT: divl (%ecx)
+; X86-SSE-NEXT: movd %edx, %xmm0
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; X86-SSE-NEXT: movl %ebp, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl (%edi)
-; X86-SSE-NEXT: movd %edx, %xmm1
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X86-SSE-NEXT: movd %xmm2, %ecx
-; X86-SSE-NEXT: movl %esi, %eax
-; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
-; X86-SSE-NEXT: movd %edx, %xmm2
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; X86-SSE-NEXT: movd %xmm2, %ecx
-; X86-SSE-NEXT: movl %ebx, %eax
+; X86-SSE-NEXT: divl 12(%ecx)
+; X86-SSE-NEXT: movd %edx, %xmm3
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; X86-SSE-NEXT: movd %xmm2, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
+; X86-SSE-NEXT: divl 8(%ecx)
; X86-SSE-NEXT: movd %edx, %xmm2
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; X86-SSE-NEXT: movd %xmm4, %eax
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; X86-SSE-NEXT: movd %xmm0, %ecx
-; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
-; X86-SSE-NEXT: movd %edx, %xmm0
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; X86-SSE-NEXT: movl (%esp), %eax # 4-byte Reload
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl 32(%edi)
+; X86-SSE-NEXT: divl 32(%ecx)
; X86-SSE-NEXT: movdqa {{.*#+}} xmm2 = [8199,8199,8199,8199]
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; X86-SSE-NEXT: pmuludq %xmm2, %xmm1
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
-; X86-SSE-NEXT: pmuludq %xmm2, %xmm4
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,2,2,3]
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; X86-SSE-NEXT: pmuludq %xmm2, %xmm0
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE-NEXT: pmuludq %xmm2, %xmm3
; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X86-SSE-NEXT: pmuludq %xmm2, %xmm1
; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; X86-SSE-NEXT: pmuludq %xmm2, %xmm3
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X86-SSE-NEXT: imull $8199, %edx, %eax # imm = 0x2007
; X86-SSE-NEXT: movl %eax, (%eax)
-; X86-SSE-NEXT: movdqa %xmm3, (%eax)
+; X86-SSE-NEXT: movdqa %xmm1, (%eax)
; X86-SSE-NEXT: movdqa %xmm0, (%eax)
; X86-SSE-NEXT: addl $4, %esp
; X86-SSE-NEXT: popl %esi
@@ -2204,91 +2188,76 @@ define void @PR34947(ptr %p0, ptr %p1) nounwind {
; X64-SSE-LABEL: PR34947:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movzwl 16(%rdi), %ecx
-; X64-SSE-NEXT: movdqa (%rdi), %xmm3
-; X64-SSE-NEXT: movdqa (%rsi), %xmm0
-; X64-SSE-NEXT: movdqa 16(%rsi), %xmm1
-; X64-SSE-NEXT: pxor %xmm5, %xmm5
-; X64-SSE-NEXT: movdqa %xmm3, %xmm2
-; X64-SSE-NEXT: pextrw $7, %xmm3, %eax
-; X64-SSE-NEXT: pextrw $4, %xmm3, %r8d
-; X64-SSE-NEXT: pextrw $0, %xmm3, %r10d
-; X64-SSE-NEXT: pextrw $1, %xmm3, %edi
-; X64-SSE-NEXT: pextrw $3, %xmm3, %r9d
-; X64-SSE-NEXT: movdqa %xmm3, %xmm4
-; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
-; X64-SSE-NEXT: movd %xmm3, %r11d
-; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %r11d
-; X64-SSE-NEXT: movd %edx, %xmm3
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
-; X64-SSE-NEXT: movd %xmm5, %eax
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
-; X64-SSE-NEXT: movd %xmm5, %r11d
+; X64-SSE-NEXT: movdqa (%rdi), %xmm2
+; X64-SSE-NEXT: pxor %xmm1, %xmm1
+; X64-SSE-NEXT: movdqa %xmm2, %xmm0
+; X64-SSE-NEXT: pextrw $7, %xmm2, %eax
+; X64-SSE-NEXT: pextrw $4, %xmm2, %edi
+; X64-SSE-NEXT: pextrw $1, %xmm2, %r8d
+; X64-SSE-NEXT: pextrw $0, %xmm2, %r9d
+; X64-SSE-NEXT: pextrw $3, %xmm2, %r10d
+; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %r11d
-; X64-SSE-NEXT: movd %edx, %xmm5
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
-; X64-SSE-NEXT: movl %r8d, %eax
+; X64-SSE-NEXT: divl 28(%rsi)
+; X64-SSE-NEXT: movd %edx, %xmm1
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; X64-SSE-NEXT: movd %xmm3, %eax
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl 16(%rsi)
+; X64-SSE-NEXT: divl 24(%rsi)
; X64-SSE-NEXT: movd %edx, %xmm3
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
-; X64-SSE-NEXT: movd %xmm2, %eax
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
-; X64-SSE-NEXT: movd %xmm1, %r8d
-; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %r8d
-; X64-SSE-NEXT: movd %edx, %xmm1
; X64-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
-; X64-SSE-NEXT: movl %r10d, %eax
+; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl (%rsi)
+; X64-SSE-NEXT: divl 16(%rsi)
; X64-SSE-NEXT: movd %edx, %xmm1
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X64-SSE-NEXT: movd %xmm2, %r8d
-; X64-SSE-NEXT: movl %edi, %eax
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X64-SSE-NEXT: movd %xmm0, %eax
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %r8d
-; X64-SSE-NEXT: movd %edx, %xmm2
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; X64-SSE-NEXT: movd %xmm2, %edi
+; X64-SSE-NEXT: divl 20(%rsi)
+; X64-SSE-NEXT: movd %edx, %xmm0
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; X64-SSE-NEXT: movl %r8d, %eax
+; X64-SSE-NEXT: xorl %edx, %edx
+; X64-SSE-NEXT: divl 4(%rsi)
+; X64-SSE-NEXT: movd %edx, %xmm0
; X64-SSE-NEXT: movl %r9d, %eax
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %edi
-; X64-SSE-NEXT: movd %edx, %xmm2
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; X64-SSE-NEXT: movd %xmm4, %eax
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; X64-SSE-NEXT: movd %xmm0, %edi
+; X64-SSE-NEXT: divl (%rsi)
+; X64-SSE-NEXT: movd %edx, %xmm3
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; X64-SSE-NEXT: movl %r10d, %eax
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %edi
+; X64-SSE-NEXT: divl 12(%rsi)
; X64-SSE-NEXT: movd %edx, %xmm0
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; X64-SSE-NEXT: movd %xmm2, %eax
+; X64-SSE-NEXT: xorl %edx, %edx
+; X64-SSE-NEXT: divl 8(%rsi)
+; X64-SSE-NEXT: movd %edx, %xmm2
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; X64-SSE-NEXT: movl %ecx, %eax
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl 32(%rsi)
; X64-SSE-NEXT: movdqa {{.*#+}} xmm0 = [8199,8199,8199,8199]
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; X64-SSE-NEXT: pmuludq %xmm0, %xmm1
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X64-SSE-NEXT: pmuludq %xmm0, %xmm2
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; X64-SSE-NEXT: pmuludq %xmm0, %xmm3
; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; X64-SSE-NEXT: pmuludq %xmm0, %xmm2
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; X64-SSE-NEXT: pmuludq %xmm0, %xmm1
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X64-SSE-NEXT: pmuludq %xmm0, %xmm2
; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-SSE-NEXT: imull $8199, %edx, %eax # imm = 0x2007
; X64-SSE-NEXT: movl %eax, (%rax)
-; X64-SSE-NEXT: movdqa %xmm3, (%rax)
; X64-SSE-NEXT: movdqa %xmm1, (%rax)
+; X64-SSE-NEXT: movdqa %xmm3, (%rax)
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: PR34947:
diff --git a/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll b/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll
index 2d7e6f6..a80f380 100644
--- a/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll
+++ b/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll
@@ -1333,8 +1333,10 @@ define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
; AVX512VL-LABEL: negative:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm0[2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512VL-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
+; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512VL-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512VL-NEXT: vpternlogq $206, %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm2[0,3,2,3]
; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/stack-protector.ll b/llvm/test/CodeGen/X86/stack-protector.ll
index a277f9f..f4f3ae4 100644
--- a/llvm/test/CodeGen/X86/stack-protector.ll
+++ b/llvm/test/CodeGen/X86/stack-protector.ll
@@ -1,6 +1,7 @@
; RUN: llc -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck --check-prefix=LINUX-I386 %s
; RUN: llc -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck --check-prefix=LINUX-X64 %s
; RUN: llc -code-model=kernel -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck --check-prefix=LINUX-KERNEL-X64 %s
+; RUN: llc -code-model=kernel -mtriple=x86_64-unknown-freebsd < %s -o - | FileCheck --check-prefix=FREEBSD-KERNEL-X64 %s
; RUN: llc -mtriple=x86_64-apple-darwin < %s -o - | FileCheck --check-prefix=DARWIN-X64 %s
; RUN: llc -mtriple=amd64-pc-openbsd < %s -o - | FileCheck --check-prefix=OPENBSD-AMD64 %s
; RUN: llc -mtriple=i386-pc-windows-msvc < %s -o - | FileCheck -check-prefix=MSVC-I386 %s
@@ -75,6 +76,10 @@ entry:
; LINUX-X64: mov{{l|q}} %fs:
; LINUX-X64: callq __stack_chk_fail
+; FREEBSD-KERNEL-X64-LABEL: test1b:
+; FREEBSD-KERNEL-X64-NOT: mov{{l|q}} __stack_chk_guard@GOTPCREL
+; FREEBSD-KERNEL-X64: callq __stack_chk_fail
+
; LINUX-KERNEL-X64-LABEL: test1b:
; LINUX-KERNEL-X64: mov{{l|q}} %gs:
; LINUX-KERNEL-X64: callq __stack_chk_fail
@@ -118,6 +123,10 @@ entry:
; LINUX-X64: mov{{l|q}} %fs:
; LINUX-X64: callq __stack_chk_fail
+; FREEBSD-KERNEL-X64-LABEL: test1c:
+; FREEBSD-KERNEL-X64: mov{{l|q}} __stack_chk_guard(%rip)
+; FREEBSD-KERNEL-X64: callq __stack_chk_fail
+
; LINUX-KERNEL-X64-LABEL: test1c:
; LINUX-KERNEL-X64: mov{{l|q}} %gs:
; LINUX-KERNEL-X64: callq __stack_chk_fail
diff --git a/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir b/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir
index 02c9310..8bac140 100644
--- a/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir
+++ b/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir
@@ -6,6 +6,8 @@
---
name: test_relocate
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
body: |
bb.0.entry:
liveins: $rdi
@@ -25,6 +27,8 @@ body: |
---
name: test_relocate_multi_regmasks
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
body: |
bb.0.entry:
liveins: $rdi
diff --git a/llvm/test/CodeGen/X86/statepoint-fixup-undef.mir b/llvm/test/CodeGen/X86/statepoint-fixup-undef.mir
index 30a68e6..4a18351 100644
--- a/llvm/test/CodeGen/X86/statepoint-fixup-undef.mir
+++ b/llvm/test/CodeGen/X86/statepoint-fixup-undef.mir
@@ -61,7 +61,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 8
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir
index 11968f1..5f05270 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir
@@ -231,7 +231,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 1
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra-hoist-copies.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra-hoist-copies.mir
index aae2f38..cf91282 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra-hoist-copies.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra-hoist-copies.mir
@@ -398,7 +398,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 1
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra-inline-spiller.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra-inline-spiller.mir
index 87f5f0f..fcebc69 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra-inline-spiller.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra-inline-spiller.mir
@@ -175,7 +175,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 4
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir
index 4925396..8bb39a0 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir
@@ -226,7 +226,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 4
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra.mir
index 858ff3f..da651039 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra.mir
@@ -172,7 +172,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 4
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir b/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir
index e24d5e8..d40a9a0 100644
--- a/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir
+++ b/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir
@@ -114,7 +114,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 8
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-vreg.mir b/llvm/test/CodeGen/X86/statepoint-vreg.mir
index bfeadfc..a0c596f 100644
--- a/llvm/test/CodeGen/X86/statepoint-vreg.mir
+++ b/llvm/test/CodeGen/X86/statepoint-vreg.mir
@@ -134,6 +134,8 @@ registers:
liveins:
- { reg: '$rdi', virtual-reg: '%0' }
- { reg: '$rsi', virtual-reg: '%1' }
+frameInfo:
+ adjustsStack: true
fixedStack: []
stack: []
callSites: []
diff --git a/llvm/test/CodeGen/X86/tls-align.ll b/llvm/test/CodeGen/X86/tls-align.ll
index 3c8ee6b..e996c00 100644
--- a/llvm/test/CodeGen/X86/tls-align.ll
+++ b/llvm/test/CodeGen/X86/tls-align.ll
@@ -12,7 +12,7 @@
define internal fastcc void @foo() unnamed_addr {
entry:
- store <8 x ptr> <ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, inrange i32 0, i64 2), ptr null, ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, inrange i32 0, i64 2), ptr null, ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, inrange i32 0, i64 2), ptr null, ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, inrange i32 0, i64 2), ptr null>, ptr @array, align 32
+ store <8 x ptr> <ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, i32 0, i64 2), ptr null, ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, i32 0, i64 2), ptr null, ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, i32 0, i64 2), ptr null, ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV7Derived, i64 0, i32 0, i64 2), ptr null>, ptr @array, align 32
ret void
}
diff --git a/llvm/test/CodeGen/X86/tls-desc.ll b/llvm/test/CodeGen/X86/tls-desc.ll
new file mode 100644
index 0000000..c73986e6
--- /dev/null
+++ b/llvm/test/CodeGen/X86/tls-desc.ll
@@ -0,0 +1,199 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=i686 --relocation-model=pic -enable-tlsdesc | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-pc-linux-gnux32 --relocation-model=pic -enable-tlsdesc | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64 --relocation-model=pic -enable-tlsdesc | FileCheck %s --check-prefix=X64
+
+@x = thread_local global i32 0, align 4
+@y = internal thread_local global i32 1, align 4
+@z = external hidden thread_local global i32, align 4
+
+define ptr @f1() nounwind {
+; X86-LABEL: f1:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll .L0$pb
+; X86-NEXT: .L0$pb:
+; X86-NEXT: popl %ebx
+; X86-NEXT: .Ltmp0:
+; X86-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L0$pb), %ebx
+; X86-NEXT: #APP
+; X86-NEXT: #NO_APP
+; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-NEXT: leal x@tlsdesc(%ebx), %eax
+; X86-NEXT: calll *x@tlscall(%eax)
+; X86-NEXT: addl %gs:0, %eax
+; X86-NEXT: movl (%esp), %ebx # 4-byte Reload
+; X86-NEXT: #APP
+; X86-NEXT: #NO_APP
+; X86-NEXT: addl $4, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X32-LABEL: f1:
+; X32: # %bb.0:
+; X32-NEXT: pushq %rax
+; X32-NEXT: #APP
+; X32-NEXT: #NO_APP
+; X32-NEXT: leal x@tlsdesc(%rip), %eax
+; X32-NEXT: callq *x@tlscall(%eax)
+; X32-NEXT: # kill: def $eax killed $eax def $rax
+; X32-NEXT: addl %fs:0, %eax
+; X32-NEXT: #APP
+; X32-NEXT: #NO_APP
+; X32-NEXT: popq %rcx
+; X32-NEXT: retq
+;
+; X64-LABEL: f1:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: #APP
+; X64-NEXT: #NO_APP
+; X64-NEXT: leaq x@tlsdesc(%rip), %rax
+; X64-NEXT: callq *x@tlscall(%rax)
+; X64-NEXT: addq %fs:0, %rax
+; X64-NEXT: #APP
+; X64-NEXT: #NO_APP
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %a = call { i32, i32, i32, i32, i32, i32 } asm sideeffect "", "=r,=r,=r,=r,=r,=r,~{dirflag},~{fpsr},~{flags}"()
+ %b = call ptr @llvm.threadlocal.address.p0(ptr @x)
+ %a.0 = extractvalue { i32, i32, i32, i32, i32, i32 } %a, 0
+ %a.1 = extractvalue { i32, i32, i32, i32, i32, i32 } %a, 1
+ %a.2 = extractvalue { i32, i32, i32, i32, i32, i32 } %a, 2
+ %a.3 = extractvalue { i32, i32, i32, i32, i32, i32 } %a, 3
+ %a.4 = extractvalue { i32, i32, i32, i32, i32, i32 } %a, 4
+ %a.5 = extractvalue { i32, i32, i32, i32, i32, i32 } %a, 5
+ call void asm sideeffect "", "r,r,r,r,r,r,~{dirflag},~{fpsr},~{flags}"(i32 %a.0, i32 %a.1, i32 %a.2, i32 %a.3, i32 %a.4, i32 %a.5)
+ ret ptr %b
+}
+
+define i32 @f2() nounwind {
+; X86-LABEL: f2:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebx
+; X86-NEXT: calll .L1$pb
+; X86-NEXT: .L1$pb:
+; X86-NEXT: popl %ebx
+; X86-NEXT: .Ltmp1:
+; X86-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L1$pb), %ebx
+; X86-NEXT: movl %gs:0, %ecx
+; X86-NEXT: leal x@tlsdesc(%ebx), %eax
+; X86-NEXT: calll *x@tlscall(%eax)
+; X86-NEXT: movl (%eax,%ecx), %eax
+; X86-NEXT: popl %ebx
+; X86-NEXT: retl
+;
+; X32-LABEL: f2:
+; X32: # %bb.0:
+; X32-NEXT: pushq %rax
+; X32-NEXT: movl %fs:0, %ecx
+; X32-NEXT: leal x@tlsdesc(%rip), %eax
+; X32-NEXT: callq *x@tlscall(%eax)
+; X32-NEXT: movl (%eax,%ecx), %eax
+; X32-NEXT: popq %rcx
+; X32-NEXT: retq
+;
+; X64-LABEL: f2:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movq %fs:0, %rcx
+; X64-NEXT: leaq x@tlsdesc(%rip), %rax
+; X64-NEXT: callq *x@tlscall(%rax)
+; X64-NEXT: movl (%rax,%rcx), %eax
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %1 = tail call ptr @llvm.threadlocal.address.p0(ptr @x)
+ %2 = load i32, ptr %1
+ ret i32 %2
+}
+
+define ptr @f3() nounwind {
+; X86-LABEL: f3:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebx
+; X86-NEXT: calll .L2$pb
+; X86-NEXT: .L2$pb:
+; X86-NEXT: popl %ebx
+; X86-NEXT: .Ltmp2:
+; X86-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.L2$pb), %ebx
+; X86-NEXT: leal x@tlsdesc(%ebx), %eax
+; X86-NEXT: calll *x@tlscall(%eax)
+; X86-NEXT: addl %gs:0, %eax
+; X86-NEXT: popl %ebx
+; X86-NEXT: retl
+;
+; X32-LABEL: f3:
+; X32: # %bb.0:
+; X32-NEXT: pushq %rax
+; X32-NEXT: leal x@tlsdesc(%rip), %eax
+; X32-NEXT: callq *x@tlscall(%eax)
+; X32-NEXT: # kill: def $eax killed $eax def $rax
+; X32-NEXT: addl %fs:0, %eax
+; X32-NEXT: popq %rcx
+; X32-NEXT: retq
+;
+; X64-LABEL: f3:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: leaq x@tlsdesc(%rip), %rax
+; X64-NEXT: callq *x@tlscall(%rax)
+; X64-NEXT: addq %fs:0, %rax
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %1 = tail call ptr @llvm.threadlocal.address.p0(ptr @x)
+ ret ptr %1
+}
+
+define i32 @f4() nounwind {
+; X86-LABEL: f4:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebx
+; X86-NEXT: calll .L3$pb
+; X86-NEXT: .L3$pb:
+; X86-NEXT: popl %ebx
+; X86-NEXT: .Ltmp3:
+; X86-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp3-.L3$pb), %ebx
+; X86-NEXT: movl %gs:0, %edx
+; X86-NEXT: leal _TLS_MODULE_BASE_@tlsdesc(%ebx), %eax
+; X86-NEXT: calll *_TLS_MODULE_BASE_@tlscall(%eax)
+; X86-NEXT: movl y@DTPOFF(%eax,%edx), %ecx
+; X86-NEXT: addl z@DTPOFF(%eax,%edx), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: popl %ebx
+; X86-NEXT: retl
+;
+; X32-LABEL: f4:
+; X32: # %bb.0:
+; X32-NEXT: pushq %rax
+; X32-NEXT: movl %fs:0, %edx
+; X32-NEXT: leal _TLS_MODULE_BASE_@tlsdesc(%rip), %eax
+; X32-NEXT: callq *_TLS_MODULE_BASE_@tlscall(%eax)
+; X32-NEXT: movl y@DTPOFF(%eax,%edx), %ecx
+; X32-NEXT: addl z@DTPOFF(%eax,%edx), %ecx
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: popq %rcx
+; X32-NEXT: retq
+;
+; X64-LABEL: f4:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movq %fs:0, %rdx
+; X64-NEXT: leaq _TLS_MODULE_BASE_@tlsdesc(%rip), %rax
+; X64-NEXT: callq *_TLS_MODULE_BASE_@tlscall(%rax)
+; X64-NEXT: movl y@DTPOFF(%rax,%rdx), %ecx
+; X64-NEXT: addl z@DTPOFF(%rax,%rdx), %ecx
+; X64-NEXT: movl %ecx, %eax
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %1 = load i32, ptr @y, align 4
+ %2 = load i32, ptr @z, align 4
+ %3 = add nsw i32 %1, %2
+ ret i32 %3
+}
diff --git a/llvm/test/CodeGen/X86/tls-loads-control3.ll b/llvm/test/CodeGen/X86/tls-loads-control3.ll
index 82daac5..4e521b1 100644
--- a/llvm/test/CodeGen/X86/tls-loads-control3.ll
+++ b/llvm/test/CodeGen/X86/tls-loads-control3.ll
@@ -183,7 +183,6 @@ define i32 @_Z2f2i(i32 %c) local_unnamed_addr #0 {
; HOIST0-NEXT: # %bb.1: # %while.body.preheader
; HOIST0-NEXT: leaq _ZZ2f2iE2st.0@TLSLD(%rip), %rdi
; HOIST0-NEXT: callq __tls_get_addr@PLT
-; HOIST0-NEXT: movq %rax, %rcx
; HOIST0-NEXT: leaq _ZZ2f2iE2st.0@DTPOFF(%rax), %r15
; HOIST0-NEXT: leaq _ZZ2f2iE2st.1@DTPOFF(%rax), %r12
; HOIST0-NEXT: .p2align 4, 0x90
@@ -245,9 +244,7 @@ define i32 @_Z2f2i(i32 %c) local_unnamed_addr #0 {
; HOIST2-NEXT: movq %rax, %r14
; HOIST2-NEXT: addb %bpl, _ZZ2f2iE2st.0@DTPOFF(%rax)
; HOIST2-NEXT: callq _Z5gfuncv@PLT
-; HOIST2-NEXT: movl %eax, %ecx
-; HOIST2-NEXT: movq %r14, %rax
-; HOIST2-NEXT: addl %ecx, _ZZ2f2iE2st.1@DTPOFF(%r14)
+; HOIST2-NEXT: addl %eax, _ZZ2f2iE2st.1@DTPOFF(%r14)
; HOIST2-NEXT: decl %ebx
; HOIST2-NEXT: jne .LBB1_2
; HOIST2-NEXT: .LBB1_3: # %while.end
diff --git a/llvm/test/CodeGen/X86/var-permute-128.ll b/llvm/test/CodeGen/X86/var-permute-128.ll
index 99a3821..f2240a9 100644
--- a/llvm/test/CodeGen/X86/var-permute-128.ll
+++ b/llvm/test/CodeGen/X86/var-permute-128.ll
@@ -1101,17 +1101,13 @@ define <16 x i8> @var_shuffle_v16i8_from_v32i8_v16i8(<32 x i8> %v, <16 x i8> %in
define void @indices_convert() {
; SSE3-LABEL: indices_convert:
; SSE3: # %bb.0: # %bb
-; SSE3-NEXT: movdqa (%rax), %xmm0
-; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE3-NEXT: movd %xmm1, %eax
-; SSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movaps (%rax), %xmm0
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movl (%rax), %eax
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE3-NEXT: andl $3, %eax
-; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
-; SSE3-NEXT: movd %xmm1, %ecx
-; SSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE3-NEXT: andl $3, %ecx
; SSE3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE3-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE3-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -1120,17 +1116,13 @@ define void @indices_convert() {
;
; SSSE3-LABEL: indices_convert:
; SSSE3: # %bb.0: # %bb
-; SSSE3-NEXT: movdqa (%rax), %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSSE3-NEXT: movd %xmm1, %eax
-; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movaps (%rax), %xmm0
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movl (%rax), %eax
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: andl $3, %eax
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
-; SSSE3-NEXT: movd %xmm1, %ecx
-; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSSE3-NEXT: andl $3, %ecx
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSSE3-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
diff --git a/llvm/test/CodeGen/X86/vec_int_to_fp.ll b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
index 7bbcdee..e26de4b 100644
--- a/llvm/test/CodeGen/X86/vec_int_to_fp.ll
+++ b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
@@ -2911,23 +2911,12 @@ define <8 x float> @uitofp_16i8_to_8f32(<16 x i8> %a) {
;
define <2 x double> @sitofp_load_2i64_to_2f64(ptr%a) {
-; SSE2-LABEL: sitofp_load_2i64_to_2f64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: cvtsi2sdq (%rdi), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2sd %rax, %xmm1
-; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: sitofp_load_2i64_to_2f64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: cvtsi2sdq 8(%rdi), %xmm1
-; SSE41-NEXT: cvtsi2sdq (%rdi), %xmm0
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE41-NEXT: retq
+; SSE-LABEL: sitofp_load_2i64_to_2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: cvtsi2sdq 8(%rdi), %xmm1
+; SSE-NEXT: cvtsi2sdq (%rdi), %xmm0
+; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_load_2i64_to_2f64:
; VEX: # %bb.0:
@@ -3093,35 +3082,16 @@ define <2 x double> @sitofp_load_2i8_to_2f64(ptr%a) {
}
define <4 x double> @sitofp_load_4i64_to_4f64(ptr%a) {
-; SSE2-LABEL: sitofp_load_4i64_to_4f64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa 16(%rdi), %xmm2
-; SSE2-NEXT: cvtsi2sdq (%rdi), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2sd %rax, %xmm1
-; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2sdq 16(%rdi), %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2sd %rax, %xmm2
-; SSE2-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: sitofp_load_4i64_to_4f64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: cvtsi2sdq 8(%rdi), %xmm1
-; SSE41-NEXT: cvtsi2sdq (%rdi), %xmm0
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE41-NEXT: cvtsi2sdq 24(%rdi), %xmm2
-; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: cvtsi2sdq 16(%rdi), %xmm1
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE41-NEXT: retq
+; SSE-LABEL: sitofp_load_4i64_to_4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: cvtsi2sdq 8(%rdi), %xmm1
+; SSE-NEXT: cvtsi2sdq (%rdi), %xmm0
+; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: cvtsi2sdq 24(%rdi), %xmm2
+; SSE-NEXT: xorps %xmm1, %xmm1
+; SSE-NEXT: cvtsi2sdq 16(%rdi), %xmm1
+; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_load_4i64_to_4f64:
; VEX: # %bb.0:
@@ -3865,22 +3835,14 @@ define <4 x double> @uitofp_load_4i8_to_4f64(ptr%a) {
define <4 x float> @sitofp_load_4i64_to_4f32(ptr%a) {
; SSE2-LABEL: sitofp_load_4i64_to_4f32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa 16(%rdi), %xmm0
-; SSE2-NEXT: cvtsi2ssq 16(%rdi), %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ss %rax, %xmm0
-; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-NEXT: cvtsi2ssq 24(%rdi), %xmm0
+; SSE2-NEXT: cvtsi2ssq 16(%rdi), %xmm1
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: cvtsi2ssq 8(%rdi), %xmm2
; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: cvtsi2ssq (%rdi), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: sitofp_load_4i64_to_4f32:
@@ -4015,39 +3977,24 @@ define <4 x float> @sitofp_load_4i8_to_4f32(ptr%a) {
define <8 x float> @sitofp_load_8i64_to_8f32(ptr%a) {
; SSE2-LABEL: sitofp_load_8i64_to_8f32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa 16(%rdi), %xmm0
-; SSE2-NEXT: movdqa 32(%rdi), %xmm2
-; SSE2-NEXT: movdqa 48(%rdi), %xmm3
-; SSE2-NEXT: cvtsi2ssq 16(%rdi), %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ss %rax, %xmm0
-; SSE2-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
+; SSE2-NEXT: cvtsi2ssq 24(%rdi), %xmm0
+; SSE2-NEXT: cvtsi2ssq 16(%rdi), %xmm1
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: cvtsi2ssq 8(%rdi), %xmm2
; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: cvtsi2ssq (%rdi), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; SSE2-NEXT: xorps %xmm4, %xmm4
-; SSE2-NEXT: cvtsi2ssq 48(%rdi), %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rax
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
-; SSE2-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE2-NEXT: cvtsi2ssq 56(%rdi), %xmm1
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: cvtsi2ssq 48(%rdi), %xmm2
+; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: cvtsi2ssq 40(%rdi), %xmm3
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: cvtsi2ssq 32(%rdi), %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2ss %rax, %xmm2
-; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: sitofp_load_8i64_to_8f32:
@@ -4256,70 +4203,64 @@ define <8 x float> @sitofp_load_8i8_to_8f32(ptr%a) {
define <4 x float> @uitofp_load_4i64_to_4f32(ptr%a) {
; SSE2-LABEL: uitofp_load_4i64_to_4f32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa 16(%rdi), %xmm0
-; SSE2-NEXT: movq 16(%rdi), %rax
+; SSE2-NEXT: movq 24(%rdi), %rax
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB83_1
; SSE2-NEXT: # %bb.2:
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
+; SSE2-NEXT: cvtsi2ss %rax, %xmm0
; SSE2-NEXT: jmp .LBB83_3
; SSE2-NEXT: .LBB83_1:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
-; SSE2-NEXT: addss %xmm1, %xmm1
+; SSE2-NEXT: cvtsi2ss %rax, %xmm0
+; SSE2-NEXT: addss %xmm0, %xmm0
; SSE2-NEXT: .LBB83_3:
-; SSE2-NEXT: movq (%rdi), %rax
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: testq %rcx, %rcx
+; SSE2-NEXT: movq 16(%rdi), %rax
+; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB83_4
; SSE2-NEXT: # %bb.5:
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm2
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
; SSE2-NEXT: jmp .LBB83_6
; SSE2-NEXT: .LBB83_4:
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: shrq %rcx
+; SSE2-NEXT: andl $1, %eax
+; SSE2-NEXT: orq %rcx, %rax
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
+; SSE2-NEXT: addss %xmm1, %xmm1
+; SSE2-NEXT: .LBB83_6:
+; SSE2-NEXT: movq (%rdi), %rax
+; SSE2-NEXT: movq 8(%rdi), %rcx
+; SSE2-NEXT: testq %rcx, %rcx
+; SSE2-NEXT: js .LBB83_7
+; SSE2-NEXT: # %bb.8:
+; SSE2-NEXT: cvtsi2ss %rcx, %xmm2
+; SSE2-NEXT: jmp .LBB83_9
+; SSE2-NEXT: .LBB83_7:
; SSE2-NEXT: movq %rcx, %rdx
; SSE2-NEXT: shrq %rdx
; SSE2-NEXT: andl $1, %ecx
; SSE2-NEXT: orq %rdx, %rcx
; SSE2-NEXT: cvtsi2ss %rcx, %xmm2
; SSE2-NEXT: addss %xmm2, %xmm2
-; SSE2-NEXT: .LBB83_6:
-; SSE2-NEXT: movdqa (%rdi), %xmm3
-; SSE2-NEXT: testq %rax, %rax
-; SSE2-NEXT: js .LBB83_7
-; SSE2-NEXT: # %bb.8:
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ss %rax, %xmm0
-; SSE2-NEXT: jmp .LBB83_9
-; SSE2-NEXT: .LBB83_7:
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: shrq %rcx
-; SSE2-NEXT: andl $1, %eax
-; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ss %rax, %xmm0
-; SSE2-NEXT: addss %xmm0, %xmm0
; SSE2-NEXT: .LBB83_9:
-; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB83_10
; SSE2-NEXT: # %bb.11:
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2ss %rax, %xmm2
+; SSE2-NEXT: xorps %xmm0, %xmm0
+; SSE2-NEXT: cvtsi2ss %rax, %xmm0
; SSE2-NEXT: jmp .LBB83_12
; SSE2-NEXT: .LBB83_10:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2ss %rax, %xmm2
-; SSE2-NEXT: addss %xmm2, %xmm2
+; SSE2-NEXT: xorps %xmm0, %xmm0
+; SSE2-NEXT: cvtsi2ss %rax, %xmm0
+; SSE2-NEXT: addss %xmm0, %xmm0
; SSE2-NEXT: .LBB83_12:
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -4591,8 +4532,7 @@ define <4 x float> @uitofp_load_4i8_to_4f32(ptr%a) {
define <8 x float> @uitofp_load_8i64_to_8f32(ptr%a) {
; SSE2-LABEL: uitofp_load_8i64_to_8f32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa 16(%rdi), %xmm0
-; SSE2-NEXT: movq 16(%rdi), %rax
+; SSE2-NEXT: movq 24(%rdi), %rax
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_1
; SSE2-NEXT: # %bb.2:
@@ -4606,127 +4546,114 @@ define <8 x float> @uitofp_load_8i64_to_8f32(ptr%a) {
; SSE2-NEXT: cvtsi2ss %rax, %xmm2
; SSE2-NEXT: addss %xmm2, %xmm2
; SSE2-NEXT: .LBB87_3:
-; SSE2-NEXT: movq (%rdi), %rax
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: testq %rcx, %rcx
+; SSE2-NEXT: movq 16(%rdi), %rax
+; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_4
; SSE2-NEXT: # %bb.5:
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm1
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
; SSE2-NEXT: jmp .LBB87_6
; SSE2-NEXT: .LBB87_4:
-; SSE2-NEXT: movq %rcx, %rdx
-; SSE2-NEXT: shrq %rdx
-; SSE2-NEXT: andl $1, %ecx
-; SSE2-NEXT: orq %rdx, %rcx
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm1
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: shrq %rcx
+; SSE2-NEXT: andl $1, %eax
+; SSE2-NEXT: orq %rcx, %rax
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
; SSE2-NEXT: addss %xmm1, %xmm1
; SSE2-NEXT: .LBB87_6:
-; SSE2-NEXT: movdqa (%rdi), %xmm3
-; SSE2-NEXT: testq %rax, %rax
+; SSE2-NEXT: movq (%rdi), %rax
+; SSE2-NEXT: movq 8(%rdi), %rcx
+; SSE2-NEXT: testq %rcx, %rcx
; SSE2-NEXT: js .LBB87_7
; SSE2-NEXT: # %bb.8:
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ss %rax, %xmm0
-; SSE2-NEXT: jmp .LBB87_9
-; SSE2-NEXT: .LBB87_7:
+; SSE2-NEXT: cvtsi2ss %rcx, %xmm3
+; SSE2-NEXT: testq %rax, %rax
+; SSE2-NEXT: jns .LBB87_11
+; SSE2-NEXT: .LBB87_10:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: cvtsi2ss %rax, %xmm0
; SSE2-NEXT: addss %xmm0, %xmm0
-; SSE2-NEXT: .LBB87_9:
-; SSE2-NEXT: movq 48(%rdi), %rax
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm3, %rcx
-; SSE2-NEXT: testq %rcx, %rcx
-; SSE2-NEXT: js .LBB87_10
-; SSE2-NEXT: # %bb.11:
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm4
; SSE2-NEXT: jmp .LBB87_12
-; SSE2-NEXT: .LBB87_10:
+; SSE2-NEXT: .LBB87_7:
; SSE2-NEXT: movq %rcx, %rdx
; SSE2-NEXT: shrq %rdx
; SSE2-NEXT: andl $1, %ecx
; SSE2-NEXT: orq %rdx, %rcx
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm4
-; SSE2-NEXT: addss %xmm4, %xmm4
+; SSE2-NEXT: cvtsi2ss %rcx, %xmm3
+; SSE2-NEXT: addss %xmm3, %xmm3
+; SSE2-NEXT: testq %rax, %rax
+; SSE2-NEXT: js .LBB87_10
+; SSE2-NEXT: .LBB87_11:
+; SSE2-NEXT: cvtsi2ss %rax, %xmm0
; SSE2-NEXT: .LBB87_12:
-; SSE2-NEXT: movdqa 48(%rdi), %xmm5
+; SSE2-NEXT: movq 56(%rdi), %rax
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_13
; SSE2-NEXT: # %bb.14:
-; SSE2-NEXT: xorps %xmm3, %xmm3
-; SSE2-NEXT: cvtsi2ss %rax, %xmm3
+; SSE2-NEXT: cvtsi2ss %rax, %xmm5
; SSE2-NEXT: jmp .LBB87_15
; SSE2-NEXT: .LBB87_13:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm3, %xmm3
-; SSE2-NEXT: cvtsi2ss %rax, %xmm3
-; SSE2-NEXT: addss %xmm3, %xmm3
+; SSE2-NEXT: cvtsi2ss %rax, %xmm5
+; SSE2-NEXT: addss %xmm5, %xmm5
; SSE2-NEXT: .LBB87_15:
-; SSE2-NEXT: movq 32(%rdi), %rax
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
-; SSE2-NEXT: movq %xmm5, %rcx
-; SSE2-NEXT: testq %rcx, %rcx
+; SSE2-NEXT: movq 48(%rdi), %rax
+; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_16
; SSE2-NEXT: # %bb.17:
-; SSE2-NEXT: xorps %xmm5, %xmm5
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm5
+; SSE2-NEXT: cvtsi2ss %rax, %xmm4
; SSE2-NEXT: jmp .LBB87_18
; SSE2-NEXT: .LBB87_16:
-; SSE2-NEXT: movq %rcx, %rdx
-; SSE2-NEXT: shrq %rdx
-; SSE2-NEXT: andl $1, %ecx
-; SSE2-NEXT: orq %rdx, %rcx
-; SSE2-NEXT: xorps %xmm5, %xmm5
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm5
-; SSE2-NEXT: addss %xmm5, %xmm5
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: shrq %rcx
+; SSE2-NEXT: andl $1, %eax
+; SSE2-NEXT: orq %rcx, %rax
+; SSE2-NEXT: cvtsi2ss %rax, %xmm4
+; SSE2-NEXT: addss %xmm4, %xmm4
; SSE2-NEXT: .LBB87_18:
-; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSE2-NEXT: movdqa 32(%rdi), %xmm4
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT: movq 40(%rdi), %rax
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_19
; SSE2-NEXT: # %bb.20:
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: cvtsi2ss %rax, %xmm2
; SSE2-NEXT: jmp .LBB87_21
; SSE2-NEXT: .LBB87_19:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
-; SSE2-NEXT: addss %xmm1, %xmm1
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: cvtsi2ss %rax, %xmm2
+; SSE2-NEXT: addss %xmm2, %xmm2
; SSE2-NEXT: .LBB87_21:
-; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE2-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: movq 32(%rdi), %rax
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_22
; SSE2-NEXT: # %bb.23:
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2ss %rax, %xmm2
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
; SSE2-NEXT: jmp .LBB87_24
; SSE2-NEXT: .LBB87_22:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2ss %rax, %xmm2
-; SSE2-NEXT: addss %xmm2, %xmm2
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
+; SSE2-NEXT: addss %xmm1, %xmm1
; SSE2-NEXT: .LBB87_24:
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm4[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: uitofp_load_8i64_to_8f32:
diff --git a/llvm/test/CodeGen/X86/vector-half-conversions.ll b/llvm/test/CodeGen/X86/vector-half-conversions.ll
index ba21af2..563cf01 100644
--- a/llvm/test/CodeGen/X86/vector-half-conversions.ll
+++ b/llvm/test/CodeGen/X86/vector-half-conversions.ll
@@ -4989,3 +4989,257 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2 x half> %a) nounwind {
%ext = shufflevector <2 x i32> %cvt, <2 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
ret <4 x i32> %ext
}
+
+define <4 x i32> @fptosi_4f16_to_4i32(<4 x half> %a) nounwind {
+; AVX-LABEL: fptosi_4f16_to_4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: subq $72, %rsp
+; AVX-NEXT: vmovdqa %xmm0, %xmm1
+; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vpsrlq $48, %xmm1, %xmm0
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $72, %rsp
+; AVX-NEXT: retq
+;
+; F16C-LABEL: fptosi_4f16_to_4i32:
+; F16C: # %bb.0:
+; F16C-NEXT: vcvtph2ps %xmm0, %ymm0
+; F16C-NEXT: vcvttps2dq %ymm0, %ymm0
+; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; F16C-NEXT: vzeroupper
+; F16C-NEXT: retq
+;
+; AVX512-LABEL: fptosi_4f16_to_4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
+; AVX512-NEXT: vcvttps2dq %ymm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %cvt = fptosi <4 x half> %a to <4 x i32>
+ ret <4 x i32> %cvt
+}
+
+define <4 x i32> @fptoui_2f16_to_4i32(<2 x half> %a) nounwind {
+; AVX1-LABEL: fptoui_2f16_to_4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: subq $40, %rsp
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX1-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX1-NEXT: addq $40, %rsp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: fptoui_2f16_to_4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: subq $40, %rsp
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
+; AVX2-NEXT: vsubps %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX2-NEXT: addq $40, %rsp
+; AVX2-NEXT: retq
+;
+; F16C-LABEL: fptoui_2f16_to_4i32:
+; F16C: # %bb.0:
+; F16C-NEXT: vpsrld $16, %xmm0, %xmm1
+; F16C-NEXT: vcvtph2ps %xmm1, %xmm1
+; F16C-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; F16C-NEXT: vcvttps2dq %xmm0, %xmm1
+; F16C-NEXT: vpsrad $31, %xmm1, %xmm2
+; F16C-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; F16C-NEXT: vcvttps2dq %xmm0, %xmm0
+; F16C-NEXT: vpand %xmm2, %xmm0, %xmm0
+; F16C-NEXT: vpor %xmm0, %xmm1, %xmm0
+; F16C-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; F16C-NEXT: retq
+;
+; AVX512F-LABEL: fptoui_2f16_to_4i32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512F-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
+; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512-FASTLANE-LABEL: fptoui_2f16_to_4i32:
+; AVX512-FASTLANE: # %bb.0:
+; AVX512-FASTLANE-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-FASTLANE-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512-FASTLANE-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-FASTLANE-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-FASTLANE-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX512-FASTLANE-NEXT: vcvttps2udq %xmm0, %xmm0
+; AVX512-FASTLANE-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512-FASTLANE-NEXT: retq
+ %cvt = fptoui <2 x half> %a to <2 x i32>
+ %ext = shufflevector <2 x i32> %cvt, <2 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %ext
+}
+
+define <4 x i32> @fptoui_4f16_to_4i32(<4 x half> %a) nounwind {
+; AVX1-LABEL: fptoui_4f16_to_4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: subq $72, %rsp
+; AVX1-NEXT: vmovdqa %xmm0, %xmm1
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX1-NEXT: vpsrlq $48, %xmm1, %xmm0
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX1-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX1-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX1-NEXT: addq $72, %rsp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: fptoui_4f16_to_4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: subq $72, %rsp
+; AVX2-NEXT: vmovdqa %xmm0, %xmm1
+; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX2-NEXT: vpsrlq $48, %xmm1, %xmm0
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
+; AVX2-NEXT: vsubps %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
+; AVX2-NEXT: vsubps %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX2-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX2-NEXT: addq $72, %rsp
+; AVX2-NEXT: retq
+;
+; F16C-LABEL: fptoui_4f16_to_4i32:
+; F16C: # %bb.0:
+; F16C-NEXT: vcvtph2ps %xmm0, %ymm0
+; F16C-NEXT: vcvttps2dq %ymm0, %ymm1
+; F16C-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; F16C-NEXT: vcvttps2dq %ymm0, %ymm0
+; F16C-NEXT: vorps %ymm0, %ymm1, %ymm0
+; F16C-NEXT: vblendvps %ymm1, %ymm0, %ymm1, %ymm0
+; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; F16C-NEXT: vzeroupper
+; F16C-NEXT: retq
+;
+; AVX512F-LABEL: fptoui_4f16_to_4i32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vcvtph2ps %xmm0, %ymm0
+; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512-FASTLANE-LABEL: fptoui_4f16_to_4i32:
+; AVX512-FASTLANE: # %bb.0:
+; AVX512-FASTLANE-NEXT: vcvtph2ps %xmm0, %ymm0
+; AVX512-FASTLANE-NEXT: vcvttps2udq %ymm0, %ymm0
+; AVX512-FASTLANE-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-FASTLANE-NEXT: vzeroupper
+; AVX512-FASTLANE-NEXT: retq
+ %cvt = fptoui <4 x half> %a to <4 x i32>
+ ret <4 x i32> %cvt
+}
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
index 2b539ae..f56c43e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
@@ -2208,15 +2208,13 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
; AVX2-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,ymm3[19,20],zero,ymm3[22],zero,ymm3[24],zero,ymm3[22,23],zero,ymm3[25],zero,ymm3[23]
-; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[19],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25]
-; AVX2-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
; AVX2-NEXT: vpor %ymm9, %ymm10, %ymm9
+; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm10 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0]
; AVX2-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8
; AVX2-NEXT: vpshufd {{.*#+}} ymm9 = ymm0[0,2,1,1,4,6,5,5]
@@ -2284,15 +2282,13 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
; AVX2-FP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm6
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm7 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,ymm3[19,20],zero,ymm3[22],zero,ymm3[24],zero,ymm3[22,23],zero,ymm3[25],zero,ymm3[23]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FP-NEXT: vpor %ymm7, %ymm8, %ymm7
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[19],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0]
; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm8 = ymm0[0,2,1,1,4,6,5,5]
@@ -2300,15 +2296,13 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,ymm1[29,26],zero,ymm1[28],zero,ymm1[30],zero,ymm1[28,29],zero,ymm1[31],zero,ymm1[29]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[29,26],zero,ymm3[28],zero,ymm3[26,27,28,29],zero,ymm3[31],zero,ymm3[29,30],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,zero,zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
; AVX2-FP-NEXT: vpor %ymm9, %ymm10, %ymm9
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u]
; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8
; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm9 = ymm0[2,2,3,3,6,6,7,7]
@@ -2375,15 +2369,13 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm6
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,ymm3[19,20],zero,ymm3[22],zero,ymm3[24],zero,ymm3[22,23],zero,ymm3[25],zero,ymm3[23]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[19],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0]
; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [4,6,5,5,5,5,4,6]
@@ -2391,15 +2383,13 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,ymm1[29,26],zero,ymm1[28],zero,ymm1[30],zero,ymm1[28,29],zero,ymm1[31],zero,ymm1[29]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[29,26],zero,ymm3[28],zero,ymm3[26,27,28,29],zero,ymm3[31],zero,ymm3[29,30],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,zero,zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
; AVX2-FCP-NEXT: vpor %ymm9, %ymm10, %ymm9
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u]
; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [6,6,6,6,7,7,7,7]
@@ -2430,10 +2420,10 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX512-LABEL: store_i8_stride5_vf32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512-NEXT: vmovdqa (%rsi), %ymm2
-; AVX512-NEXT: vmovdqa (%rdx), %ymm3
-; AVX512-NEXT: vmovdqa (%rcx), %ymm4
+; AVX512-NEXT: vmovdqa (%rdi), %ymm3
+; AVX512-NEXT: vmovdqa (%rsi), %ymm4
+; AVX512-NEXT: vmovdqa (%rdx), %ymm1
+; AVX512-NEXT: vmovdqa (%rcx), %ymm2
; AVX512-NEXT: vmovdqa (%r8), %ymm0
; AVX512-NEXT: vmovdqa (%rdi), %xmm5
; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[8],zero,xmm5[u,7],zero,xmm5[9],zero,xmm5[u],zero,xmm5[u,10],zero,xmm5[12],zero,xmm5[u,11]
@@ -2463,45 +2453,40 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
; AVX512-NEXT: vpermd %zmm6, %zmm8, %zmm6
; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6
-; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[u,u,u],zero,ymm4[13,u,u,u],zero,ymm4[14,u,u,u],zero,ymm4[15,u,u,u],zero,ymm4[16,u,u,u],zero,ymm4[17,u,u,u],zero,ymm4[18,u,u]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,13],zero,ymm3[u,u,u,14],zero,ymm3[u,u,u,15],zero,ymm3[u,u,u,16],zero,ymm3[u,u,u,17],zero,ymm3[u,u,u,18],zero,ymm3[u,u]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u],zero,ymm2[13,u,u,u],zero,ymm2[14,u,u,u],zero,ymm2[15,u,u,u],zero,ymm2[16,u,u,u],zero,ymm2[17,u,u,u],zero,ymm2[18,u,u]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,13],zero,ymm1[u,u,u,14],zero,ymm1[u,u,u,15],zero,ymm1[u,u,u,16],zero,ymm1[u,u,u,17],zero,ymm1[u,u,u,18],zero,ymm1[u,u]
; AVX512-NEXT: vpor %ymm5, %ymm8, %ymm5
-; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u],zero,ymm2[13,u,u,u],zero,ymm2[14,u,u,u],zero,ymm2[15,u,u,u],zero,ymm2[16,u,u,u],zero,ymm2[17,u,u,u],zero,ymm2[18,u,u,u],zero
-; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,13],zero,ymm1[u,u,u,14],zero,ymm1[u,u,u,15],zero,ymm1[u,u,u,16],zero,ymm1[u,u,u,17],zero,ymm1[u,u,u,18],zero,ymm1[u,u,u,19]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u],zero,ymm4[13,u,u,u],zero,ymm4[14,u,u,u],zero,ymm4[15,u,u,u],zero,ymm4[16,u,u,u],zero,ymm4[17,u,u,u],zero,ymm4[18,u,u,u],zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,13],zero,ymm3[u,u,u,14],zero,ymm3[u,u,u,15],zero,ymm3[u,u,u,16],zero,ymm3[u,u,u,17],zero,ymm3[u,u,u,18],zero,ymm3[u,u,u,19]
; AVX512-NEXT: vpor %ymm8, %ymm9, %ymm8
; AVX512-NEXT: vpternlogq $226, %ymm5, %ymm11, %ymm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21,u],zero,ymm4[20],zero,ymm4[22],zero,ymm4[24,u],zero,ymm4[23],zero,ymm4[25,u]
+; AVX512-NEXT: vpor %ymm5, %ymm9, %ymm5
; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm1[21],zero,ymm1[21,20],zero,ymm1[22],zero,ymm1[24],zero,ymm1[22,23],zero,ymm1[25]
+; AVX512-NEXT: vpor %ymm9, %ymm10, %ymm9
; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
-; AVX512-NEXT: vpternlogq $248, %ymm10, %ymm5, %ymm9
-; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero
-; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm3[21],zero,ymm3[21,20],zero,ymm3[22],zero,ymm3[24],zero,ymm3[22,23],zero,ymm3[25]
-; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512-NEXT: vpor %ymm5, %ymm11, %ymm5
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm5
-; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm5
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7]
; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[12],zero,zero,zero,zero,ymm0[13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,ymm0[17],zero,zero,zero,zero,ymm0[18],zero
; AVX512-NEXT: vpshufd {{.*#+}} ymm9 = ymm0[0,2,1,1,4,6,5,5]
; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,3,2]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
-; AVX512-NEXT: vpandn %ymm9, %ymm11, %ymm9
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
+; AVX512-NEXT: vpandn %ymm9, %ymm10, %ymm9
; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,27,u,u,26,u,28,u,30,u,u,29,u,31,u]
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[27],zero,zero,ymm3[26],zero,ymm3[28],zero,ymm3[30],zero,zero,ymm3[29],zero,ymm3[31],zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[26],zero,ymm3[28],zero,zero,ymm3[27],zero,ymm3[29],zero,ymm3[31],zero,zero,ymm3[30],zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u],zero,ymm4[26],zero,ymm4[28,u],zero,ymm4[u],zero,ymm4[29],zero,ymm4[31,u],zero,ymm4[30]
+; AVX512-NEXT: vpor %ymm3, %ymm4, %ymm3
; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512-NEXT: vpternlogq $248, %ymm10, %ymm4, %ymm3
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,ymm1[26],zero,ymm1[28],zero,ymm1[30],zero,zero,ymm1[29],zero,ymm1[31],zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27,u],zero,ymm2[26],zero,ymm2[28],zero,ymm2[30,u],zero,ymm2[29],zero,ymm2[31,u]
+; AVX512-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1
-; AVX512-NEXT: vpternlogq $184, %ymm3, %ymm7, %ymm1
+; AVX512-NEXT: vpternlogq $226, %ymm3, %ymm7, %ymm1
; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,2,3,3,6,6,7,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
@@ -2513,10 +2498,10 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX512-FCP-LABEL: store_i8_stride5_vf32:
; AVX512-FCP: # %bb.0:
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm3
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm0
+; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm1
; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm4
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm4[8],zero,xmm4[u,7],zero,xmm4[9],zero,xmm4[u],zero,xmm4[u,10],zero,xmm4[12],zero,xmm4[u,11]
; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm6
@@ -2545,26 +2530,23 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
; AVX512-FCP-NEXT: vpermd %zmm4, %zmm7, %zmm7
; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm7
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u],zero,ymm3[13,u,u,u],zero,ymm3[14,u,u,u],zero,ymm3[15,u,u,u],zero,ymm3[16,u,u,u],zero,ymm3[17,u,u,u],zero,ymm3[18,u,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,13],zero,ymm2[u,u,u,14],zero,ymm2[u,u,u,15],zero,ymm2[u,u,u,16],zero,ymm2[u,u,u,17],zero,ymm2[u,u,u,18],zero,ymm2[u,u]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u],zero,ymm1[13,u,u,u],zero,ymm1[14,u,u,u],zero,ymm1[15,u,u,u],zero,ymm1[16,u,u,u],zero,ymm1[17,u,u,u],zero,ymm1[18,u,u]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,13],zero,ymm0[u,u,u,14],zero,ymm0[u,u,u,15],zero,ymm0[u,u,u,16],zero,ymm0[u,u,u,17],zero,ymm0[u,u,u,18],zero,ymm0[u,u]
; AVX512-FCP-NEXT: vpor %ymm5, %ymm8, %ymm5
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u],zero,ymm1[13,u,u,u],zero,ymm1[14,u,u,u],zero,ymm1[15,u,u,u],zero,ymm1[16,u,u,u],zero,ymm1[17,u,u,u],zero,ymm1[18,u,u,u],zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm0[u,13],zero,ymm0[u,u,u,14],zero,ymm0[u,u,u,15],zero,ymm0[u,u,u,16],zero,ymm0[u,u,u,17],zero,ymm0[u,u,u,18],zero,ymm0[u,u,u,19]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u],zero,ymm3[13,u,u,u],zero,ymm3[14,u,u,u],zero,ymm3[15,u,u,u],zero,ymm3[16,u,u,u],zero,ymm3[17,u,u,u],zero,ymm3[18,u,u,u],zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,13],zero,ymm2[u,u,u,14],zero,ymm2[u,u,u,15],zero,ymm2[u,u,u,16],zero,ymm2[u,u,u,17],zero,ymm2[u,u,u,18],zero,ymm2[u,u,u,19]
; AVX512-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
; AVX512-FCP-NEXT: vpternlogq $226, %ymm5, %ymm10, %ymm8
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero,ymm2[25],zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm3[21,u],zero,ymm3[20],zero,ymm3[22],zero,ymm3[24,u],zero,ymm3[23],zero,ymm3[25,u]
+; AVX512-FCP-NEXT: vpor %ymm5, %ymm9, %ymm5
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[21],zero,zero,ymm0[20],zero,ymm0[22],zero,ymm0[24],zero,zero,ymm0[23],zero,ymm0[25],zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[19],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
+; AVX512-FCP-NEXT: vpor %ymm9, %ymm10, %ymm9
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm10, %ymm5, %ymm9
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[19],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm2[21],zero,ymm2[21,20],zero,ymm2[22],zero,ymm2[24],zero,ymm2[22,23],zero,ymm2[25]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512-FCP-NEXT: vpor %ymm5, %ymm11, %ymm5
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm5
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm5
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [4,0,5,5,5,5,0,6]
; AVX512-FCP-NEXT: vpermd %ymm4, %ymm8, %ymm8
@@ -2573,17 +2555,15 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[12],zero,zero,zero,zero,ymm4[13],zero,zero,zero,zero,ymm4[14],zero,zero,zero,zero,ymm4[15],zero,zero,zero,zero,ymm4[16],zero,zero,zero,zero,ymm4[17],zero,zero,zero,zero,ymm4[18],zero
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm9, %zmm8
; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,27,u,u,26,u,28,u,30,u,u,29,u,31,u]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[26],zero,ymm2[28],zero,zero,ymm2[27],zero,ymm2[29],zero,ymm2[31],zero,zero,ymm2[30],zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u],zero,ymm3[26],zero,ymm3[28,u],zero,ymm3[u],zero,ymm3[29],zero,ymm3[31,u],zero,ymm3[30]
+; AVX512-FCP-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm10, %ymm3, %ymm2
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm0[26],zero,ymm0[28],zero,zero,ymm0[27],zero,ymm0[29],zero,ymm0[31],zero,zero,ymm0[30],zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,zero,ymm0[26],zero,ymm0[28],zero,ymm0[30],zero,zero,ymm0[29],zero,ymm0[31],zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm1[27,u],zero,ymm1[26],zero,ymm1[28],zero,ymm1[30,u],zero,ymm1[29],zero,ymm1[31,u]
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm2, %ymm6, %ymm0
+; AVX512-FCP-NEXT: vpternlogq $226, %ymm2, %ymm6, %ymm0
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [6,6,6,0,7,7,7,7]
; AVX512-FCP-NEXT: vpermd %ymm4, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
@@ -2595,10 +2575,10 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX512DQ-LABEL: store_i8_stride5_vf32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm2
-; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm3
-; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm4
+; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm3
+; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm4
+; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm1
+; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm2
; AVX512DQ-NEXT: vmovdqa (%r8), %ymm0
; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm5
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[8],zero,xmm5[u,7],zero,xmm5[9],zero,xmm5[u],zero,xmm5[u,10],zero,xmm5[12],zero,xmm5[u,11]
@@ -2628,45 +2608,40 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
; AVX512DQ-NEXT: vpermd %zmm6, %zmm8, %zmm6
; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[u,u,u],zero,ymm4[13,u,u,u],zero,ymm4[14,u,u,u],zero,ymm4[15,u,u,u],zero,ymm4[16,u,u,u],zero,ymm4[17,u,u,u],zero,ymm4[18,u,u]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,13],zero,ymm3[u,u,u,14],zero,ymm3[u,u,u,15],zero,ymm3[u,u,u,16],zero,ymm3[u,u,u,17],zero,ymm3[u,u,u,18],zero,ymm3[u,u]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u],zero,ymm2[13,u,u,u],zero,ymm2[14,u,u,u],zero,ymm2[15,u,u,u],zero,ymm2[16,u,u,u],zero,ymm2[17,u,u,u],zero,ymm2[18,u,u]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,13],zero,ymm1[u,u,u,14],zero,ymm1[u,u,u,15],zero,ymm1[u,u,u,16],zero,ymm1[u,u,u,17],zero,ymm1[u,u,u,18],zero,ymm1[u,u]
; AVX512DQ-NEXT: vpor %ymm5, %ymm8, %ymm5
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u],zero,ymm2[13,u,u,u],zero,ymm2[14,u,u,u],zero,ymm2[15,u,u,u],zero,ymm2[16,u,u,u],zero,ymm2[17,u,u,u],zero,ymm2[18,u,u,u],zero
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,13],zero,ymm1[u,u,u,14],zero,ymm1[u,u,u,15],zero,ymm1[u,u,u,16],zero,ymm1[u,u,u,17],zero,ymm1[u,u,u,18],zero,ymm1[u,u,u,19]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u],zero,ymm4[13,u,u,u],zero,ymm4[14,u,u,u],zero,ymm4[15,u,u,u],zero,ymm4[16,u,u,u],zero,ymm4[17,u,u,u],zero,ymm4[18,u,u,u],zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,13],zero,ymm3[u,u,u,14],zero,ymm3[u,u,u,15],zero,ymm3[u,u,u,16],zero,ymm3[u,u,u,17],zero,ymm3[u,u,u,18],zero,ymm3[u,u,u,19]
; AVX512DQ-NEXT: vpor %ymm8, %ymm9, %ymm8
; AVX512DQ-NEXT: vpternlogq $226, %ymm5, %ymm11, %ymm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21,u],zero,ymm4[20],zero,ymm4[22],zero,ymm4[24,u],zero,ymm4[23],zero,ymm4[25,u]
+; AVX512DQ-NEXT: vpor %ymm5, %ymm9, %ymm5
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm1[21],zero,ymm1[21,20],zero,ymm1[22],zero,ymm1[24],zero,ymm1[22,23],zero,ymm1[25]
+; AVX512DQ-NEXT: vpor %ymm9, %ymm10, %ymm9
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm10 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
-; AVX512DQ-NEXT: vpternlogq $248, %ymm10, %ymm5, %ymm9
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm3[21],zero,ymm3[21,20],zero,ymm3[22],zero,ymm3[24],zero,ymm3[22,23],zero,ymm3[25]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512DQ-NEXT: vpor %ymm5, %ymm11, %ymm5
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm5
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm5
; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[12],zero,zero,zero,zero,ymm0[13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,ymm0[17],zero,zero,zero,zero,ymm0[18],zero
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm9 = ymm0[0,2,1,1,4,6,5,5]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,3,2]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
-; AVX512DQ-NEXT: vpandn %ymm9, %ymm11, %ymm9
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
+; AVX512DQ-NEXT: vpandn %ymm9, %ymm10, %ymm9
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,27,u,u,26,u,28,u,30,u,u,29,u,31,u]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[27],zero,zero,ymm3[26],zero,ymm3[28],zero,ymm3[30],zero,zero,ymm3[29],zero,ymm3[31],zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[26],zero,ymm3[28],zero,zero,ymm3[27],zero,ymm3[29],zero,ymm3[31],zero,zero,ymm3[30],zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u],zero,ymm4[26],zero,ymm4[28,u],zero,ymm4[u],zero,ymm4[29],zero,ymm4[31,u],zero,ymm4[30]
+; AVX512DQ-NEXT: vpor %ymm3, %ymm4, %ymm3
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512DQ-NEXT: vpternlogq $248, %ymm10, %ymm4, %ymm3
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,ymm1[26],zero,ymm1[28],zero,ymm1[30],zero,zero,ymm1[29],zero,ymm1[31],zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27,u],zero,ymm2[26],zero,ymm2[28],zero,ymm2[30,u],zero,ymm2[29],zero,ymm2[31,u]
+; AVX512DQ-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1
-; AVX512DQ-NEXT: vpternlogq $184, %ymm3, %ymm7, %ymm1
+; AVX512DQ-NEXT: vpternlogq $226, %ymm3, %ymm7, %ymm1
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,2,3,3,6,6,7,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
@@ -2678,10 +2653,10 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX512DQ-FCP-LABEL: store_i8_stride5_vf32:
; AVX512DQ-FCP: # %bb.0:
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm4
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm4[8],zero,xmm4[u,7],zero,xmm4[9],zero,xmm4[u],zero,xmm4[u,10],zero,xmm4[12],zero,xmm4[u,11]
; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm6
@@ -2710,26 +2685,23 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
; AVX512DQ-FCP-NEXT: vpermd %zmm4, %zmm7, %zmm7
; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm7
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u],zero,ymm3[13,u,u,u],zero,ymm3[14,u,u,u],zero,ymm3[15,u,u,u],zero,ymm3[16,u,u,u],zero,ymm3[17,u,u,u],zero,ymm3[18,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,13],zero,ymm2[u,u,u,14],zero,ymm2[u,u,u,15],zero,ymm2[u,u,u,16],zero,ymm2[u,u,u,17],zero,ymm2[u,u,u,18],zero,ymm2[u,u]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u],zero,ymm1[13,u,u,u],zero,ymm1[14,u,u,u],zero,ymm1[15,u,u,u],zero,ymm1[16,u,u,u],zero,ymm1[17,u,u,u],zero,ymm1[18,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,13],zero,ymm0[u,u,u,14],zero,ymm0[u,u,u,15],zero,ymm0[u,u,u,16],zero,ymm0[u,u,u,17],zero,ymm0[u,u,u,18],zero,ymm0[u,u]
; AVX512DQ-FCP-NEXT: vpor %ymm5, %ymm8, %ymm5
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u],zero,ymm1[13,u,u,u],zero,ymm1[14,u,u,u],zero,ymm1[15,u,u,u],zero,ymm1[16,u,u,u],zero,ymm1[17,u,u,u],zero,ymm1[18,u,u,u],zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm0[u,13],zero,ymm0[u,u,u,14],zero,ymm0[u,u,u,15],zero,ymm0[u,u,u,16],zero,ymm0[u,u,u,17],zero,ymm0[u,u,u,18],zero,ymm0[u,u,u,19]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u],zero,ymm3[13,u,u,u],zero,ymm3[14,u,u,u],zero,ymm3[15,u,u,u],zero,ymm3[16,u,u,u],zero,ymm3[17,u,u,u],zero,ymm3[18,u,u,u],zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,13],zero,ymm2[u,u,u,14],zero,ymm2[u,u,u,15],zero,ymm2[u,u,u,16],zero,ymm2[u,u,u,17],zero,ymm2[u,u,u,18],zero,ymm2[u,u,u,19]
; AVX512DQ-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm5, %ymm10, %ymm8
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero,ymm2[25],zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm3[21,u],zero,ymm3[20],zero,ymm3[22],zero,ymm3[24,u],zero,ymm3[23],zero,ymm3[25,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm5, %ymm9, %ymm5
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[21],zero,zero,ymm0[20],zero,ymm0[22],zero,ymm0[24],zero,zero,ymm0[23],zero,ymm0[25],zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[19],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
+; AVX512DQ-FCP-NEXT: vpor %ymm9, %ymm10, %ymm9
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm10, %ymm5, %ymm9
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[19],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm2[21],zero,ymm2[21,20],zero,ymm2[22],zero,ymm2[24],zero,ymm2[22,23],zero,ymm2[25]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpor %ymm5, %ymm11, %ymm5
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm5
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm5
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [4,0,5,5,5,5,0,6]
; AVX512DQ-FCP-NEXT: vpermd %ymm4, %ymm8, %ymm8
@@ -2738,17 +2710,15 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[12],zero,zero,zero,zero,ymm4[13],zero,zero,zero,zero,ymm4[14],zero,zero,zero,zero,ymm4[15],zero,zero,zero,zero,ymm4[16],zero,zero,zero,zero,ymm4[17],zero,zero,zero,zero,ymm4[18],zero
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm9, %zmm8
; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,27,u,u,26,u,28,u,30,u,u,29,u,31,u]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[26],zero,ymm2[28],zero,zero,ymm2[27],zero,ymm2[29],zero,ymm2[31],zero,zero,ymm2[30],zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u],zero,ymm3[26],zero,ymm3[28,u],zero,ymm3[u],zero,ymm3[29],zero,ymm3[31,u],zero,ymm3[30]
+; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm10, %ymm3, %ymm2
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm0[26],zero,ymm0[28],zero,zero,ymm0[27],zero,ymm0[29],zero,ymm0[31],zero,zero,ymm0[30],zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,zero,ymm0[26],zero,ymm0[28],zero,ymm0[30],zero,zero,ymm0[29],zero,ymm0[31],zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm1[27,u],zero,ymm1[26],zero,ymm1[28],zero,ymm1[30,u],zero,ymm1[29],zero,ymm1[31,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm2, %ymm6, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm2, %ymm6, %ymm0
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [6,6,6,0,7,7,7,7]
; AVX512DQ-FCP-NEXT: vpermd %ymm4, %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
@@ -2792,26 +2762,24 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
; AVX512BW-NEXT: kmovq %rax, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm6, %zmm3 {%k1}
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm6 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm1[21],zero,ymm1[21,20],zero,ymm1[22],zero,ymm1[24],zero,ymm1[22,23],zero,ymm1[25]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,ymm1[12,13],zero,zero,zero,zero,ymm1[14],zero,zero,zero,ymm1[14,15],zero,zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16,17],zero,zero,zero,zero,ymm1[18],zero,zero,zero
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
; AVX512BW-NEXT: vpor %ymm6, %ymm7, %ymm6
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm1[12,13],zero,zero,zero,zero,ymm1[14],zero,zero,zero,ymm1[14,15],zero,zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16,17],zero,zero,zero,zero,ymm1[18],zero,zero,zero
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm1[21],zero,ymm1[21,20],zero,ymm1[22],zero,ymm1[24],zero,ymm1[22,23],zero,ymm1[25]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
; AVX512BW-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm5[21],zero,zero,ymm5[20],zero,ymm5[22],zero,ymm5[24],zero,zero,ymm5[23],zero,ymm5[25],zero
; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512BW-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,3,3,0,4,4,4,4]
-; AVX512BW-NEXT: vpermd %ymm4, %ymm8, %ymm8
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [3,3,3,0,4,4,4,4]
+; AVX512BW-NEXT: vpermd %ymm4, %ymm7, %ymm7
; AVX512BW-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512BW-NEXT: kmovd %eax, %k1
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 {%k1} = ymm5[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 {%k1} = ymm5[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm5[21],zero,zero,ymm5[20],zero,ymm5[22],zero,ymm5[24],zero,zero,ymm5[23],zero,ymm5[25],zero
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
+; AVX512BW-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
; AVX512BW-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512BW-NEXT: kmovq %rax, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
@@ -2854,11 +2822,11 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-LABEL: store_i8_stride5_vf32:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm3
; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm0
; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm2
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm3
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm3[8],zero,xmm3[u,7],zero,xmm3[9],zero,xmm3[u],zero,xmm3[u,10],zero,xmm3[12],zero,xmm3[u,11]
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm4
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm4[8],zero,xmm4[u,7],zero,xmm4[9],zero,xmm4[u],zero,xmm4[u,10],zero,xmm4[12],zero,xmm4[u,11]
; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %xmm6
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,xmm6[8,u],zero,xmm6[7],zero,xmm6[9,u,11,u],zero,xmm6[10],zero,xmm6[12,u],zero
; AVX512BW-FCP-NEXT: vpor %xmm5, %xmm7, %xmm5
@@ -2871,39 +2839,37 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm8[6],zero,xmm8[8,u],zero,xmm8[7],zero,xmm8[9],zero,xmm8[11,u],zero,xmm8[10],zero,xmm8[12]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[6],zero,xmm7[8],zero,xmm7[u,7],zero,xmm7[9],zero,xmm7[11],zero,xmm7[u,10],zero,xmm7[12],zero
; AVX512BW-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm3, %zmm3
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,0,1,1,4,4,5,5]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm4, %zmm4
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm4[0,0,1,1,4,4,5,5]
; AVX512BW-FCP-NEXT: movabsq $3570337559743967628, %rax # imm = 0x318C631818C6318C
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm4 {%k1}
; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
; AVX512BW-FCP-NEXT: vpermd %zmm5, %zmm6, %zmm6
; AVX512BW-FCP-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm3 {%k1}
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm4 {%k1}
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18],zero,zero,zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
; AVX512BW-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18],zero,zero,zero
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
; AVX512BW-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,3,3,0,4,4,4,4]
-; AVX512BW-FCP-NEXT: vpermd %ymm1, %ymm8, %ymm8
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [3,3,3,0,4,4,4,4]
+; AVX512BW-FCP-NEXT: vpermd %ymm1, %ymm7, %ymm7
; AVX512BW-FCP-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512BW-FCP-NEXT: kmovd %eax, %k1
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 {%k1} = ymm4[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 {%k1} = ymm3[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
+; AVX512BW-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
; AVX512BW-FCP-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
@@ -2912,16 +2878,14 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,zero,zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm3[26],zero,ymm3[28],zero,zero,zero,zero,ymm3[29],zero,ymm3[31],zero,zero,ymm3[30]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
+; AVX512BW-FCP-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm1, %ymm1
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,zero,ymm0[26],zero,ymm0[28],zero,ymm0[30],zero,zero,ymm0[29],zero,ymm0[31],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512BW-FCP-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512BW-FCP-NEXT: movl $415641996, %eax # imm = 0x18C6318C
; AVX512BW-FCP-NEXT: kmovd %eax, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
@@ -2932,7 +2896,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
; AVX512BW-FCP-NEXT: vmovdqa %ymm0, 128(%r9)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm7, 64(%r9)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, (%r9)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, (%r9)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -2970,26 +2934,24 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm6, %zmm3 {%k1}
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm6 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm1[21],zero,ymm1[21,20],zero,ymm1[22],zero,ymm1[24],zero,ymm1[22,23],zero,ymm1[25]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,ymm1[12,13],zero,zero,zero,zero,ymm1[14],zero,zero,zero,ymm1[14,15],zero,zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16,17],zero,zero,zero,zero,ymm1[18],zero,zero,zero
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
; AVX512DQ-BW-NEXT: vpor %ymm6, %ymm7, %ymm6
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm1[12,13],zero,zero,zero,zero,ymm1[14],zero,zero,zero,ymm1[14,15],zero,zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16,17],zero,zero,zero,zero,ymm1[18],zero,zero,zero
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm1[21],zero,ymm1[21,20],zero,ymm1[22],zero,ymm1[24],zero,ymm1[22,23],zero,ymm1[25]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
; AVX512DQ-BW-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm5[21],zero,zero,ymm5[20],zero,ymm5[22],zero,ymm5[24],zero,zero,ymm5[23],zero,ymm5[25],zero
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512DQ-BW-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,3,3,0,4,4,4,4]
-; AVX512DQ-BW-NEXT: vpermd %ymm4, %ymm8, %ymm8
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [3,3,3,0,4,4,4,4]
+; AVX512DQ-BW-NEXT: vpermd %ymm4, %ymm7, %ymm7
; AVX512DQ-BW-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512DQ-BW-NEXT: kmovd %eax, %k1
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 {%k1} = ymm5[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 {%k1} = ymm5[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm5[21],zero,zero,ymm5[20],zero,ymm5[22],zero,ymm5[24],zero,zero,ymm5[23],zero,ymm5[25],zero
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
+; AVX512DQ-BW-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
; AVX512DQ-BW-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
@@ -3032,11 +2994,11 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-LABEL: store_i8_stride5_vf32:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm3[8],zero,xmm3[u,7],zero,xmm3[9],zero,xmm3[u],zero,xmm3[u,10],zero,xmm3[12],zero,xmm3[u,11]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm4
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm4[8],zero,xmm4[u,7],zero,xmm4[9],zero,xmm4[u],zero,xmm4[u,10],zero,xmm4[12],zero,xmm4[u,11]
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %xmm6
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,xmm6[8,u],zero,xmm6[7],zero,xmm6[9,u,11,u],zero,xmm6[10],zero,xmm6[12,u],zero
; AVX512DQ-BW-FCP-NEXT: vpor %xmm5, %xmm7, %xmm5
@@ -3049,39 +3011,37 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm8[6],zero,xmm8[8,u],zero,xmm8[7],zero,xmm8[9],zero,xmm8[11,u],zero,xmm8[10],zero,xmm8[12]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[6],zero,xmm7[8],zero,xmm7[u,7],zero,xmm7[9],zero,xmm7[11],zero,xmm7[u,10],zero,xmm7[12],zero
; AVX512DQ-BW-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm3, %zmm3
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,0,1,1,4,4,5,5]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm4, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm4[0,0,1,1,4,4,5,5]
; AVX512DQ-BW-FCP-NEXT: movabsq $3570337559743967628, %rax # imm = 0x318C631818C6318C
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm4 {%k1}
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
; AVX512DQ-BW-FCP-NEXT: vpermd %zmm5, %zmm6, %zmm6
; AVX512DQ-BW-FCP-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm3 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm4 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18],zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
; AVX512DQ-BW-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18],zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
; AVX512DQ-BW-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,3,3,0,4,4,4,4]
-; AVX512DQ-BW-FCP-NEXT: vpermd %ymm1, %ymm8, %ymm8
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [3,3,3,0,4,4,4,4]
+; AVX512DQ-BW-FCP-NEXT: vpermd %ymm1, %ymm7, %ymm7
; AVX512DQ-BW-FCP-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k1
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 {%k1} = ymm4[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 {%k1} = ymm3[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
; AVX512DQ-BW-FCP-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
@@ -3090,16 +3050,14 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,zero,zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm3[26],zero,ymm3[28],zero,zero,zero,zero,ymm3[29],zero,ymm3[31],zero,zero,ymm3[30]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm1, %ymm1
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,zero,ymm0[26],zero,ymm0[28],zero,ymm0[30],zero,zero,ymm0[29],zero,ymm0[31],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512DQ-BW-FCP-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512DQ-BW-FCP-NEXT: movl $415641996, %eax # imm = 0x18C6318C
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
@@ -3110,7 +3068,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm0, 128(%r9)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm7, 64(%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, (%r9)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <32 x i8>, ptr %in.vecptr0, align 64
@@ -4148,209 +4106,200 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX2-LABEL: store_i8_stride5_vf64:
; AVX2: # %bb.0:
-; AVX2-NEXT: subq $312, %rsp # imm = 0x138
-; AVX2-NEXT: vmovdqa 32(%rdi), %ymm10
+; AVX2-NEXT: subq $248, %rsp
+; AVX2-NEXT: vmovdqa 32(%rdi), %ymm4
; AVX2-NEXT: vmovdqa (%rcx), %xmm1
; AVX2-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT: vmovdqa 32(%rcx), %xmm11
-; AVX2-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vmovdqa 32(%rcx), %xmm7
+; AVX2-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
; AVX2-NEXT: vpshufb %xmm0, %xmm1, %xmm1
; AVX2-NEXT: vmovdqa (%rdx), %xmm3
; AVX2-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT: vmovdqa 32(%rdx), %xmm12
-; AVX2-NEXT: vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vmovdqa 32(%rdx), %xmm10
+; AVX2-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
; AVX2-NEXT: vpshufb %xmm2, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
-; AVX2-NEXT: vmovdqa (%rdi), %xmm4
-; AVX2-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vmovdqa (%rdi), %xmm5
+; AVX2-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX2-NEXT: vpshufb %xmm3, %xmm4, %xmm4
-; AVX2-NEXT: vmovdqa (%rsi), %xmm14
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX2-NEXT: vpshufb %xmm5, %xmm14, %xmm8
-; AVX2-NEXT: vpor %xmm4, %xmm8, %xmm4
-; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255]
-; AVX2-NEXT: vpblendvb %ymm8, %ymm1, %ymm4, %ymm1
-; AVX2-NEXT: vmovdqa (%r8), %xmm4
-; AVX2-NEXT: vmovdqa %xmm4, (%rsp) # 16-byte Spill
-; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,2]
-; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,1,1]
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm9, %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpshufb %xmm3, %xmm5, %xmm5
+; AVX2-NEXT: vmovdqa (%rsi), %xmm6
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm8 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
+; AVX2-NEXT: vpshufb %xmm8, %xmm6, %xmm9
+; AVX2-NEXT: vpor %xmm5, %xmm9, %xmm5
+; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,1,1]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255]
+; AVX2-NEXT: vpblendvb %ymm9, %ymm1, %ymm5, %ymm1
+; AVX2-NEXT: vmovdqa (%r8), %xmm5
+; AVX2-NEXT: vmovdqa %xmm5, (%rsp) # 16-byte Spill
+; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
+; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,1,1]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
+; AVX2-NEXT: vpblendvb %ymm12, %ymm1, %ymm5, %ymm1
; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vmovdqa 32(%rdi), %xmm4
-; AVX2-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT: vpshufb %xmm0, %xmm11, %xmm0
-; AVX2-NEXT: vpshufb %xmm2, %xmm12, %xmm1
+; AVX2-NEXT: vmovdqa 32(%rdi), %xmm5
+; AVX2-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vpshufb %xmm0, %xmm7, %xmm0
+; AVX2-NEXT: vpshufb %xmm2, %xmm10, %xmm1
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vmovdqa 32(%rsi), %xmm2
; AVX2-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT: vpshufb %xmm3, %xmm4, %xmm1
-; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm5, %xmm1
+; AVX2-NEXT: vpshufb %xmm8, %xmm2, %xmm2
; AVX2-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vmovdqa 32(%rsi), %ymm2
+; AVX2-NEXT: vmovdqa 32(%rsi), %ymm11
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
-; AVX2-NEXT: vpblendvb %ymm8, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpblendvb %ymm9, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovdqa 32(%r8), %xmm1
; AVX2-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,2]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,1]
-; AVX2-NEXT: vpblendvb %ymm9, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpblendvb %ymm12, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovdqa {{.*#+}} ymm15 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,20,128,22,128,24,128,22,23,128,25,128,23]
-; AVX2-NEXT: vpshufb %ymm15, %ymm10, %ymm1
-; AVX2-NEXT: vmovdqa %ymm10, %ymm11
-; AVX2-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128]
-; AVX2-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX2-NEXT: vpshufb %ymm5, %ymm2, %ymm3
-; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
+; AVX2-NEXT: vpshufb %ymm15, %ymm4, %ymm1
+; AVX2-NEXT: vmovdqa %ymm4, %ymm13
+; AVX2-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128]
+; AVX2-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX2-NEXT: vpshufb %ymm4, %ymm11, %ymm3
; AVX2-NEXT: vpor %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vmovdqa 32(%rcx), %ymm7
+; AVX2-NEXT: vmovdqa 32(%rdx), %ymm12
+; AVX2-NEXT: vmovdqa 32(%rcx), %ymm14
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
; AVX2-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX2-NEXT: vpshufb %ymm3, %ymm7, %ymm4
-; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm4[2,2,3,3]
-; AVX2-NEXT: vmovdqa 32(%rdx), %ymm13
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25]
-; AVX2-NEXT: vpshufb %ymm4, %ymm13, %ymm12
-; AVX2-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX2-NEXT: vpor %ymm9, %ymm12, %ymm9
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm12 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0]
-; AVX2-NEXT: vpblendvb %ymm12, %ymm1, %ymm9, %ymm9
-; AVX2-NEXT: vmovdqa (%rdi), %ymm6
-; AVX2-NEXT: vpshufb %ymm15, %ymm6, %ymm1
-; AVX2-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT: vpshufb %ymm3, %ymm14, %ymm8
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25]
+; AVX2-NEXT: vpshufb %ymm5, %ymm12, %ymm10
+; AVX2-NEXT: vpor %ymm8, %ymm10, %ymm8
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
+; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm10 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0]
+; AVX2-NEXT: vpblendvb %ymm10, %ymm1, %ymm8, %ymm2
+; AVX2-NEXT: vmovdqa (%rdi), %ymm9
+; AVX2-NEXT: vpshufb %ymm15, %ymm9, %ymm1
; AVX2-NEXT: vmovdqa (%rsi), %ymm15
-; AVX2-NEXT: vpshufb %ymm5, %ymm15, %ymm5
-; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX2-NEXT: vpor %ymm1, %ymm5, %ymm5
-; AVX2-NEXT: vmovdqa (%rcx), %ymm10
-; AVX2-NEXT: vpshufb %ymm3, %ymm10, %ymm3
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm3[2,2,3,3]
-; AVX2-NEXT: vmovdqa (%rdx), %ymm8
-; AVX2-NEXT: vpshufb %ymm4, %ymm8, %ymm4
+; AVX2-NEXT: vpshufb %ymm4, %ymm15, %ymm4
+; AVX2-NEXT: vpor %ymm1, %ymm4, %ymm4
+; AVX2-NEXT: vmovdqa (%rcx), %ymm7
+; AVX2-NEXT: vpshufb %ymm3, %ymm7, %ymm0
+; AVX2-NEXT: vmovdqa (%rdx), %ymm3
+; AVX2-NEXT: vpshufb %ymm5, %ymm3, %ymm5
+; AVX2-NEXT: vpor %ymm0, %ymm5, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX2-NEXT: vpor %ymm0, %ymm4, %ymm0
-; AVX2-NEXT: vpblendvb %ymm12, %ymm5, %ymm0, %ymm0
-; AVX2-NEXT: vmovdqa 32(%r8), %ymm12
-; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm12[0,2,1,1,4,6,5,5]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; AVX2-NEXT: vpblendvb %ymm10, %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa 32(%r8), %ymm10
+; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm10[0,2,1,1,4,6,5,5]
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,3,2]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm5, %ymm9, %ymm4, %ymm1
+; AVX2-NEXT: vpblendvb %ymm5, %ymm2, %ymm4, %ymm1
; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vmovdqa (%r8), %ymm9
-; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm9[0,2,1,1,4,6,5,5]
+; AVX2-NEXT: vmovdqa (%r8), %ymm8
+; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm8[0,2,1,1,4,6,5,5]
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,3,2]
; AVX2-NEXT: vpblendvb %ymm5, %ymm0, %ymm4, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm3 = [3,3,3,0,4,4,4,4]
-; AVX2-NEXT: vpermd %ymm11, %ymm3, %ymm4
+; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm2 = [3,3,3,0,4,4,4,4]
+; AVX2-NEXT: vpermd %ymm13, %ymm2, %ymm4
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm5 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
-; AVX2-NEXT: vpshufb %ymm5, %ymm2, %ymm0
+; AVX2-NEXT: vpshufb %ymm5, %ymm11, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255]
; AVX2-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpermd %ymm6, %ymm3, %ymm2
+; AVX2-NEXT: vpermd %ymm9, %ymm2, %ymm2
; AVX2-NEXT: vpshufb %ymm5, %ymm15, %ymm4
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm4, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX2-NEXT: vmovdqa %ymm7, %ymm3
-; AVX2-NEXT: vpshufb %ymm2, %ymm7, %ymm4
+; AVX2-NEXT: vpshufb %ymm2, %ymm14, %ymm4
; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128]
-; AVX2-NEXT: vpshufb %ymm5, %ymm13, %ymm11
-; AVX2-NEXT: vpor %ymm4, %ymm11, %ymm4
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm11 = [u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255]
-; AVX2-NEXT: vpblendvb %ymm11, %ymm0, %ymm4, %ymm0
-; AVX2-NEXT: vpshufb %ymm2, %ymm10, %ymm2
-; AVX2-NEXT: vpshufb %ymm5, %ymm8, %ymm4
+; AVX2-NEXT: vpshufb %ymm5, %ymm12, %ymm13
+; AVX2-NEXT: vpor %ymm4, %ymm13, %ymm4
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm13 = [u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255]
+; AVX2-NEXT: vpblendvb %ymm13, %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpshufb %ymm2, %ymm7, %ymm2
+; AVX2-NEXT: vpshufb %ymm5, %ymm3, %ymm4
; AVX2-NEXT: vpor %ymm2, %ymm4, %ymm2
-; AVX2-NEXT: vpblendvb %ymm11, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpblendvb %ymm13, %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm2 = [3,3,3,3,0,4,4,4]
-; AVX2-NEXT: vpermd %ymm12, %ymm2, %ymm4
+; AVX2-NEXT: vpermd %ymm10, %ymm2, %ymm4
; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
; AVX2-NEXT: vpblendvb %ymm5, %ymm0, %ymm4, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vpermd %ymm9, %ymm2, %ymm0
+; AVX2-NEXT: vpermd %ymm8, %ymm2, %ymm0
; AVX2-NEXT: vpblendvb %ymm5, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
+; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX2-NEXT: vpshufb %xmm14, %xmm0, %xmm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX2-NEXT: vpshufb %xmm7, %xmm1, %xmm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255]
-; AVX2-NEXT: vpblendvb %ymm11, %ymm0, %ymm1, %ymm6
-; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
-; AVX2-NEXT: vpshufb %xmm14, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm13 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX2-NEXT: vpshufb %xmm13, %xmm0, %xmm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,0,1,1]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX2-NEXT: vpshufb %xmm6, %xmm1, %xmm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,0,1,1]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255]
+; AVX2-NEXT: vpblendvb %ymm0, %ymm2, %ymm1, %ymm4
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX2-NEXT: vpshufb %xmm7, %xmm1, %xmm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
+; AVX2-NEXT: vpshufb %xmm13, %xmm1, %xmm1
+; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX2-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX2-NEXT: # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3],xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
+; AVX2-NEXT: vpshufb %xmm6, %xmm2, %xmm2
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
-; AVX2-NEXT: vpblendvb %ymm11, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,1,1]
+; AVX2-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
; AVX2-NEXT: vpshufd $80, (%rsp), %xmm1 # 16-byte Folded Reload
; AVX2-NEXT: # xmm1 = mem[0,0,1,1]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm6, %ymm1, %ymm6
+; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm4
; AVX2-NEXT: vpshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; AVX2-NEXT: # xmm1 = mem[0,0,1,1]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm7
+; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm6
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX2-NEXT: vpshufb %ymm0, %ymm3, %ymm1
-; AVX2-NEXT: vpshufhw {{.*#+}} ymm2 = ymm13[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
+; AVX2-NEXT: vpshufb %ymm0, %ymm14, %ymm1
+; AVX2-NEXT: vpshufhw {{.*#+}} ymm2 = ymm12[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,2,3,3,6,6,7,7]
-; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [255,0,255,0,0,255,0,255,0,255,0,0,255,0,255,0,255,0,255,0,0,255,0,255,0,255,0,0,255,0,255,0]
-; AVX2-NEXT: # ymm4 = mem[0,1,0,1]
-; AVX2-NEXT: vpblendvb %ymm4, %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpshufb %ymm0, %ymm10, %ymm0
-; AVX2-NEXT: vpshufhw {{.*#+}} ymm2 = ymm8[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [255,0,255,0,0,255,0,255,0,255,0,0,255,0,255,0,255,0,255,0,0,255,0,255,0,255,0,0,255,0,255,0]
+; AVX2-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX2-NEXT: vpblendvb %ymm5, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpshufb %ymm0, %ymm7, %ymm0
+; AVX2-NEXT: vpshufhw {{.*#+}} ymm2 = ymm3[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,2,3,3,6,6,7,7]
-; AVX2-NEXT: vpblendvb %ymm4, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpblendvb %ymm5, %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14]
-; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm3
-; AVX2-NEXT: vpshufhw $230, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX2-NEXT: # ymm4 = mem[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
-; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,3,3,6,6,7,7]
-; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [255,0,0,255,0,255,0,0,0,0,255,0,255,0,0,255,255,0,0,255,0,255,0,0,0,0,255,0,255,0,0,255]
-; AVX2-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX2-NEXT: vpblendvb %ymm5, %ymm3, %ymm4, %ymm3
+; AVX2-NEXT: vpshufb %ymm2, %ymm11, %ymm3
+; AVX2-NEXT: vpshufhw $230, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
+; AVX2-NEXT: # ymm5 = mem[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[2,2,3,3,6,6,7,7]
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [255,0,0,255,0,255,0,0,0,0,255,0,255,0,0,255,255,0,0,255,0,255,0,0,0,0,255,0,255,0,0,255]
+; AVX2-NEXT: # ymm7 = mem[0,1,0,1]
+; AVX2-NEXT: vpblendvb %ymm7, %ymm3, %ymm5, %ymm3
; AVX2-NEXT: vpshufb %ymm2, %ymm15, %ymm2
-; AVX2-NEXT: vpshufhw $230, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX2-NEXT: # ymm4 = mem[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
-; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,3,3,6,6,7,7]
-; AVX2-NEXT: vpblendvb %ymm5, %ymm2, %ymm4, %ymm2
+; AVX2-NEXT: vpshufhw {{.*#+}} ymm5 = ymm9[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[2,2,3,3,6,6,7,7]
+; AVX2-NEXT: vpblendvb %ymm7, %ymm2, %ymm5, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u]
-; AVX2-NEXT: vpblendvb %ymm4, %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u]
+; AVX2-NEXT: vpblendvb %ymm5, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX2-NEXT: vpblendvb %ymm4, %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm12[2,2,3,3,6,6,7,7]
+; AVX2-NEXT: vpblendvb %ymm5, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm10[2,2,3,3,6,6,7,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
; AVX2-NEXT: vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm9[2,2,3,3,6,6,7,7]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm8[2,2,3,3,6,6,7,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
; AVX2-NEXT: vpblendvb %ymm3, %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
@@ -4362,21 +4311,21 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm2, 256(%r9)
; AVX2-NEXT: vmovdqa %ymm0, 128(%r9)
-; AVX2-NEXT: vmovdqa %ymm7, 160(%r9)
+; AVX2-NEXT: vmovdqa %ymm6, 160(%r9)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 192(%r9)
; AVX2-NEXT: vmovdqa %ymm1, 288(%r9)
-; AVX2-NEXT: vmovdqa %ymm6, (%r9)
+; AVX2-NEXT: vmovdqa %ymm4, (%r9)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 32(%r9)
-; AVX2-NEXT: addq $312, %rsp # imm = 0x138
+; AVX2-NEXT: addq $248, %rsp
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-FP-LABEL: store_i8_stride5_vf64:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: subq $200, %rsp
-; AVX2-FP-NEXT: vmovdqa 32(%rdx), %ymm11
+; AVX2-FP-NEXT: vmovdqa 32(%rdx), %ymm12
; AVX2-FP-NEXT: vmovdqa (%rcx), %xmm1
; AVX2-FP-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
; AVX2-FP-NEXT: vmovdqa 32(%rcx), %xmm8
@@ -4420,7 +4369,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vpshufb %xmm3, %xmm4, %xmm1
; AVX2-FP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
; AVX2-FP-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX2-FP-NEXT: vmovdqa 32(%rcx), %ymm3
+; AVX2-FP-NEXT: vmovdqa 32(%rcx), %ymm2
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
; AVX2-FP-NEXT: vpblendvb %ymm6, %ymm0, %ymm1, %ymm0
@@ -4431,106 +4380,98 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,29,26,128,28,128,30,128,28,29,128,31,128,29]
-; AVX2-FP-NEXT: vpshufb %ymm0, %ymm11, %ymm1
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
+; AVX2-FP-NEXT: vpshufb %ymm0, %ymm12, %ymm1
; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
; AVX2-FP-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX2-FP-NEXT: vpshufb %ymm8, %ymm3, %ymm2
-; AVX2-FP-NEXT: vmovdqa %ymm3, %ymm12
-; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX2-FP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX2-FP-NEXT: vpshufb %ymm8, %ymm2, %ymm3
+; AVX2-FP-NEXT: vmovdqa %ymm2, %ymm14
+; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm4
+; AVX2-FP-NEXT: vmovdqa 32(%rsi), %ymm11
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,29,26,128,28,128,26,27,28,29,128,31,128,29,30,128]
-; AVX2-FP-NEXT: vpshufb %ymm5, %ymm4, %ymm2
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm6 = ymm2[2,2,3,3]
-; AVX2-FP-NEXT: vmovdqa 32(%rsi), %ymm2
+; AVX2-FP-NEXT: vpshufb %ymm5, %ymm4, %ymm6
; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
; AVX2-FP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX2-FP-NEXT: vpshufb %ymm9, %ymm2, %ymm7
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX2-FP-NEXT: vpshufb %ymm9, %ymm11, %ymm7
; AVX2-FP-NEXT: vpor %ymm6, %ymm7, %ymm6
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u]
; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm1, %ymm6, %ymm3
; AVX2-FP-NEXT: vmovdqa (%rdx), %ymm13
; AVX2-FP-NEXT: vpshufb %ymm0, %ymm13, %ymm0
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX2-FP-NEXT: vmovdqa (%rcx), %ymm7
; AVX2-FP-NEXT: vpshufb %ymm8, %ymm7, %ymm1
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
; AVX2-FP-NEXT: vpor %ymm0, %ymm1, %ymm8
-; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX2-FP-NEXT: vpshufb %ymm5, %ymm1, %ymm0
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm5 = ymm0[2,2,3,3]
-; AVX2-FP-NEXT: vmovdqa (%rsi), %ymm0
-; AVX2-FP-NEXT: vpshufb %ymm9, %ymm0, %ymm9
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
+; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX2-FP-NEXT: vpshufb %ymm5, %ymm2, %ymm5
+; AVX2-FP-NEXT: vmovdqa (%rsi), %ymm1
+; AVX2-FP-NEXT: vpshufb %ymm9, %ymm1, %ymm9
; AVX2-FP-NEXT: vpor %ymm5, %ymm9, %ymm5
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm8, %ymm5, %ymm8
; AVX2-FP-NEXT: vmovdqa 32(%r8), %ymm5
; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm9 = ymm5[2,2,3,3,6,6,7,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
-; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm3, %ymm9, %ymm3
-; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm3, %ymm9, %ymm0
+; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqa (%r8), %ymm3
; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm9 = ymm3[2,2,3,3,6,6,7,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm6
-; AVX2-FP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm0
+; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,20,128,22,128,24,128,22,23,128,25,128,23]
; AVX2-FP-NEXT: vpshufb %ymm8, %ymm4, %ymm9
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128]
; AVX2-FP-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX2-FP-NEXT: vpshufb %ymm10, %ymm2, %ymm15
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
+; AVX2-FP-NEXT: vpshufb %ymm10, %ymm11, %ymm15
; AVX2-FP-NEXT: vpor %ymm9, %ymm15, %ymm9
; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
; AVX2-FP-NEXT: # ymm15 = mem[0,1,0,1]
-; AVX2-FP-NEXT: vpshufb %ymm15, %ymm12, %ymm14
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
+; AVX2-FP-NEXT: vpshufb %ymm15, %ymm14, %ymm0
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25]
-; AVX2-FP-NEXT: vpshufb %ymm6, %ymm11, %ymm12
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX2-FP-NEXT: vpor %ymm14, %ymm12, %ymm12
+; AVX2-FP-NEXT: vpshufb %ymm6, %ymm12, %ymm14
+; AVX2-FP-NEXT: vpor %ymm0, %ymm14, %ymm0
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm14 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0]
-; AVX2-FP-NEXT: vpblendvb %ymm14, %ymm9, %ymm12, %ymm9
-; AVX2-FP-NEXT: vpshufb %ymm8, %ymm1, %ymm8
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX2-FP-NEXT: vpshufb %ymm10, %ymm0, %ymm10
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
-; AVX2-FP-NEXT: vpor %ymm8, %ymm10, %ymm8
-; AVX2-FP-NEXT: vpshufb %ymm15, %ymm7, %ymm10
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
+; AVX2-FP-NEXT: vpblendvb %ymm14, %ymm9, %ymm0, %ymm0
+; AVX2-FP-NEXT: vpshufb %ymm8, %ymm2, %ymm8
+; AVX2-FP-NEXT: vpshufb %ymm10, %ymm1, %ymm9
+; AVX2-FP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-FP-NEXT: vpshufb %ymm15, %ymm7, %ymm9
; AVX2-FP-NEXT: vpshufb %ymm6, %ymm13, %ymm6
+; AVX2-FP-NEXT: vpor %ymm6, %ymm9, %ymm6
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX2-FP-NEXT: vpor %ymm6, %ymm10, %ymm6
; AVX2-FP-NEXT: vpblendvb %ymm14, %ymm8, %ymm6, %ymm6
; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm8 = ymm5[0,2,1,1,4,6,5,5]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,3,2]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
-; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm9, %ymm8, %ymm10
-; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm8 = ymm3[0,2,1,1,4,6,5,5]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,3,2]
-; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm6, %ymm8, %ymm9
-; AVX2-FP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [3,3,3,0,4,4,4,4]
-; AVX2-FP-NEXT: vpermd %ymm4, %ymm6, %ymm4
-; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
-; AVX2-FP-NEXT: vpshufb %ymm8, %ymm2, %ymm2
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255]
-; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm4, %ymm2, %ymm2
-; AVX2-FP-NEXT: vpermd %ymm1, %ymm6, %ymm1
-; AVX2-FP-NEXT: vpshufb %ymm8, %ymm0, %ymm0
-; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm1, %ymm0, %ymm0
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
+; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm0, %ymm8, %ymm10
+; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm0 = ymm3[0,2,1,1,4,6,5,5]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,3,2]
+; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm6, %ymm0, %ymm9
+; AVX2-FP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [3,3,3,0,4,4,4,4]
+; AVX2-FP-NEXT: vpermd %ymm4, %ymm0, %ymm4
+; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
+; AVX2-FP-NEXT: vpshufb %ymm6, %ymm11, %ymm8
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm11 = [u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255]
+; AVX2-FP-NEXT: vpblendvb %ymm11, %ymm4, %ymm8, %ymm4
+; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm0
+; AVX2-FP-NEXT: vpshufb %ymm6, %ymm1, %ymm1
+; AVX2-FP-NEXT: vpblendvb %ymm11, %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm1, %ymm4, %ymm4
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm1, %ymm2, %ymm2
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128]
-; AVX2-FP-NEXT: vpshufb %ymm6, %ymm11, %ymm8
-; AVX2-FP-NEXT: vpor %ymm4, %ymm8, %ymm4
+; AVX2-FP-NEXT: vpshufb %ymm6, %ymm12, %ymm8
+; AVX2-FP-NEXT: vpor %ymm2, %ymm8, %ymm2
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm8 = [u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255]
-; AVX2-FP-NEXT: vpblendvb %ymm8, %ymm2, %ymm4, %ymm2
+; AVX2-FP-NEXT: vpblendvb %ymm8, %ymm4, %ymm2, %ymm2
; AVX2-FP-NEXT: vpshufb %ymm1, %ymm7, %ymm1
; AVX2-FP-NEXT: vpshufb %ymm6, %ymm13, %ymm4
; AVX2-FP-NEXT: vpor %ymm1, %ymm4, %ymm1
@@ -4596,8 +4537,8 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-LABEL: store_i8_stride5_vf64:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: subq $168, %rsp
-; AVX2-FCP-NEXT: vmovdqa 32(%rdx), %ymm13
-; AVX2-FCP-NEXT: vmovdqa 32(%rcx), %ymm10
+; AVX2-FCP-NEXT: vmovdqa 32(%rdx), %ymm14
+; AVX2-FCP-NEXT: vmovdqa 32(%rcx), %ymm9
; AVX2-FCP-NEXT: vmovdqa (%r8), %ymm11
; AVX2-FCP-NEXT: vmovdqa (%rcx), %xmm1
; AVX2-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -4643,96 +4584,88 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
; AVX2-FCP-NEXT: vpblendvb %ymm4, %ymm1, %ymm2, %ymm1
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 32(%r8), %ymm12
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm3, %ymm1
+; AVX2-FCP-NEXT: vmovdqa 32(%r8), %ymm13
+; AVX2-FCP-NEXT: vpermd %ymm13, %ymm3, %ymm1
; AVX2-FCP-NEXT: vpblendvb %ymm4, %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,29,26,128,28,128,30,128,28,29,128,31,128,29]
-; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm13, %ymm1
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
-; AVX2-FCP-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm10, %ymm2
-; AVX2-FCP-NEXT: vmovdqa %ymm10, %ymm14
-; AVX2-FCP-NEXT: vmovdqu %ymm10, (%rsp) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm14, %ymm1
+; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
+; AVX2-FCP-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm9, %ymm2
+; AVX2-FCP-NEXT: vmovdqa %ymm9, %ymm15
+; AVX2-FCP-NEXT: vmovdqu %ymm9, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
+; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX2-FCP-NEXT: vmovdqa 32(%rsi), %ymm3
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,29,26,128,28,128,26,27,28,29,128,31,128,29,30,128]
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm2
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm2[2,2,3,3]
-; AVX2-FCP-NEXT: vmovdqa 32(%rsi), %ymm2
-; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
-; AVX2-FCP-NEXT: # ymm15 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm2, %ymm5
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
+; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm4
+; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
+; AVX2-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm5
; AVX2-FCP-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u]
; AVX2-FCP-NEXT: vpblendvb %ymm7, %ymm1, %ymm4, %ymm4
-; AVX2-FCP-NEXT: vmovdqa (%rdx), %ymm10
-; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm10, %ymm0
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; AVX2-FCP-NEXT: vmovdqa (%rdx), %ymm12
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm12, %ymm0
; AVX2-FCP-NEXT: vmovdqa (%rcx), %ymm5
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm1
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX2-FCP-NEXT: vpor %ymm0, %ymm1, %ymm8
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm5, %ymm1
+; AVX2-FCP-NEXT: vpor %ymm0, %ymm1, %ymm10
; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm0
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm0[2,2,3,3]
+; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm9
; AVX2-FCP-NEXT: vmovdqa (%rsi), %ymm0
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm0, %ymm15
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
-; AVX2-FCP-NEXT: vpor %ymm9, %ymm15, %ymm9
-; AVX2-FCP-NEXT: vpblendvb %ymm7, %ymm8, %ymm9, %ymm7
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm8
+; AVX2-FCP-NEXT: vpor %ymm9, %ymm8, %ymm8
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm10[2,2,3,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX2-FCP-NEXT: vpblendvb %ymm7, %ymm9, %ymm8, %ymm7
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [6,6,6,6,7,7,7,7]
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm8, %ymm9
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
-; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm4, %ymm9, %ymm4
+; AVX2-FCP-NEXT: vpermd %ymm13, %ymm8, %ymm9
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm4, %ymm9, %ymm4
; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermd %ymm11, %ymm8, %ymm4
-; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm7, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm7, %ymm4, %ymm4
; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,20,128,22,128,24,128,22,23,128,25,128,23]
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm7
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm7
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128]
; AVX2-FCP-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm9
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm9
; AVX2-FCP-NEXT: vpor %ymm7, %ymm9, %ymm7
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
; AVX2-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm14, %ymm15
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
+; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm15, %ymm10
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25]
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm13, %ymm14
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
-; AVX2-FCP-NEXT: vpor %ymm15, %ymm14, %ymm14
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm14, %ymm15
+; AVX2-FCP-NEXT: vpor %ymm10, %ymm15, %ymm10
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0]
-; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm7, %ymm14, %ymm7
+; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm7, %ymm10, %ymm7
; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm6
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm8
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX2-FCP-NEXT: vpor %ymm6, %ymm8, %ymm6
; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm8
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm4
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm12, %ymm4
; AVX2-FCP-NEXT: vpor %ymm4, %ymm8, %ymm4
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm6, %ymm4, %ymm4
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [4,6,5,5,5,5,4,6]
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm6, %ymm8
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
-; AVX2-FCP-NEXT: vpblendvb %ymm14, %ymm7, %ymm8, %ymm9
+; AVX2-FCP-NEXT: vpermd %ymm13, %ymm6, %ymm8
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm7, %ymm8, %ymm9
; AVX2-FCP-NEXT: vpermd %ymm11, %ymm6, %ymm6
-; AVX2-FCP-NEXT: vpblendvb %ymm14, %ymm4, %ymm6, %ymm7
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm4, %ymm6, %ymm7
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [3,3,3,0,4,4,4,4]
-; AVX2-FCP-NEXT: vpermd %ymm3, %ymm4, %ymm3
+; AVX2-FCP-NEXT: vpermd %ymm2, %ymm4, %ymm2
; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm3
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255]
-; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm3, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm2, %ymm3, %ymm2
; AVX2-FCP-NEXT: vpermd %ymm1, %ymm4, %ymm1
; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm1, %ymm0, %ymm0
@@ -4740,16 +4673,16 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm3 # 32-byte Reload
; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm3, %ymm3
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128]
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm13, %ymm6
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm14, %ymm6
; AVX2-FCP-NEXT: vpor %ymm3, %ymm6, %ymm3
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255]
; AVX2-FCP-NEXT: vpblendvb %ymm6, %ymm2, %ymm3, %ymm2
; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm5, %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm3
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm12, %ymm3
; AVX2-FCP-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX2-FCP-NEXT: vpblendvb %ymm6, %ymm0, %ymm1, %ymm1
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [3,3,3,3,0,4,4,4]
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm3, %ymm0
+; AVX2-FCP-NEXT: vpermd %ymm13, %ymm3, %ymm0
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
; AVX2-FCP-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpermd %ymm11, %ymm3, %ymm2
@@ -4783,7 +4716,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vpermd %ymm11, %ymm4, %ymm5
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255]
; AVX2-FCP-NEXT: vpblendvb %ymm6, %ymm2, %ymm5, %ymm2
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpermd %ymm13, %ymm4, %ymm4
; AVX2-FCP-NEXT: vpblendvb %ymm6, %ymm3, %ymm4, %ymm3
; AVX2-FCP-NEXT: vmovdqa %ymm1, 64(%r9)
; AVX2-FCP-NEXT: vmovdqa %ymm0, 224(%r9)
@@ -4805,766 +4738,740 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX512-LABEL: store_i8_stride5_vf64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovdqa 32(%rsi), %ymm3
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm15 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
-; AVX512-NEXT: vpshufb %ymm15, %ymm3, %ymm0
-; AVX512-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
-; AVX512-NEXT: vpshufb %ymm9, %ymm2, %ymm1
+; AVX512-NEXT: vmovdqa 32(%rsi), %ymm11
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
+; AVX512-NEXT: vpshufb %ymm1, %ymm11, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm1, %ymm18
+; AVX512-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
+; AVX512-NEXT: vpshufb %ymm2, %ymm5, %ymm1
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm19
+; AVX512-NEXT: vporq %ymm0, %ymm1, %ymm20
+; AVX512-NEXT: vmovdqa 32(%rdi), %xmm12
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
+; AVX512-NEXT: vpshufb %xmm1, %xmm12, %xmm0
+; AVX512-NEXT: vmovdqa64 %xmm1, %xmm28
+; AVX512-NEXT: vmovdqa 32(%rsi), %xmm10
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
+; AVX512-NEXT: vpshufb %xmm2, %xmm10, %xmm1
+; AVX512-NEXT: vmovdqa64 %xmm2, %xmm29
+; AVX512-NEXT: vporq %xmm0, %xmm1, %xmm21
+; AVX512-NEXT: vmovdqa 32(%rcx), %ymm15
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512-NEXT: vpshufb %ymm8, %ymm15, %ymm0
+; AVX512-NEXT: vmovdqa 32(%rdx), %ymm13
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
+; AVX512-NEXT: vpshufb %ymm3, %ymm13, %ymm1
+; AVX512-NEXT: vporq %ymm0, %ymm1, %ymm22
+; AVX512-NEXT: vmovdqa 32(%rcx), %xmm6
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
+; AVX512-NEXT: vpshufb %xmm1, %xmm6, %xmm0
+; AVX512-NEXT: vmovdqa64 %xmm1, %xmm30
+; AVX512-NEXT: vmovdqa 32(%rdx), %xmm7
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
+; AVX512-NEXT: vpshufb %xmm2, %xmm7, %xmm1
+; AVX512-NEXT: vmovdqa64 %xmm2, %xmm31
+; AVX512-NEXT: vporq %xmm0, %xmm1, %xmm23
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
+; AVX512-NEXT: # ymm9 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm9, %ymm5, %ymm0
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128]
+; AVX512-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm4, %ymm5, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm24
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30,27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30]
+; AVX512-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm5, %ymm11, %ymm1
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0,19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0]
+; AVX512-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm2, %ymm11, %ymm11
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm11, %zmm26
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm11, %ymm13, %ymm1
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
+; AVX512-NEXT: # ymm0 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm0, %ymm15, %ymm14
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm14, %zmm25
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0,25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0]
+; AVX512-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm1, %ymm15, %ymm14
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
+; AVX512-NEXT: # ymm15 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm15, %ymm13, %ymm13
+; AVX512-NEXT: vinserti64x4 $1, %ymm14, %zmm13, %zmm27
+; AVX512-NEXT: vmovdqa (%rcx), %ymm13
+; AVX512-NEXT: vpshufb %ymm8, %ymm13, %ymm8
+; AVX512-NEXT: vmovdqa (%rdx), %ymm14
+; AVX512-NEXT: vpshufb %ymm3, %ymm14, %ymm3
+; AVX512-NEXT: vporq %ymm8, %ymm3, %ymm16
+; AVX512-NEXT: vpshufb %ymm0, %ymm13, %ymm0
+; AVX512-NEXT: vpshufb %ymm15, %ymm14, %ymm3
+; AVX512-NEXT: vporq %ymm0, %ymm3, %ymm17
+; AVX512-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512-NEXT: vmovdqa64 %ymm18, %ymm0
+; AVX512-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512-NEXT: vmovdqa (%rdi), %ymm8
+; AVX512-NEXT: vmovdqa64 %ymm19, %ymm15
+; AVX512-NEXT: vpshufb %ymm15, %ymm8, %ymm15
+; AVX512-NEXT: vporq %ymm0, %ymm15, %ymm18
+; AVX512-NEXT: vpshufb %ymm4, %ymm8, %ymm0
+; AVX512-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512-NEXT: vporq %ymm0, %ymm2, %ymm19
+; AVX512-NEXT: vpshufb %ymm11, %ymm14, %ymm0
+; AVX512-NEXT: vpshufb %ymm1, %ymm13, %ymm1
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa 32(%rdi), %xmm1
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512-NEXT: vpshufb %xmm5, %xmm1, %xmm0
-; AVX512-NEXT: vmovdqa64 %xmm1, %xmm16
-; AVX512-NEXT: vmovdqa 32(%rsi), %xmm4
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm14 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512-NEXT: vpshufb %xmm14, %xmm4, %xmm1
-; AVX512-NEXT: vmovdqa64 %xmm4, %xmm31
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa 32(%rcx), %ymm8
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512-NEXT: vpshufb %ymm0, %ymm8, %ymm4
-; AVX512-NEXT: vmovdqa 32(%rdx), %ymm11
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
-; AVX512-NEXT: vpshufb %ymm1, %ymm11, %ymm10
-; AVX512-NEXT: vpor %ymm4, %ymm10, %ymm4
-; AVX512-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa 32(%rcx), %xmm13
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
-; AVX512-NEXT: vpshufb %xmm6, %xmm13, %xmm4
-; AVX512-NEXT: vmovdqa64 %xmm6, %xmm25
-; AVX512-NEXT: vmovdqa 32(%rdx), %xmm10
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512-NEXT: vpshufb %xmm6, %xmm10, %xmm12
-; AVX512-NEXT: vmovdqa64 %xmm6, %xmm26
-; AVX512-NEXT: vporq %xmm4, %xmm12, %xmm20
-; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm22
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[26],zero,ymm2[28],zero,zero,ymm2[27],zero,ymm2[29],zero,ymm2[31],zero,zero,ymm2[30],zero
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero,ymm2[25],zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm23
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[27],zero,zero,ymm11[26],zero,ymm11[28],zero,ymm11[30],zero,zero,ymm11[29],zero,ymm11[31],zero,zero
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm8[19],zero,ymm8[21],zero,zero,ymm8[20],zero,ymm8[22],zero,ymm8[24],zero,zero,ymm8[23],zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm24
-; AVX512-NEXT: vmovdqa (%rcx), %ymm12
-; AVX512-NEXT: vpshufb %ymm0, %ymm12, %ymm0
-; AVX512-NEXT: vmovdqa (%rdx), %ymm6
-; AVX512-NEXT: vpshufb %ymm1, %ymm6, %ymm1
-; AVX512-NEXT: vporq %ymm0, %ymm1, %ymm19
-; AVX512-NEXT: vmovdqa (%rsi), %ymm7
-; AVX512-NEXT: vpshufb %ymm15, %ymm7, %ymm2
-; AVX512-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512-NEXT: vpshufb %ymm9, %ymm4, %ymm3
-; AVX512-NEXT: vporq %ymm2, %ymm3, %ymm21
-; AVX512-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512-NEXT: vpshufb %xmm5, %xmm0, %xmm3
-; AVX512-NEXT: vmovdqa64 %xmm0, %xmm17
-; AVX512-NEXT: vmovdqa (%rsi), %xmm5
-; AVX512-NEXT: vpshufb %xmm14, %xmm5, %xmm9
-; AVX512-NEXT: vporq %xmm3, %xmm9, %xmm27
-; AVX512-NEXT: vmovdqa (%rcx), %xmm1
-; AVX512-NEXT: vmovdqa64 %xmm25, %xmm0
-; AVX512-NEXT: vpshufb %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: vmovdqa64 %xmm1, %xmm18
-; AVX512-NEXT: vmovdqa (%rdx), %xmm9
-; AVX512-NEXT: vmovdqa64 %xmm26, %xmm1
-; AVX512-NEXT: vpshufb %xmm1, %xmm9, %xmm15
-; AVX512-NEXT: vporq %xmm0, %xmm15, %xmm29
-; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,2,2]
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,1]
+; AVX512-NEXT: vpshufb %ymm9, %ymm8, %ymm1
+; AVX512-NEXT: vpshufb %ymm5, %ymm3, %ymm2
+; AVX512-NEXT: vmovdqa (%rdi), %xmm5
+; AVX512-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512-NEXT: vmovdqa (%rcx), %xmm8
+; AVX512-NEXT: vmovdqa64 %xmm28, %xmm2
+; AVX512-NEXT: vpshufb %xmm2, %xmm5, %xmm2
+; AVX512-NEXT: vmovdqa64 %xmm29, %xmm3
+; AVX512-NEXT: vpshufb %xmm3, %xmm9, %xmm3
+; AVX512-NEXT: vpor %xmm2, %xmm3, %xmm4
+; AVX512-NEXT: vmovdqa (%rdx), %xmm3
+; AVX512-NEXT: vmovdqa 32(%r8), %ymm11
+; AVX512-NEXT: vmovdqa64 %xmm30, %xmm2
+; AVX512-NEXT: vpshufb %xmm2, %xmm8, %xmm2
+; AVX512-NEXT: vmovdqa64 %xmm31, %xmm13
+; AVX512-NEXT: vpshufb %xmm13, %xmm3, %xmm13
+; AVX512-NEXT: vpor %xmm2, %xmm13, %xmm13
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm14 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
+; AVX512-NEXT: vpshufb %ymm14, %ymm11, %ymm2
+; AVX512-NEXT: vpshufd {{.*#+}} xmm15 = mem[1,1,2,2]
+; AVX512-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,1,1]
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm28 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; AVX512-NEXT: vpandnq %ymm0, %ymm28, %ymm0
-; AVX512-NEXT: vmovdqa 32(%r8), %ymm15
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512-NEXT: vpshufb %ymm1, %ymm15, %ymm14
-; AVX512-NEXT: vinserti64x4 $1, %ymm14, %zmm0, %zmm26
-; AVX512-NEXT: vmovdqa (%r8), %ymm0
-; AVX512-NEXT: vpshufb %ymm1, %ymm0, %ymm1
-; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,1,4,6,5,5]
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,3,2]
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm30 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
-; AVX512-NEXT: vpandnq %ymm0, %ymm30, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm25
-; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX512-NEXT: vpshufb %ymm0, %ymm8, %ymm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm14 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm12[19],zero,ymm12[21],zero,zero,ymm12[20],zero,ymm12[22],zero,ymm12[24],zero,zero,ymm12[23],zero
-; AVX512-NEXT: vpshufb %ymm0, %ymm12, %ymm12
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
-; AVX512-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm0, %ymm11, %ymm11
-; AVX512-NEXT: vpshufb %ymm0, %ymm6, %ymm2
-; AVX512-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,ymm6[26],zero,ymm6[28],zero,ymm6[30],zero,zero,ymm6[29],zero,ymm6[31],zero,zero
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3],xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm13 = ymm7[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,ymm4[27],zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30],zero
-; AVX512-NEXT: vmovdqa64 %xmm16, %xmm1
-; AVX512-NEXT: vmovdqa64 %xmm31, %xmm4
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm1 = [4,0,5,5,5,5,0,6,6,6,6,0,7,7,7,7]
-; AVX512-NEXT: vpermd %zmm15, %zmm1, %zmm31
-; AVX512-NEXT: vmovdqa64 (%r8), %zmm16
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm1 = [6,6,6,0,7,7,7,7,0,16,16,16,16,0,17,17]
-; AVX512-NEXT: vpermi2d %zmm15, %zmm16, %zmm1
-; AVX512-NEXT: vmovdqa64 %xmm17, %xmm15
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm15[0],xmm5[0],xmm15[1],xmm5[1],xmm15[2],xmm5[2],xmm15[3],xmm5[3],xmm15[4],xmm5[4],xmm15[5],xmm5[5],xmm15[6],xmm5[6],xmm15[7],xmm5[7]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512-NEXT: vpshufb %xmm15, %xmm4, %xmm4
-; AVX512-NEXT: vpshufb %xmm15, %xmm5, %xmm5
-; AVX512-NEXT: vinserti32x4 $2, %xmm27, %zmm5, %zmm5
-; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512-NEXT: vmovdqa64 %xmm18, %xmm15
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm15[0],xmm9[0],xmm15[1],xmm9[1],xmm15[2],xmm9[2],xmm15[3],xmm9[3],xmm15[4],xmm9[4],xmm15[5],xmm9[5],xmm15[6],xmm9[6],xmm15[7],xmm9[7]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm15 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512-NEXT: vpshufb %xmm15, %xmm10, %xmm10
+; AVX512-NEXT: vpandnq %ymm15, %ymm28, %ymm15
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm15, %zmm2
+; AVX512-NEXT: vmovdqa (%r8), %ymm15
+; AVX512-NEXT: vpshufb %ymm14, %ymm15, %ymm14
+; AVX512-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,2,1,1,4,6,5,5]
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm29 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
+; AVX512-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,3,2]
+; AVX512-NEXT: vpandnq %ymm15, %ymm29, %ymm15
+; AVX512-NEXT: vinserti64x4 $1, %ymm15, %zmm14, %zmm14
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512-NEXT: vpshufb %xmm7, %xmm6, %xmm6
+; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1]
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3],xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512-NEXT: vpshufb %xmm12, %xmm10, %xmm10
; AVX512-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
-; AVX512-NEXT: vpshufb %xmm15, %xmm9, %xmm9
-; AVX512-NEXT: vinserti32x4 $2, %xmm29, %zmm9, %zmm9
-; AVX512-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm15 = mem[0,0,1,1]
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm15 # 32-byte Folded Reload
-; AVX512-NEXT: vpermq {{.*#+}} ymm17 = ymm20[0,0,1,1]
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm17 # 32-byte Folded Reload
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm18 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512-NEXT: vpternlogq $226, %zmm15, %zmm18, %zmm17
-; AVX512-NEXT: vpternlogq $248, %zmm28, %zmm17, %zmm26
-; AVX512-NEXT: vpermq {{.*#+}} zmm15 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512-NEXT: vpermq {{.*#+}} zmm17 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm20 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975]
-; AVX512-NEXT: vpternlogq $248, %zmm20, %zmm15, %zmm17
-; AVX512-NEXT: vpandq %ymm20, %ymm8, %ymm8
-; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm11, %zmm8
-; AVX512-NEXT: vpermq {{.*#+}} zmm11 = zmm24[2,2,3,3,6,6,7,7]
-; AVX512-NEXT: vporq %zmm11, %zmm8, %zmm8
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512-NEXT: vpternlogq $226, %zmm17, %zmm11, %zmm8
-; AVX512-NEXT: vpternlogd $184, %zmm8, %zmm30, %zmm31
-; AVX512-NEXT: vpor %ymm2, %ymm14, %ymm2
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm19, %zmm2
-; AVX512-NEXT: vpternlogq $248, %ymm20, %ymm13, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm21, %zmm0
-; AVX512-NEXT: vpternlogq $226, %zmm2, %zmm18, %zmm0
-; AVX512-NEXT: vpternlogq $248, %ymm20, %ymm12, %ymm6
-; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm6, %zmm2
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm3
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512-NEXT: vpternlogq $226, %zmm2, %zmm11, %zmm3
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm25
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm1
-; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm5[0,0,1,1,4,4,5,5]
-; AVX512-NEXT: vpermq {{.*#+}} zmm2 = zmm9[0,0,1,1,4,4,5,5]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
+; AVX512-NEXT: vmovdqa64 (%r8), %zmm15
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm30 = [4,0,5,5,5,5,0,6,6,6,6,0,7,7,7,7]
+; AVX512-NEXT: vpermd %zmm11, %zmm30, %zmm30
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm31 = [6,6,6,0,7,7,7,7,0,16,16,16,16,0,17,17]
+; AVX512-NEXT: vpermi2d %zmm11, %zmm15, %zmm31
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
+; AVX512-NEXT: vpshufb %xmm12, %xmm5, %xmm5
+; AVX512-NEXT: vinserti32x4 $2, %xmm4, %zmm5, %zmm4
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3],xmm8[4],xmm3[4],xmm8[5],xmm3[5],xmm8[6],xmm3[6],xmm8[7],xmm3[7]
+; AVX512-NEXT: vpshufb %xmm7, %xmm3, %xmm3
+; AVX512-NEXT: vinserti32x4 $2, %xmm13, %zmm3, %zmm3
+; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm21[0,0,1,1]
+; AVX512-NEXT: vinserti64x4 $1, %ymm20, %zmm5, %zmm5
+; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm23[0,0,1,1]
+; AVX512-NEXT: vinserti64x4 $1, %ymm22, %zmm7, %zmm7
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
+; AVX512-NEXT: vpternlogq $226, %zmm5, %zmm8, %zmm7
+; AVX512-NEXT: vpternlogq $248, %zmm28, %zmm7, %zmm2
+; AVX512-NEXT: vpermq {{.*#+}} zmm5 = zmm24[2,2,3,3,6,6,7,7]
+; AVX512-NEXT: vpermq {{.*#+}} zmm7 = zmm26[2,2,3,3,6,6,7,7]
+; AVX512-NEXT: vporq %zmm5, %zmm7, %zmm5
+; AVX512-NEXT: vpermq {{.*#+}} zmm7 = zmm25[2,2,3,3,6,6,7,7]
+; AVX512-NEXT: vpermq {{.*#+}} zmm9 = zmm27[2,2,3,3,6,6,7,7]
+; AVX512-NEXT: vporq %zmm7, %zmm9, %zmm7
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512-NEXT: vpternlogq $226, %zmm5, %zmm9, %zmm7
+; AVX512-NEXT: vpternlogd $184, %zmm7, %zmm29, %zmm30
+; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm17[2,2,3,3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm16, %zmm5
+; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm19[2,2,3,3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm18, %zmm7
+; AVX512-NEXT: vpternlogq $226, %zmm5, %zmm8, %zmm7
+; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm1, %zmm1
+; AVX512-NEXT: vpternlogq $226, %zmm0, %zmm9, %zmm1
+; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm14
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm31
+; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm4[0,0,1,1,4,4,5,5]
+; AVX512-NEXT: vpermq {{.*#+}} zmm1 = zmm3[0,0,1,1,4,4,5,5]
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
-; AVX512-NEXT: vpermd %zmm16, %zmm0, %zmm0
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512-NEXT: vmovdqa64 %zmm25, 64(%r9)
+; AVX512-NEXT: vpermd %zmm15, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512-NEXT: vmovdqa64 %zmm14, 64(%r9)
; AVX512-NEXT: vmovdqa64 %zmm0, (%r9)
-; AVX512-NEXT: vmovdqa64 %zmm1, 128(%r9)
-; AVX512-NEXT: vmovdqa64 %zmm31, 256(%r9)
-; AVX512-NEXT: vmovdqa64 %zmm26, 192(%r9)
+; AVX512-NEXT: vmovdqa64 %zmm31, 128(%r9)
+; AVX512-NEXT: vmovdqa64 %zmm30, 256(%r9)
+; AVX512-NEXT: vmovdqa64 %zmm2, 192(%r9)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: store_i8_stride5_vf64:
; AVX512-FCP: # %bb.0:
-; AVX512-FCP-NEXT: subq $24, %rsp
-; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %ymm3
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm0
+; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %ymm0
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
+; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm0, %ymm2
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm2
-; AVX512-FCP-NEXT: vpor %ymm0, %ymm2, %ymm0
-; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm0
-; AVX512-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %xmm4
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm4, %xmm2
-; AVX512-FCP-NEXT: vmovdqa64 %xmm4, %xmm19
-; AVX512-FCP-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %ymm7
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm2
-; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %ymm9
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
-; AVX512-FCP-NEXT: vpshufb %ymm15, %ymm9, %ymm4
-; AVX512-FCP-NEXT: vpor %ymm2, %ymm4, %ymm2
-; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %xmm12
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
-; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm12, %xmm2
-; AVX512-FCP-NEXT: vmovdqa64 %xmm4, %xmm25
-; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %xmm10
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm10, %xmm4
-; AVX512-FCP-NEXT: vmovdqa64 %xmm6, %xmm26
-; AVX512-FCP-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm21
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm22
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm9[27],zero,zero,ymm9[26],zero,ymm9[28],zero,ymm9[30],zero,zero,ymm9[29],zero,ymm9[31],zero,zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm7[19],zero,ymm7[21],zero,zero,ymm7[20],zero,ymm7[22],zero,ymm7[24],zero,zero,ymm7[23],zero
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm23
-; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm4
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm0
-; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm14
-; AVX512-FCP-NEXT: vpshufb %ymm15, %ymm14, %ymm1
-; AVX512-FCP-NEXT: vporq %ymm0, %ymm1, %ymm24
-; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm15
-; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm15, %ymm0
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm6
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm6, %ymm1
-; AVX512-FCP-NEXT: vporq %ymm0, %ymm1, %ymm20
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm1
-; AVX512-FCP-NEXT: vmovdqa64 %xmm0, %xmm16
-; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm3
-; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm3, %xmm2
-; AVX512-FCP-NEXT: vporq %xmm1, %xmm2, %xmm28
-; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm1
-; AVX512-FCP-NEXT: vmovdqa64 %xmm25, %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm8
-; AVX512-FCP-NEXT: vmovdqa64 %xmm1, %xmm18
-; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm1
-; AVX512-FCP-NEXT: vmovdqa64 %xmm26, %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm11
-; AVX512-FCP-NEXT: vmovdqa64 %xmm1, %xmm17
-; AVX512-FCP-NEXT: vporq %xmm8, %xmm11, %xmm29
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [1,1,2,2,2,2,2,2]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm3
+; AVX512-FCP-NEXT: vporq %ymm2, %ymm3, %ymm17
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[8],zero,xmm3[u,7],zero,xmm3[9],zero,xmm3[u],zero,xmm3[u,10],zero,xmm3[12],zero,xmm3[u,11]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm31
+; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %xmm15
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,xmm15[8,u],zero,xmm15[7],zero,xmm15[9,u,11,u],zero,xmm15[10],zero,xmm15[12,u],zero
+; AVX512-FCP-NEXT: vporq %xmm2, %xmm4, %xmm18
+; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm4
+; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %ymm11
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm6
+; AVX512-FCP-NEXT: vporq %ymm4, %ymm6, %ymm19
+; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %xmm6
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm4
+; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm30
+; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %xmm7
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm7, %xmm8
+; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm29
+; AVX512-FCP-NEXT: vporq %xmm4, %xmm8, %xmm20
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128]
+; AVX512-FCP-NEXT: # ymm13 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm13, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm21
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30,27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30]
+; AVX512-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm4
+; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm28
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0,19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0]
+; AVX512-FCP-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm22
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512-FCP-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm11, %ymm0
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
+; AVX512-FCP-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm8
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm24
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0,25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0]
+; AVX512-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm0
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
+; AVX512-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm11, %ymm2
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm26
+; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm2
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm0
+; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm11
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm10
+; AVX512-FCP-NEXT: vporq %ymm0, %ymm10, %ymm23
+; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm11, %ymm1
+; AVX512-FCP-NEXT: vporq %ymm0, %ymm1, %ymm25
+; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm10
+; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm10, %ymm0
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm12
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm12, %ymm1
+; AVX512-FCP-NEXT: vporq %ymm0, %ymm1, %ymm27
+; AVX512-FCP-NEXT: vpshufb %ymm13, %ymm12, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm10, %ymm1
+; AVX512-FCP-NEXT: vporq %ymm0, %ymm1, %ymm16
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm9
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm9[8],zero,xmm9[u,7],zero,xmm9[9],zero,xmm9[u],zero,xmm9[u,10],zero,xmm9[12],zero,xmm9[u,11]
+; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm14
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,xmm14[8,u],zero,xmm14[7],zero,xmm14[9,u,11,u],zero,xmm14[10],zero,xmm14[12,u],zero
+; AVX512-FCP-NEXT: vpor %xmm0, %xmm1, %xmm3
+; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm13
+; AVX512-FCP-NEXT: vmovdqa64 %xmm30, %xmm0
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm13, %xmm1
+; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm0
+; AVX512-FCP-NEXT: vmovdqa64 %xmm29, %xmm5
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm5
+; AVX512-FCP-NEXT: vpor %xmm1, %xmm5, %xmm5
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm11, %ymm1
+; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm2
+; AVX512-FCP-NEXT: vpor %ymm1, %ymm2, %ymm11
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm12[26],zero,ymm12[28],zero,zero,ymm12[27],zero,ymm12[29],zero,ymm12[31],zero,zero,ymm12[30],zero
+; AVX512-FCP-NEXT: vmovdqa64 %ymm28, %ymm2
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm10, %ymm2
; AVX512-FCP-NEXT: vmovdqa 32(%r8), %ymm8
-; AVX512-FCP-NEXT: vpermd %ymm8, %ymm11, %ymm11
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm25 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; AVX512-FCP-NEXT: vpandnq %ymm11, %ymm25, %ymm11
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm8, %ymm13
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm11, %zmm26
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm31 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm30 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
-; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm11
-; AVX512-FCP-NEXT: vpermd %ymm11, %ymm31, %ymm27
-; AVX512-FCP-NEXT: vpandnq %ymm27, %ymm30, %ymm27
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm11, %ymm0
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm27, %zmm0, %zmm27
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm7
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm5
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
-; AVX512-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm9, %ymm9
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm14, %ymm4
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,ymm14[26],zero,ymm14[28],zero,ymm14[30],zero,zero,ymm14[29],zero,ymm14[31],zero,zero
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3],xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm15[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm15[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[21],zero,zero,ymm6[20],zero,ymm6[22],zero,ymm6[24],zero,zero,ymm6[23],zero,ymm6[25],zero,zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[26],zero,ymm6[28],zero,zero,ymm6[27],zero,ymm6[29],zero,ymm6[31],zero,zero,ymm6[30],zero
-; AVX512-FCP-NEXT: vmovdqa64 %xmm19, %xmm1
-; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm16, %xmm1
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm3, %xmm1
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm28, %zmm1, %zmm28
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm7[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm9[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm13[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm5[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm18, %xmm3
-; AVX512-FCP-NEXT: vmovdqa64 %xmm17, %xmm5
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm10, %xmm10
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm5, %xmm3
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm29, %zmm3, %zmm3
+; AVX512-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm8, %ymm2
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [1,1,2,2,2,2,2,2]
+; AVX512-FCP-NEXT: vpermd %ymm8, %ymm10, %ymm10
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm12 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
+; AVX512-FCP-NEXT: vpandn %ymm10, %ymm12, %ymm10
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm10, %zmm2
+; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm10
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm28 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm29 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
+; AVX512-FCP-NEXT: vpermd %ymm10, %ymm28, %ymm30
+; AVX512-FCP-NEXT: vpandnq %ymm30, %ymm29, %ymm30
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm4
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm30, %zmm4, %zmm4
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm9[0],xmm14[0],xmm9[1],xmm14[1],xmm9[2],xmm14[2],xmm9[3],xmm14[3],xmm9[4],xmm14[4],xmm9[5],xmm14[5],xmm9[6],xmm14[6],xmm9[7],xmm14[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm3, %zmm9, %zmm3
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3],xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm5, %zmm0, %zmm0
; AVX512-FCP-NEXT: vmovdqa64 (%r8), %zmm5
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm5, %zmm11
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm21 = zmm21[2,2,3,3,6,6,7,7]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm29 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975]
-; AVX512-FCP-NEXT: vpternlogq $248, %zmm29, %zmm21, %zmm22
-; AVX512-FCP-NEXT: vpandq %ymm29, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm7, %zmm1
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512-FCP-NEXT: vporq %zmm7, %zmm1, %zmm1
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm22, %zmm7, %zmm1
-; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm31, %zmm8
-; AVX512-FCP-NEXT: vpternlogd $184, %zmm1, %zmm30, %zmm8
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm5, %zmm10
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm6, %xmm6
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm7 = [2,2,3,3,8,8,9,9]
+; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm7, %zmm11
+; AVX512-FCP-NEXT: vmovdqa64 %xmm31, %xmm6
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3],xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm6, %xmm6
+; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm7, %zmm1
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm21[2,2,3,3,6,6,7,7]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm22[2,2,3,3,6,6,7,7]
+; AVX512-FCP-NEXT: vporq %zmm6, %zmm7, %zmm6
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm24[2,2,3,3,6,6,7,7]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm26[2,2,3,3,6,6,7,7]
+; AVX512-FCP-NEXT: vporq %zmm7, %zmm9, %zmm7
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm6, %zmm9, %zmm7
+; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm28, %zmm8
+; AVX512-FCP-NEXT: vpternlogd $184, %zmm7, %zmm29, %zmm8
; AVX512-FCP-NEXT: vmovdqa64 %zmm8, 256(%r9)
-; AVX512-FCP-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm1 = mem[0,0,1,1]
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm8 = mem[0,0,1,1]
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm1, %zmm16, %zmm8
-; AVX512-FCP-NEXT: vpternlogq $248, %zmm25, %zmm8, %zmm26
-; AVX512-FCP-NEXT: vpor %ymm4, %ymm9, %ymm1
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm24, %zmm1
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm29, %ymm12, %ymm2
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm20, %zmm2
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm1, %zmm16, %zmm2
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm29, %ymm13, %ymm14
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm14, %zmm1
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm6
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm6, %zmm0
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm1, %zmm7, %zmm0
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm27
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm1 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
-; AVX512-FCP-NEXT: vpermd %zmm5, %zmm1, %zmm1
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm28[0,0,1,1,4,4,5,5]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm3[0,0,1,1,4,4,5,5]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
-; AVX512-FCP-NEXT: vpermd %zmm11, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512-FCP-NEXT: vmovdqa64 %zmm27, 64(%r9)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm0, (%r9)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 128(%r9)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm26, 192(%r9)
-; AVX512-FCP-NEXT: addq $24, %rsp
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm18[0,0,1,1]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm6, %zmm6
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm20[0,0,1,1]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm19, %zmm7, %zmm7
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm6, %zmm8, %zmm7
+; AVX512-FCP-NEXT: vpternlogq $248, %zmm12, %zmm7, %zmm2
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm25[2,2,3,3]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm23, %zmm6
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm16[2,2,3,3]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm27, %zmm7
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm6, %zmm8, %zmm7
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm11, %zmm9, %zmm1
+; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm4
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,0,1,1,4,4,5,5]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,0,1,1,4,4,5,5]
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm0
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm3 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
+; AVX512-FCP-NEXT: vpermd %zmm10, %zmm3, %zmm3
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm3
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
+; AVX512-FCP-NEXT: vpermd %zmm5, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 128(%r9)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm4, 64(%r9)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm3, (%r9)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm2, 192(%r9)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: store_i8_stride5_vf64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm3
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm15 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
-; AVX512DQ-NEXT: vpshufb %ymm15, %ymm3, %ymm0
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm9 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
-; AVX512DQ-NEXT: vpshufb %ymm9, %ymm2, %ymm1
+; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm11
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm11, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm18
+; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm5, %ymm1
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm19
+; AVX512DQ-NEXT: vporq %ymm0, %ymm1, %ymm20
+; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm12
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm12, %xmm0
+; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm28
+; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm10
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm2 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm10, %xmm1
+; AVX512DQ-NEXT: vmovdqa64 %xmm2, %xmm29
+; AVX512DQ-NEXT: vporq %xmm0, %xmm1, %xmm21
+; AVX512DQ-NEXT: vmovdqa 32(%rcx), %ymm15
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512DQ-NEXT: vpshufb %ymm8, %ymm15, %ymm0
+; AVX512DQ-NEXT: vmovdqa 32(%rdx), %ymm13
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm13, %ymm1
+; AVX512DQ-NEXT: vporq %ymm0, %ymm1, %ymm22
+; AVX512DQ-NEXT: vmovdqa 32(%rcx), %xmm6
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm6, %xmm0
+; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm30
+; AVX512DQ-NEXT: vmovdqa 32(%rdx), %xmm7
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm2 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm7, %xmm1
+; AVX512DQ-NEXT: vmovdqa64 %xmm2, %xmm31
+; AVX512DQ-NEXT: vporq %xmm0, %xmm1, %xmm23
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
+; AVX512DQ-NEXT: # ymm9 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm9, %ymm5, %ymm0
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128]
+; AVX512DQ-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm5, %ymm1
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm24
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30,27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30]
+; AVX512DQ-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm5, %ymm11, %ymm1
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0,19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0]
+; AVX512DQ-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm11, %ymm11
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm11, %zmm26
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512DQ-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm13, %ymm1
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
+; AVX512DQ-NEXT: # ymm0 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm15, %ymm14
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm14, %zmm25
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0,25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0]
+; AVX512DQ-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm15, %ymm14
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
+; AVX512DQ-NEXT: # ymm15 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm15, %ymm13, %ymm13
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm14, %zmm13, %zmm27
+; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm13
+; AVX512DQ-NEXT: vpshufb %ymm8, %ymm13, %ymm8
+; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm14
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm14, %ymm3
+; AVX512DQ-NEXT: vporq %ymm8, %ymm3, %ymm16
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm13, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm15, %ymm14, %ymm3
+; AVX512DQ-NEXT: vporq %ymm0, %ymm3, %ymm17
+; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm8
+; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm15
+; AVX512DQ-NEXT: vpshufb %ymm15, %ymm8, %ymm15
+; AVX512DQ-NEXT: vporq %ymm0, %ymm15, %ymm18
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm8, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512DQ-NEXT: vporq %ymm0, %ymm2, %ymm19
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm14, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm13, %ymm1
; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm1
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm5 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512DQ-NEXT: vpshufb %xmm5, %xmm1, %xmm0
-; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm16
-; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm4
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm14 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm4, %xmm1
-; AVX512DQ-NEXT: vmovdqa64 %xmm4, %xmm31
-; AVX512DQ-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa 32(%rcx), %ymm8
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm8, %ymm4
-; AVX512DQ-NEXT: vmovdqa 32(%rdx), %ymm11
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm11, %ymm10
-; AVX512DQ-NEXT: vpor %ymm4, %ymm10, %ymm4
-; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa 32(%rcx), %xmm13
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm6 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
-; AVX512DQ-NEXT: vpshufb %xmm6, %xmm13, %xmm4
-; AVX512DQ-NEXT: vmovdqa64 %xmm6, %xmm25
-; AVX512DQ-NEXT: vmovdqa 32(%rdx), %xmm10
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm6 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512DQ-NEXT: vpshufb %xmm6, %xmm10, %xmm12
-; AVX512DQ-NEXT: vmovdqa64 %xmm6, %xmm26
-; AVX512DQ-NEXT: vporq %xmm4, %xmm12, %xmm20
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm22
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[26],zero,ymm2[28],zero,zero,ymm2[27],zero,ymm2[29],zero,ymm2[31],zero,zero,ymm2[30],zero
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero,ymm2[25],zero,zero
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm23
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[27],zero,zero,ymm11[26],zero,ymm11[28],zero,ymm11[30],zero,zero,ymm11[29],zero,ymm11[31],zero,zero
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm8[19],zero,ymm8[21],zero,zero,ymm8[20],zero,ymm8[22],zero,ymm8[24],zero,zero,ymm8[23],zero
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm24
-; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm12
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm12, %ymm0
-; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm6
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm6, %ymm1
-; AVX512DQ-NEXT: vporq %ymm0, %ymm1, %ymm19
-; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm7
-; AVX512DQ-NEXT: vpshufb %ymm15, %ymm7, %ymm2
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512DQ-NEXT: vpshufb %ymm9, %ymm4, %ymm3
-; AVX512DQ-NEXT: vporq %ymm2, %ymm3, %ymm21
-; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512DQ-NEXT: vpshufb %xmm5, %xmm0, %xmm3
-; AVX512DQ-NEXT: vmovdqa64 %xmm0, %xmm17
-; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm5
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm5, %xmm9
-; AVX512DQ-NEXT: vporq %xmm3, %xmm9, %xmm27
-; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm1
-; AVX512DQ-NEXT: vmovdqa64 %xmm25, %xmm0
-; AVX512DQ-NEXT: vpshufb %xmm0, %xmm1, %xmm0
-; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm18
-; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm9
-; AVX512DQ-NEXT: vmovdqa64 %xmm26, %xmm1
-; AVX512DQ-NEXT: vpshufb %xmm1, %xmm9, %xmm15
-; AVX512DQ-NEXT: vporq %xmm0, %xmm15, %xmm29
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,2,2]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,1]
+; AVX512DQ-NEXT: vpshufb %ymm9, %ymm8, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm5, %ymm3, %ymm2
+; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm5
+; AVX512DQ-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm8
+; AVX512DQ-NEXT: vmovdqa64 %xmm28, %xmm2
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm5, %xmm2
+; AVX512DQ-NEXT: vmovdqa64 %xmm29, %xmm3
+; AVX512DQ-NEXT: vpshufb %xmm3, %xmm9, %xmm3
+; AVX512DQ-NEXT: vpor %xmm2, %xmm3, %xmm4
+; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm3
+; AVX512DQ-NEXT: vmovdqa 32(%r8), %ymm11
+; AVX512DQ-NEXT: vmovdqa64 %xmm30, %xmm2
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm8, %xmm2
+; AVX512DQ-NEXT: vmovdqa64 %xmm31, %xmm13
+; AVX512DQ-NEXT: vpshufb %xmm13, %xmm3, %xmm13
+; AVX512DQ-NEXT: vpor %xmm2, %xmm13, %xmm13
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm14 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
+; AVX512DQ-NEXT: vpshufb %ymm14, %ymm11, %ymm2
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm15 = mem[1,1,2,2]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,1,1]
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm28 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; AVX512DQ-NEXT: vpandnq %ymm0, %ymm28, %ymm0
-; AVX512DQ-NEXT: vmovdqa 32(%r8), %ymm15
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm15, %ymm14
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm14, %zmm0, %zmm26
-; AVX512DQ-NEXT: vmovdqa (%r8), %ymm0
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm0, %ymm1
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,1,4,6,5,5]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,3,2]
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm30 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
-; AVX512DQ-NEXT: vpandnq %ymm0, %ymm30, %ymm0
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm25
-; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm8, %ymm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm14 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm12[19],zero,ymm12[21],zero,zero,ymm12[20],zero,ymm12[22],zero,ymm12[24],zero,zero,ymm12[23],zero
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm12, %ymm12
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
-; AVX512DQ-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm11, %ymm11
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm6, %ymm2
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,ymm6[26],zero,ymm6[28],zero,ymm6[30],zero,zero,ymm6[29],zero,ymm6[31],zero,zero
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3],xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm13 = ymm7[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,ymm4[27],zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30],zero
-; AVX512DQ-NEXT: vmovdqa64 %xmm16, %xmm1
-; AVX512DQ-NEXT: vmovdqa64 %xmm31, %xmm4
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm1 = [4,0,5,5,5,5,0,6,6,6,6,0,7,7,7,7]
-; AVX512DQ-NEXT: vpermd %zmm15, %zmm1, %zmm31
-; AVX512DQ-NEXT: vmovdqa64 (%r8), %zmm16
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm1 = [6,6,6,0,7,7,7,7,0,16,16,16,16,0,17,17]
-; AVX512DQ-NEXT: vpermi2d %zmm15, %zmm16, %zmm1
-; AVX512DQ-NEXT: vmovdqa64 %xmm17, %xmm15
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm15[0],xmm5[0],xmm15[1],xmm5[1],xmm15[2],xmm5[2],xmm15[3],xmm5[3],xmm15[4],xmm5[4],xmm15[5],xmm5[5],xmm15[6],xmm5[6],xmm15[7],xmm5[7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm4, %xmm4
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm5, %xmm5
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm27, %zmm5, %zmm5
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512DQ-NEXT: vmovdqa64 %xmm18, %xmm15
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm15[0],xmm9[0],xmm15[1],xmm9[1],xmm15[2],xmm9[2],xmm15[3],xmm9[3],xmm15[4],xmm9[4],xmm15[5],xmm9[5],xmm15[6],xmm9[6],xmm15[7],xmm9[7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm15 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm10, %xmm10
+; AVX512DQ-NEXT: vpandnq %ymm15, %ymm28, %ymm15
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm15, %zmm2
+; AVX512DQ-NEXT: vmovdqa (%r8), %ymm15
+; AVX512DQ-NEXT: vpshufb %ymm14, %ymm15, %ymm14
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,2,1,1,4,6,5,5]
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm29 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,3,2]
+; AVX512DQ-NEXT: vpandnq %ymm15, %ymm29, %ymm15
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm15, %zmm14, %zmm14
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512DQ-NEXT: vpshufb %xmm7, %xmm6, %xmm6
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1]
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3],xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512DQ-NEXT: vpshufb %xmm12, %xmm10, %xmm10
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm9, %xmm9
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm29, %zmm9, %zmm9
-; AVX512DQ-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm15 = mem[0,0,1,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm15 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm17 = ymm20[0,0,1,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm17 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm18 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm15, %zmm18, %zmm17
-; AVX512DQ-NEXT: vpternlogq $248, %zmm28, %zmm17, %zmm26
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm15 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm17 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm20 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975]
-; AVX512DQ-NEXT: vpternlogq $248, %zmm20, %zmm15, %zmm17
-; AVX512DQ-NEXT: vpandq %ymm20, %ymm8, %ymm8
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm11, %zmm8
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm11 = zmm24[2,2,3,3,6,6,7,7]
-; AVX512DQ-NEXT: vporq %zmm11, %zmm8, %zmm8
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm17, %zmm11, %zmm8
-; AVX512DQ-NEXT: vpternlogd $184, %zmm8, %zmm30, %zmm31
-; AVX512DQ-NEXT: vpor %ymm2, %ymm14, %ymm2
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm19, %zmm2
-; AVX512DQ-NEXT: vpternlogq $248, %ymm20, %ymm13, %ymm0
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm21, %zmm0
-; AVX512DQ-NEXT: vpternlogq $226, %zmm2, %zmm18, %zmm0
-; AVX512DQ-NEXT: vpternlogq $248, %ymm20, %ymm12, %ymm6
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm6, %zmm2
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm3
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512DQ-NEXT: vpternlogq $226, %zmm2, %zmm11, %zmm3
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm25
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm1
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm5[0,0,1,1,4,4,5,5]
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm2 = zmm9[0,0,1,1,4,4,5,5]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
+; AVX512DQ-NEXT: vmovdqa64 (%r8), %zmm15
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm30 = [4,0,5,5,5,5,0,6,6,6,6,0,7,7,7,7]
+; AVX512DQ-NEXT: vpermd %zmm11, %zmm30, %zmm30
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm31 = [6,6,6,0,7,7,7,7,0,16,16,16,16,0,17,17]
+; AVX512DQ-NEXT: vpermi2d %zmm11, %zmm15, %zmm31
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
+; AVX512DQ-NEXT: vpshufb %xmm12, %xmm5, %xmm5
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm4, %zmm5, %zmm4
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3],xmm8[4],xmm3[4],xmm8[5],xmm3[5],xmm8[6],xmm3[6],xmm8[7],xmm3[7]
+; AVX512DQ-NEXT: vpshufb %xmm7, %xmm3, %xmm3
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm13, %zmm3, %zmm3
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm21[0,0,1,1]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm20, %zmm5, %zmm5
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm23[0,0,1,1]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm22, %zmm7, %zmm7
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
+; AVX512DQ-NEXT: vpternlogq $226, %zmm5, %zmm8, %zmm7
+; AVX512DQ-NEXT: vpternlogq $248, %zmm28, %zmm7, %zmm2
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm5 = zmm24[2,2,3,3,6,6,7,7]
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm7 = zmm26[2,2,3,3,6,6,7,7]
+; AVX512DQ-NEXT: vporq %zmm5, %zmm7, %zmm5
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm7 = zmm25[2,2,3,3,6,6,7,7]
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm9 = zmm27[2,2,3,3,6,6,7,7]
+; AVX512DQ-NEXT: vporq %zmm7, %zmm9, %zmm7
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512DQ-NEXT: vpternlogq $226, %zmm5, %zmm9, %zmm7
+; AVX512DQ-NEXT: vpternlogd $184, %zmm7, %zmm29, %zmm30
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm17[2,2,3,3]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm16, %zmm5
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm19[2,2,3,3]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm18, %zmm7
+; AVX512DQ-NEXT: vpternlogq $226, %zmm5, %zmm8, %zmm7
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpternlogq $226, %zmm0, %zmm9, %zmm1
+; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm14
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm31
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm4[0,0,1,1,4,4,5,5]
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm1 = zmm3[0,0,1,1,4,4,5,5]
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
-; AVX512DQ-NEXT: vpermd %zmm16, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512DQ-NEXT: vmovdqa64 %zmm25, 64(%r9)
+; AVX512DQ-NEXT: vpermd %zmm15, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512DQ-NEXT: vmovdqa64 %zmm14, 64(%r9)
; AVX512DQ-NEXT: vmovdqa64 %zmm0, (%r9)
-; AVX512DQ-NEXT: vmovdqa64 %zmm1, 128(%r9)
-; AVX512DQ-NEXT: vmovdqa64 %zmm31, 256(%r9)
-; AVX512DQ-NEXT: vmovdqa64 %zmm26, 192(%r9)
+; AVX512DQ-NEXT: vmovdqa64 %zmm31, 128(%r9)
+; AVX512DQ-NEXT: vmovdqa64 %zmm30, 256(%r9)
+; AVX512DQ-NEXT: vmovdqa64 %zmm2, 192(%r9)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: store_i8_stride5_vf64:
; AVX512DQ-FCP: # %bb.0:
-; AVX512DQ-FCP-NEXT: subq $24, %rsp
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm0, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm2
-; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm2, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm4, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm4, %xmm19
-; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %ymm7
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %ymm9
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm9, %ymm4
-; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm4, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %xmm12
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm12, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm4, %xmm25
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %xmm10
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm10, %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm6, %xmm26
-; AVX512DQ-FCP-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm21
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm22
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm9[27],zero,zero,ymm9[26],zero,ymm9[28],zero,ymm9[30],zero,zero,ymm9[29],zero,ymm9[31],zero,zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm7[19],zero,ymm7[21],zero,zero,ymm7[20],zero,ymm7[22],zero,ymm7[24],zero,zero,ymm7[23],zero
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm23
-; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm4
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm14
-; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm14, %ymm1
-; AVX512DQ-FCP-NEXT: vporq %ymm0, %ymm1, %ymm24
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm15
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm15, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm6
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm6, %ymm1
-; AVX512DQ-FCP-NEXT: vporq %ymm0, %ymm1, %ymm20
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm0, %xmm16
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm3
-; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm3, %xmm2
-; AVX512DQ-FCP-NEXT: vporq %xmm1, %xmm2, %xmm28
-; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm25, %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm8
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm1, %xmm18
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm26, %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm11
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm1, %xmm17
-; AVX512DQ-FCP-NEXT: vporq %xmm8, %xmm11, %xmm29
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [1,1,2,2,2,2,2,2]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm3
+; AVX512DQ-FCP-NEXT: vporq %ymm2, %ymm3, %ymm17
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[8],zero,xmm3[u,7],zero,xmm3[9],zero,xmm3[u],zero,xmm3[u,10],zero,xmm3[12],zero,xmm3[u,11]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm31
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %xmm15
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,xmm15[8,u],zero,xmm15[7],zero,xmm15[9,u,11,u],zero,xmm15[10],zero,xmm15[12,u],zero
+; AVX512DQ-FCP-NEXT: vporq %xmm2, %xmm4, %xmm18
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm4
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %ymm11
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm6
+; AVX512DQ-FCP-NEXT: vporq %ymm4, %ymm6, %ymm19
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %xmm6
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm4
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm30
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm7, %xmm8
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm29
+; AVX512DQ-FCP-NEXT: vporq %xmm4, %xmm8, %xmm20
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128]
+; AVX512DQ-FCP-NEXT: # ymm13 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm21
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30,27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30]
+; AVX512DQ-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm4
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm28
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0,19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0]
+; AVX512DQ-FCP-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm22
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512DQ-FCP-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm11, %ymm0
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
+; AVX512DQ-FCP-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm8
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm24
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0,25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0]
+; AVX512DQ-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm0
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
+; AVX512DQ-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm11, %ymm2
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm26
+; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm11
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm10
+; AVX512DQ-FCP-NEXT: vporq %ymm0, %ymm10, %ymm23
+; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm11, %ymm1
+; AVX512DQ-FCP-NEXT: vporq %ymm0, %ymm1, %ymm25
+; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm10
+; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm10, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm12
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm12, %ymm1
+; AVX512DQ-FCP-NEXT: vporq %ymm0, %ymm1, %ymm27
+; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm12, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm10, %ymm1
+; AVX512DQ-FCP-NEXT: vporq %ymm0, %ymm1, %ymm16
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm9
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm9[8],zero,xmm9[u,7],zero,xmm9[9],zero,xmm9[u],zero,xmm9[u,10],zero,xmm9[12],zero,xmm9[u,11]
+; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm14
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,xmm14[8,u],zero,xmm14[7],zero,xmm14[9,u,11,u],zero,xmm14[10],zero,xmm14[12,u],zero
+; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm1, %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm13
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm30, %xmm0
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm13, %xmm1
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm29, %xmm5
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm5
+; AVX512DQ-FCP-NEXT: vpor %xmm1, %xmm5, %xmm5
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm11, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm2, %ymm11
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm12[26],zero,ymm12[28],zero,zero,ymm12[27],zero,ymm12[29],zero,ymm12[31],zero,zero,ymm12[30],zero
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm28, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm10, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %ymm8
-; AVX512DQ-FCP-NEXT: vpermd %ymm8, %ymm11, %ymm11
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm25 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; AVX512DQ-FCP-NEXT: vpandnq %ymm11, %ymm25, %ymm11
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm8, %ymm13
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm11, %zmm26
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm31 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm30 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
-; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm11
-; AVX512DQ-FCP-NEXT: vpermd %ymm11, %ymm31, %ymm27
-; AVX512DQ-FCP-NEXT: vpandnq %ymm27, %ymm30, %ymm27
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm11, %ymm0
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm27, %zmm0, %zmm27
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm7
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm5
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
-; AVX512DQ-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm9, %ymm9
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm14, %ymm4
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,ymm14[26],zero,ymm14[28],zero,ymm14[30],zero,zero,ymm14[29],zero,ymm14[31],zero,zero
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3],xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm15[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm15[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[21],zero,zero,ymm6[20],zero,ymm6[22],zero,ymm6[24],zero,zero,ymm6[23],zero,ymm6[25],zero,zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[26],zero,ymm6[28],zero,zero,ymm6[27],zero,ymm6[29],zero,ymm6[31],zero,zero,ymm6[30],zero
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm16, %xmm1
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm3, %xmm1
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm28, %zmm1, %zmm28
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm7[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm9[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm13[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm5[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm18, %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm17, %xmm5
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm10, %xmm10
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm5, %xmm3
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm29, %zmm3, %zmm3
+; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm8, %ymm2
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [1,1,2,2,2,2,2,2]
+; AVX512DQ-FCP-NEXT: vpermd %ymm8, %ymm10, %ymm10
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm12 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
+; AVX512DQ-FCP-NEXT: vpandn %ymm10, %ymm12, %ymm10
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm10, %zmm2
+; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm10
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm28 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm29 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
+; AVX512DQ-FCP-NEXT: vpermd %ymm10, %ymm28, %ymm30
+; AVX512DQ-FCP-NEXT: vpandnq %ymm30, %ymm29, %ymm30
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm4
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm30, %zmm4, %zmm4
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm9[0],xmm14[0],xmm9[1],xmm14[1],xmm9[2],xmm14[2],xmm9[3],xmm14[3],xmm9[4],xmm14[4],xmm9[5],xmm14[5],xmm9[6],xmm14[6],xmm9[7],xmm14[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm3, %zmm9, %zmm3
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3],xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm5, %zmm0, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 (%r8), %zmm5
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm5, %zmm11
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm21 = zmm21[2,2,3,3,6,6,7,7]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm29 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %zmm29, %zmm21, %zmm22
-; AVX512DQ-FCP-NEXT: vpandq %ymm29, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm7, %zmm1
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm7, %zmm1, %zmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm22, %zmm7, %zmm1
-; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm31, %zmm8
-; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm1, %zmm30, %zmm8
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm5, %zmm10
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm6, %xmm6
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm7 = [2,2,3,3,8,8,9,9]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm7, %zmm11
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm31, %xmm6
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3],xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm6, %xmm6
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm7, %zmm1
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm21[2,2,3,3,6,6,7,7]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm22[2,2,3,3,6,6,7,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm6, %zmm7, %zmm6
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm24[2,2,3,3,6,6,7,7]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm26[2,2,3,3,6,6,7,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm7, %zmm9, %zmm7
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm6, %zmm9, %zmm7
+; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm28, %zmm8
+; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm7, %zmm29, %zmm8
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, 256(%r9)
-; AVX512DQ-FCP-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm1 = mem[0,0,1,1]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm8 = mem[0,0,1,1]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm1, %zmm16, %zmm8
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %zmm25, %zmm8, %zmm26
-; AVX512DQ-FCP-NEXT: vpor %ymm4, %ymm9, %ymm1
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm24, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm29, %ymm12, %ymm2
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm20, %zmm2
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm1, %zmm16, %zmm2
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm29, %ymm13, %ymm14
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm14, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm6
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm6, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm1, %zmm7, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm27
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm1 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
-; AVX512DQ-FCP-NEXT: vpermd %zmm5, %zmm1, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm28[0,0,1,1,4,4,5,5]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm3[0,0,1,1,4,4,5,5]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
-; AVX512DQ-FCP-NEXT: vpermd %zmm11, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm27, 64(%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, (%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 128(%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm26, 192(%r9)
-; AVX512DQ-FCP-NEXT: addq $24, %rsp
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm18[0,0,1,1]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm6, %zmm6
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm20[0,0,1,1]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm19, %zmm7, %zmm7
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm6, %zmm8, %zmm7
+; AVX512DQ-FCP-NEXT: vpternlogq $248, %zmm12, %zmm7, %zmm2
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm25[2,2,3,3]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm23, %zmm6
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm16[2,2,3,3]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm27, %zmm7
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm6, %zmm8, %zmm7
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm11, %zmm9, %zmm1
+; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm4
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,0,1,1,4,4,5,5]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,0,1,1,4,4,5,5]
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm0
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm3 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
+; AVX512DQ-FCP-NEXT: vpermd %zmm10, %zmm3, %zmm3
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm3
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
+; AVX512DQ-FCP-NEXT: vpermd %zmm5, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 128(%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, 64(%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, (%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, 192(%r9)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: store_i8_stride5_vf64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm3
+; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm2
; AVX512BW-NEXT: vmovdqa (%rcx), %ymm0
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm8 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX512BW-NEXT: vpshufb %ymm8, %ymm0, %ymm2
+; AVX512BW-NEXT: vpshufb %ymm8, %ymm0, %ymm3
; AVX512BW-NEXT: vmovdqa (%rdx), %ymm1
; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm4 = ymm1[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,3,3,6,6,7,7]
; AVX512BW-NEXT: movl $693250386, %eax # imm = 0x29522952
; AVX512BW-NEXT: kmovd %eax, %k1
-; AVX512BW-NEXT: vmovdqu8 %ymm4, %ymm2 {%k1}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512BW-NEXT: vmovdqu8 %ymm4, %ymm3 {%k1}
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
; AVX512BW-NEXT: vmovdqa 32(%rdx), %xmm6
; AVX512BW-NEXT: vmovdqa 32(%rcx), %xmm12
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm12[0],xmm6[0],xmm12[1],xmm6[1],xmm12[2],xmm6[2],xmm12[3],xmm6[3],xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7]
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
; AVX512BW-NEXT: vpshufb %xmm7, %xmm4, %xmm4
; AVX512BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm10
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm10
; AVX512BW-NEXT: vmovdqa (%rsi), %ymm4
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm15 = [11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14]
-; AVX512BW-NEXT: vpshufb %ymm15, %ymm4, %ymm2
+; AVX512BW-NEXT: vpshufb %ymm15, %ymm4, %ymm3
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm5
; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm9 = ymm5[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[2,2,3,3,6,6,7,7]
; AVX512BW-NEXT: movl $1251232404, %eax # imm = 0x4A944A94
; AVX512BW-NEXT: kmovd %eax, %k5
-; AVX512BW-NEXT: vmovdqu8 %ymm9, %ymm2 {%k5}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512BW-NEXT: vmovdqu8 %ymm9, %ymm3 {%k5}
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
; AVX512BW-NEXT: vmovdqa 32(%rsi), %xmm13
; AVX512BW-NEXT: vmovdqa 32(%rdi), %xmm14
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
; AVX512BW-NEXT: vpshufb %xmm9, %xmm11, %xmm11
; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,0,1,1]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm2, %zmm2
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm3, %zmm3
; AVX512BW-NEXT: movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631
; AVX512BW-NEXT: kmovq %rax, %k4
-; AVX512BW-NEXT: vmovdqu8 %zmm10, %zmm2 {%k4}
+; AVX512BW-NEXT: vmovdqu8 %zmm10, %zmm3 {%k4}
; AVX512BW-NEXT: vmovdqa64 32(%r8), %ymm16
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm10 = [6,6,6,6,7,7,7,7,16,16,16,16,16,16,17,17]
-; AVX512BW-NEXT: vpermi2d %zmm16, %zmm3, %zmm10
+; AVX512BW-NEXT: vpermi2d %zmm16, %zmm2, %zmm10
; AVX512BW-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
; AVX512BW-NEXT: kmovq %rax, %k2
-; AVX512BW-NEXT: vmovdqu8 %zmm10, %zmm2 {%k2}
+; AVX512BW-NEXT: vmovdqu8 %zmm10, %zmm3 {%k2}
; AVX512BW-NEXT: vmovdqa64 32(%rdx), %ymm23
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
; AVX512BW-NEXT: vpshufb %ymm10, %ymm23, %ymm17
@@ -5584,21 +5491,21 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm21 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
; AVX512BW-NEXT: vpshufb %xmm21, %xmm13, %xmm13
; AVX512BW-NEXT: vpor %xmm12, %xmm13, %xmm12
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm13 = ymm12[0,0,1,1]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm14 = ymm12[0,0,1,1]
; AVX512BW-NEXT: vmovdqa64 32(%rdi), %ymm25
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm12 = [3,3,3,0,4,4,4,4]
; AVX512BW-NEXT: vpermd %ymm25, %ymm12, %ymm17
; AVX512BW-NEXT: vmovdqa64 32(%rsi), %ymm26
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm14 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm13 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
; AVX512BW-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512BW-NEXT: kmovd %eax, %k3
-; AVX512BW-NEXT: vpshufb %ymm14, %ymm26, %ymm17 {%k3}
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm17, %zmm13, %zmm13
+; AVX512BW-NEXT: vpshufb %ymm13, %ymm26, %ymm17 {%k3}
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm17, %zmm14, %zmm14
; AVX512BW-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512BW-NEXT: kmovq %rax, %k2
-; AVX512BW-NEXT: vmovdqu8 %zmm13, %zmm6 {%k2}
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm13 = [3,3,3,3,0,4,4,4]
-; AVX512BW-NEXT: vpermd %ymm16, %ymm13, %ymm17
+; AVX512BW-NEXT: vmovdqu8 %zmm14, %zmm6 {%k2}
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm14 = [3,3,3,3,0,4,4,4]
+; AVX512BW-NEXT: vpermd %ymm16, %ymm14, %ymm17
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm18 = mem[1,1,2,2]
; AVX512BW-NEXT: vpermq {{.*#+}} ymm18 = ymm18[0,1,1,1]
; AVX512BW-NEXT: vinserti64x4 $1, %ymm17, %zmm18, %zmm17
@@ -5607,32 +5514,28 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vmovdqu8 %zmm17, %zmm6 {%k6}
; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm17 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128]
; AVX512BW-NEXT: # ymm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT: vpshufb %ymm17, %ymm26, %ymm18
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm27 = ymm18[2,2,3,3]
+; AVX512BW-NEXT: vpshufb %ymm17, %ymm26, %ymm27
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm18 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128]
; AVX512BW-NEXT: vpshufb %ymm18, %ymm25, %ymm28
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3]
; AVX512BW-NEXT: vporq %ymm27, %ymm28, %ymm27
; AVX512BW-NEXT: vpshufb %ymm15, %ymm26, %ymm15
; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm25 = ymm25[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm25[2,2,3,3,6,6,7,7]
; AVX512BW-NEXT: vmovdqu8 %ymm25, %ymm15 {%k5}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
; AVX512BW-NEXT: vinserti64x4 $1, %ymm15, %zmm27, %zmm15
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm25 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
; AVX512BW-NEXT: vpshufb %ymm25, %ymm23, %ymm26
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,2,3,3]
; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm27 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
; AVX512BW-NEXT: # ymm27 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpshufb %ymm27, %ymm24, %ymm28
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3]
; AVX512BW-NEXT: vporq %ymm26, %ymm28, %ymm26
; AVX512BW-NEXT: vpshufb %ymm8, %ymm24, %ymm8
; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm23 = ymm23[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm23 = ymm23[2,2,3,3,6,6,7,7]
; AVX512BW-NEXT: vmovdqu8 %ymm23, %ymm8 {%k1}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm26, %zmm8
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,2,3,3,6,6,7,7]
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,2,3,3,6,6,7,7]
; AVX512BW-NEXT: vmovdqu8 %zmm15, %zmm8 {%k4}
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm15 = [4,6,5,5,5,5,4,6,6,6,6,6,7,7,7,7]
; AVX512BW-NEXT: vpermd %zmm16, %zmm15, %zmm15
@@ -5661,33 +5564,31 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: kmovq %rax, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm7, %zmm9 {%k1}
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,1,1,2,2,2,2,2,2]
-; AVX512BW-NEXT: vpermd %zmm3, %zmm7, %zmm3
+; AVX512BW-NEXT: vpermd %zmm2, %zmm7, %zmm2
; AVX512BW-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm9 {%k1}
-; AVX512BW-NEXT: vpshufb %ymm25, %ymm1, %ymm3
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512BW-NEXT: vpshufb %ymm27, %ymm0, %ymm7
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512BW-NEXT: vpor %ymm3, %ymm7, %ymm3
-; AVX512BW-NEXT: vpshufb %ymm10, %ymm1, %ymm1
-; AVX512BW-NEXT: vpshufb %ymm11, %ymm0, %ymm0
+; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm9 {%k1}
+; AVX512BW-NEXT: vpshufb %ymm10, %ymm1, %ymm2
+; AVX512BW-NEXT: vpshufb %ymm11, %ymm0, %ymm7
+; AVX512BW-NEXT: vpor %ymm2, %ymm7, %ymm2
+; AVX512BW-NEXT: vpshufb %ymm25, %ymm1, %ymm1
+; AVX512BW-NEXT: vpshufb %ymm27, %ymm0, %ymm0
; AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512BW-NEXT: vpermd %ymm5, %ymm12, %ymm1
-; AVX512BW-NEXT: vpshufb %ymm14, %ymm4, %ymm1 {%k3}
-; AVX512BW-NEXT: vpshufb %ymm17, %ymm4, %ymm3
+; AVX512BW-NEXT: vpshufb %ymm13, %ymm4, %ymm1 {%k3}
+; AVX512BW-NEXT: vpshufb %ymm17, %ymm4, %ymm2
; AVX512BW-NEXT: vpshufb %ymm18, %ymm5, %ymm4
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512BW-NEXT: vpor %ymm3, %ymm4, %ymm3
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512BW-NEXT: vpor %ymm2, %ymm4, %ymm2
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k2}
; AVX512BW-NEXT: vmovdqa (%r8), %ymm0
-; AVX512BW-NEXT: vpermd %ymm0, %ymm13, %ymm3
+; AVX512BW-NEXT: vpermd %ymm0, %ymm14, %ymm2
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,1,4,6,5,5]
; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,3,2]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512BW-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
; AVX512BW-NEXT: kmovq %rax, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k1}
@@ -5695,215 +5596,206 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vmovdqa64 %zmm9, (%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm8, 256(%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm6, 192(%r9)
-; AVX512BW-NEXT: vmovdqa64 %zmm2, 128(%r9)
+; AVX512BW-NEXT: vmovdqa64 %zmm3, 128(%r9)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: store_i8_stride5_vf64:
; AVX512BW-FCP: # %bb.0:
-; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %zmm5
+; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %zmm0
; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdx), %ymm21
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm21, %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm13
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm2, %ymm13, %ymm3
-; AVX512BW-FCP-NEXT: vpor %ymm0, %ymm3, %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm6
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rcx), %xmm18
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
-; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm18, %xmm3
-; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm7
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdx), %xmm20
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm20, %xmm4
-; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm21, %ymm1
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm8
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm8, %ymm2
+; AVX512BW-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm11
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %xmm2
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
+; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm2, %xmm3
+; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm12
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm4
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
+; AVX512BW-FCP-NEXT: vpshufb %xmm15, %xmm4, %xmm5
+; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm5, %xmm3
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm9
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdi), %xmm17
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm17, %xmm3
-; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %xmm11
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rsi), %xmm19
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm19, %xmm4
-; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm3[0,0,1,1]
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdi), %ymm16
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [3,3,3,0,4,4,4,4]
-; AVX512BW-FCP-NEXT: vpermd %ymm16, %ymm3, %ymm22
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm13
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
+; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm3, %xmm9
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm16
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm5
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm20 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
+; AVX512BW-FCP-NEXT: vpshufb %xmm20, %xmm5, %xmm10
+; AVX512BW-FCP-NEXT: vpor %xmm9, %xmm10, %xmm9
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm18 = ymm9[0,0,1,1]
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdi), %ymm19
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [3,3,3,0,4,4,4,4]
+; AVX512BW-FCP-NEXT: vpermd %ymm19, %ymm9, %ymm22
; AVX512BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm23
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm4 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm10 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
; AVX512BW-FCP-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512BW-FCP-NEXT: kmovd %eax, %k1
-; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm23, %ymm22 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm15, %zmm15
+; AVX512BW-FCP-NEXT: vpshufb %ymm10, %ymm23, %ymm22 {%k1}
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm18, %zmm18
; AVX512BW-FCP-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm0 {%k2}
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm24
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [1,1,2,2,2,2,2,2,27,27,27,27,0,28,28,28]
-; AVX512BW-FCP-NEXT: vpermi2d %zmm5, %zmm24, %zmm15
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm1 {%k2}
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm22
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm18 = [1,1,2,2,2,2,2,2,27,27,27,27,0,28,28,28]
+; AVX512BW-FCP-NEXT: vpermi2d %zmm0, %zmm22, %zmm18
; AVX512BW-FCP-NEXT: movabsq $4760450083537948804, %rax # imm = 0x4210842108421084
; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm0 {%k3}
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm23[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm15 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
-; AVX512BW-FCP-NEXT: vpshufb %zmm15, %zmm22, %zmm22
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm16[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
-; AVX512BW-FCP-NEXT: vpshufb %zmm16, %zmm23, %zmm23
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm1 {%k3}
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm23[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm18 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
+; AVX512BW-FCP-NEXT: vpshufb %zmm18, %zmm23, %zmm23
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm24 = zmm19[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm19 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
+; AVX512BW-FCP-NEXT: vpshufb %zmm19, %zmm24, %zmm24
+; AVX512BW-FCP-NEXT: vporq %zmm23, %zmm24, %zmm23
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm23 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512BW-FCP-NEXT: vporq %zmm22, %zmm23, %zmm23
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm21[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm21 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
-; AVX512BW-FCP-NEXT: vpshufb %zmm21, %zmm22, %zmm22
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm25 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm22 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %zmm22, %zmm13, %zmm13
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm13 = zmm13[2,2,3,3,6,6,7,7]
-; AVX512BW-FCP-NEXT: vporq %zmm25, %zmm13, %zmm13
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm21[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm24 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
+; AVX512BW-FCP-NEXT: vpshufb %zmm24, %zmm21, %zmm21
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm25 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %zmm25, %zmm8, %zmm8
+; AVX512BW-FCP-NEXT: vporq %zmm21, %zmm8, %zmm8
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,2,3,3,6,6,7,7]
; AVX512BW-FCP-NEXT: movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631
; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm13 {%k3}
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm23 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
-; AVX512BW-FCP-NEXT: vpermi2d %zmm5, %zmm24, %zmm23
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm8 {%k3}
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm21 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
+; AVX512BW-FCP-NEXT: vpermi2d %zmm0, %zmm22, %zmm21
; AVX512BW-FCP-NEXT: movabsq $-8925843906633654008, %rax # imm = 0x8421084210842108
; AVX512BW-FCP-NEXT: kmovq %rax, %k4
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm13 {%k4}
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %ymm23
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm24 = ymm23[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm23[27],zero,zero,ymm23[26],zero,ymm23[28],zero,ymm23[30],zero,zero,ymm23[29],zero,ymm23[31],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm24[2,2,3,3]
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %ymm24
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm26 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm24[27],zero,zero,ymm24[26],zero,ymm24[28],zero,ymm24[30],zero,zero,ymm24[29],zero,ymm24[31],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,2,3,3]
-; AVX512BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm18[0],xmm20[0],xmm18[1],xmm20[1],xmm18[2],xmm20[2],xmm18[3],xmm20[3],xmm18[4],xmm20[4],xmm18[5],xmm20[5],xmm18[6],xmm20[6],xmm18[7],xmm20[7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm26 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512BW-FCP-NEXT: vpshufb %xmm26, %xmm18, %xmm18
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm18 = ymm18[0,0,1,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm18, %zmm25, %zmm25
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %ymm18
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm20 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm18[26],zero,ymm18[28],zero,zero,zero,zero,ymm18[29],zero,ymm18[31],zero,zero,ymm18[30]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm27 = ymm20[2,2,3,3]
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %ymm20
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm28 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm20[26],zero,ymm20[28],zero,zero,ymm20[27],zero,ymm20[29],zero,ymm20[31],zero,zero,ymm20[30],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3]
-; AVX512BW-FCP-NEXT: vporq %ymm27, %ymm28, %ymm27
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm17 = xmm17[0],xmm19[0],xmm17[1],xmm19[1],xmm17[2],xmm19[2],xmm17[3],xmm19[3],xmm17[4],xmm19[4],xmm17[5],xmm19[5],xmm17[6],xmm19[6],xmm17[7],xmm19[7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm19 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm17, %xmm17
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm17 = ymm17[0,0,1,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm27, %zmm17
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm25, %zmm17 {%k3}
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm25 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
-; AVX512BW-FCP-NEXT: vpermd %zmm5, %zmm25, %zmm5
-; AVX512BW-FCP-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
-; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm17 {%k3}
-; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm5
-; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm7, %xmm8
-; AVX512BW-FCP-NEXT: vpor %xmm5, %xmm8, %xmm5
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
-; AVX512BW-FCP-NEXT: vpshufb %xmm26, %xmm6, %xmm6
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm5, %zmm6, %zmm5
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm5[0,0,1,1,4,4,5,5]
-; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm9, %xmm6
-; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm11, %xmm7
-; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3],xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7]
-; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm7, %xmm7
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm7, %zmm6
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm6[0,0,1,1,4,4,5,5]
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm21, %zmm8 {%k4}
+; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm11, %xmm14
+; AVX512BW-FCP-NEXT: vpshufb %xmm15, %xmm12, %xmm15
+; AVX512BW-FCP-NEXT: vpor %xmm14, %xmm15, %xmm14
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3],xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm11
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm14, %zmm11, %zmm11
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm14 = zmm11[0,0,1,1,4,4,5,5]
+; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm13, %xmm11
+; AVX512BW-FCP-NEXT: vpshufb %xmm20, %xmm16, %xmm15
+; AVX512BW-FCP-NEXT: vpor %xmm11, %xmm15, %xmm11
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm13[0],xmm16[0],xmm13[1],xmm16[1],xmm13[2],xmm16[2],xmm13[3],xmm16[3],xmm13[4],xmm16[4],xmm13[5],xmm16[5],xmm13[6],xmm16[6],xmm13[7],xmm16[7]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512BW-FCP-NEXT: vpshufb %xmm15, %xmm13, %xmm13
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm11, %zmm13, %zmm11
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm11 = zmm11[0,0,1,1,4,4,5,5]
; AVX512BW-FCP-NEXT: movabsq $-4165393823095705204, %rax # imm = 0xC6318C6318C6318C
-; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm6 {%k3}
-; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
-; AVX512BW-FCP-NEXT: vpermd %zmm5, %zmm7, %zmm7
+; AVX512BW-FCP-NEXT: kmovq %rax, %k4
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm14, %zmm11 {%k4}
+; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm14 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
+; AVX512BW-FCP-NEXT: vpermd %zmm13, %zmm14, %zmm14
; AVX512BW-FCP-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
-; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm6 {%k3}
-; AVX512BW-FCP-NEXT: vpshufb %ymm21, %ymm24, %ymm7
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpshufb %ymm22, %ymm23, %ymm8
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm24, %ymm1
-; AVX512BW-FCP-NEXT: vpshufb %ymm2, %ymm23, %ymm2
-; AVX512BW-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm1, %zmm1
-; AVX512BW-FCP-NEXT: vpshufb %ymm15, %ymm18, %ymm2
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpshufb %ymm16, %ymm20, %ymm7
+; AVX512BW-FCP-NEXT: kmovq %rax, %k4
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm14, %zmm11 {%k4}
+; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm14
+; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm14, %ymm6
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %ymm16
+; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm16, %ymm7
+; AVX512BW-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
+; AVX512BW-FCP-NEXT: vpshufb %ymm24, %ymm14, %ymm7
+; AVX512BW-FCP-NEXT: vpshufb %ymm25, %ymm16, %ymm17
+; AVX512BW-FCP-NEXT: vporq %ymm7, %ymm17, %ymm7
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpor %ymm2, %ymm7, %ymm2
-; AVX512BW-FCP-NEXT: vpermd %ymm20, %ymm3, %ymm3
-; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm18, %ymm3 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k2}
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm1 = [3,3,3,3,0,4,4,4,12,14,13,13,13,13,12,14]
-; AVX512BW-FCP-NEXT: vpermd %zmm5, %zmm1, %zmm1
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm7
+; AVX512BW-FCP-NEXT: vpshufb %ymm18, %ymm7, %ymm17
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %ymm18
+; AVX512BW-FCP-NEXT: vpshufb %ymm19, %ymm18, %ymm19
+; AVX512BW-FCP-NEXT: vporq %ymm17, %ymm19, %ymm17
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm17 = ymm17[2,2,3,3]
+; AVX512BW-FCP-NEXT: vpermd %ymm18, %ymm9, %ymm9
+; AVX512BW-FCP-NEXT: vpshufb %ymm10, %ymm7, %ymm9 {%k1}
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm9, %zmm9
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k2}
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [3,3,3,3,0,4,4,4,12,14,13,13,13,13,12,14]
+; AVX512BW-FCP-NEXT: vpermd %zmm13, %zmm6, %zmm6
; AVX512BW-FCP-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 64(%r9)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, (%r9)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm17, 128(%r9)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, 256(%r9)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, 192(%r9)
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k1}
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm16[27],zero,zero,ymm16[26],zero,ymm16[28],zero,ymm16[30],zero,zero,ymm16[29],zero,ymm16[31],zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,ymm14[26],zero,ymm14[28],zero,ymm14[30],zero,zero,ymm14[29],zero,ymm14[31],zero,zero
+; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm6, %ymm4
+; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm6 = [2,2,3,3,8,8,9,9]
+; AVX512BW-FCP-NEXT: vpermt2q %zmm2, %zmm6, %zmm4
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm15, %xmm2, %xmm2
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm7[26],zero,ymm7[28],zero,zero,zero,zero,ymm7[29],zero,ymm7[31],zero,zero,ymm7[30]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm18[26],zero,ymm18[28],zero,zero,ymm18[27],zero,ymm18[29],zero,ymm18[31],zero,zero,ymm18[30],zero
+; AVX512BW-FCP-NEXT: vpor %ymm3, %ymm5, %ymm3
+; AVX512BW-FCP-NEXT: vpermt2q %zmm2, %zmm6, %zmm3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm3 {%k3}
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm2 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
+; AVX512BW-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm0
+; AVX512BW-FCP-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
+; AVX512BW-FCP-NEXT: kmovq %rax, %k1
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm3 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, 128(%r9)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%r9)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm11, (%r9)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, 256(%r9)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm1, 192(%r9)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: store_i8_stride5_vf64:
; AVX512DQ-BW: # %bb.0:
-; AVX512DQ-BW-NEXT: vmovdqa64 (%r8), %zmm3
+; AVX512DQ-BW-NEXT: vmovdqa64 (%r8), %zmm2
; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %ymm0
; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm8 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm0, %ymm2
+; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm0, %ymm3
; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %ymm1
; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm4 = ymm1[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,3,3,6,6,7,7]
; AVX512DQ-BW-NEXT: movl $693250386, %eax # imm = 0x29522952
; AVX512DQ-BW-NEXT: kmovd %eax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %ymm4, %ymm2 {%k1}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512DQ-BW-NEXT: vmovdqu8 %ymm4, %ymm3 {%k1}
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
; AVX512DQ-BW-NEXT: vmovdqa 32(%rdx), %xmm6
; AVX512DQ-BW-NEXT: vmovdqa 32(%rcx), %xmm12
; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm12[0],xmm6[0],xmm12[1],xmm6[1],xmm12[2],xmm6[2],xmm12[3],xmm6[3],xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7]
; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
; AVX512DQ-BW-NEXT: vpshufb %xmm7, %xmm4, %xmm4
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm10
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm10
; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %ymm4
; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm15 = [11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14]
-; AVX512DQ-BW-NEXT: vpshufb %ymm15, %ymm4, %ymm2
+; AVX512DQ-BW-NEXT: vpshufb %ymm15, %ymm4, %ymm3
; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm5
; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm9 = ymm5[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[2,2,3,3,6,6,7,7]
; AVX512DQ-BW-NEXT: movl $1251232404, %eax # imm = 0x4A944A94
; AVX512DQ-BW-NEXT: kmovd %eax, %k5
-; AVX512DQ-BW-NEXT: vmovdqu8 %ymm9, %ymm2 {%k5}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512DQ-BW-NEXT: vmovdqu8 %ymm9, %ymm3 {%k5}
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
; AVX512DQ-BW-NEXT: vmovdqa 32(%rsi), %xmm13
; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %xmm14
; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm11, %xmm11
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,0,1,1]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm3, %zmm3
; AVX512DQ-BW-NEXT: movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631
; AVX512DQ-BW-NEXT: kmovq %rax, %k4
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm10, %zmm2 {%k4}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm10, %zmm3 {%k4}
; AVX512DQ-BW-NEXT: vmovdqa64 32(%r8), %ymm16
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm10 = [6,6,6,6,7,7,7,7,16,16,16,16,16,16,17,17]
-; AVX512DQ-BW-NEXT: vpermi2d %zmm16, %zmm3, %zmm10
+; AVX512DQ-BW-NEXT: vpermi2d %zmm16, %zmm2, %zmm10
; AVX512DQ-BW-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
; AVX512DQ-BW-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm10, %zmm2 {%k2}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm10, %zmm3 {%k2}
; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdx), %ymm23
; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm23, %ymm17
@@ -5923,21 +5815,21 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm21 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
; AVX512DQ-BW-NEXT: vpshufb %xmm21, %xmm13, %xmm13
; AVX512DQ-BW-NEXT: vpor %xmm12, %xmm13, %xmm12
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm13 = ymm12[0,0,1,1]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm14 = ymm12[0,0,1,1]
; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdi), %ymm25
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm12 = [3,3,3,0,4,4,4,4]
; AVX512DQ-BW-NEXT: vpermd %ymm25, %ymm12, %ymm17
; AVX512DQ-BW-NEXT: vmovdqa64 32(%rsi), %ymm26
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm14 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm13 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
; AVX512DQ-BW-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512DQ-BW-NEXT: kmovd %eax, %k3
-; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm26, %ymm17 {%k3}
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm17, %zmm13, %zmm13
+; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm26, %ymm17 {%k3}
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm17, %zmm14, %zmm14
; AVX512DQ-BW-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512DQ-BW-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm13, %zmm6 {%k2}
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm13 = [3,3,3,3,0,4,4,4]
-; AVX512DQ-BW-NEXT: vpermd %ymm16, %ymm13, %ymm17
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm14, %zmm6 {%k2}
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm14 = [3,3,3,3,0,4,4,4]
+; AVX512DQ-BW-NEXT: vpermd %ymm16, %ymm14, %ymm17
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm18 = mem[1,1,2,2]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm18 = ymm18[0,1,1,1]
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm17, %zmm18, %zmm17
@@ -5946,32 +5838,28 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm17, %zmm6 {%k6}
; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm17 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128]
; AVX512DQ-BW-NEXT: # ymm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm26, %ymm18
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm27 = ymm18[2,2,3,3]
+; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm26, %ymm27
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm18 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128]
; AVX512DQ-BW-NEXT: vpshufb %ymm18, %ymm25, %ymm28
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3]
; AVX512DQ-BW-NEXT: vporq %ymm27, %ymm28, %ymm27
; AVX512DQ-BW-NEXT: vpshufb %ymm15, %ymm26, %ymm15
; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm25 = ymm25[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm25[2,2,3,3,6,6,7,7]
; AVX512DQ-BW-NEXT: vmovdqu8 %ymm25, %ymm15 {%k5}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm15, %zmm27, %zmm15
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm25 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm23, %ymm26
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,2,3,3]
; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm27 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
; AVX512DQ-BW-NEXT: # ymm27 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm24, %ymm28
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3]
; AVX512DQ-BW-NEXT: vporq %ymm26, %ymm28, %ymm26
; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm24, %ymm8
; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm23 = ymm23[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm23 = ymm23[2,2,3,3,6,6,7,7]
; AVX512DQ-BW-NEXT: vmovdqu8 %ymm23, %ymm8 {%k1}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm26, %zmm8
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,2,3,3,6,6,7,7]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,2,3,3,6,6,7,7]
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm15, %zmm8 {%k4}
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm15 = [4,6,5,5,5,5,4,6,6,6,6,6,7,7,7,7]
; AVX512DQ-BW-NEXT: vpermd %zmm16, %zmm15, %zmm15
@@ -6000,33 +5888,31 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm7, %zmm9 {%k1}
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,1,1,2,2,2,2,2,2]
-; AVX512DQ-BW-NEXT: vpermd %zmm3, %zmm7, %zmm3
+; AVX512DQ-BW-NEXT: vpermd %zmm2, %zmm7, %zmm2
; AVX512DQ-BW-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm3, %zmm9 {%k1}
-; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm1, %ymm3
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm0, %ymm7
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512DQ-BW-NEXT: vpor %ymm3, %ymm7, %ymm3
-; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm1, %ymm1
-; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm0, %ymm0
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm2, %zmm9 {%k1}
+; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm1, %ymm2
+; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm0, %ymm7
+; AVX512DQ-BW-NEXT: vpor %ymm2, %ymm7, %ymm2
+; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm1, %ymm1
+; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm0, %ymm0
; AVX512DQ-BW-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512DQ-BW-NEXT: vpermd %ymm5, %ymm12, %ymm1
-; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm4, %ymm1 {%k3}
-; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm4, %ymm3
+; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm4, %ymm1 {%k3}
+; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm4, %ymm2
; AVX512DQ-BW-NEXT: vpshufb %ymm18, %ymm5, %ymm4
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512DQ-BW-NEXT: vpor %ymm3, %ymm4, %ymm3
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512DQ-BW-NEXT: vpor %ymm2, %ymm4, %ymm2
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k2}
; AVX512DQ-BW-NEXT: vmovdqa (%r8), %ymm0
-; AVX512DQ-BW-NEXT: vpermd %ymm0, %ymm13, %ymm3
+; AVX512DQ-BW-NEXT: vpermd %ymm0, %ymm14, %ymm2
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,1,4,6,5,5]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,3,2]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512DQ-BW-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k1}
@@ -6034,166 +5920,157 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, (%r9)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm8, 256(%r9)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm6, 192(%r9)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, 128(%r9)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, 128(%r9)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: store_i8_stride5_vf64:
; AVX512DQ-BW-FCP: # %bb.0:
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %zmm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdx), %ymm21
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm21, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm13
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm2, %ymm13, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm0, %ymm3, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm6
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rcx), %xmm18
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm18, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm7
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdx), %xmm20
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm20, %xmm4
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm21, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm8
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm8, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm11
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %xmm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm2, %xmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm12
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm15, %xmm4, %xmm5
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm5, %xmm3
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm9
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdi), %xmm17
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm17, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %xmm11
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rsi), %xmm19
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm19, %xmm4
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm3[0,0,1,1]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdi), %ymm16
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [3,3,3,0,4,4,4,4]
-; AVX512DQ-BW-FCP-NEXT: vpermd %ymm16, %ymm3, %ymm22
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm13
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm3, %xmm9
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm16
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm20 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm20, %xmm5, %xmm10
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm9, %xmm10, %xmm9
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm18 = ymm9[0,0,1,1]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdi), %ymm19
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [3,3,3,0,4,4,4,4]
+; AVX512DQ-BW-FCP-NEXT: vpermd %ymm19, %ymm9, %ymm22
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm23
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm4 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm10 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
; AVX512DQ-BW-FCP-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k1
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm23, %ymm22 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm15, %zmm15
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm10, %ymm23, %ymm22 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm18, %zmm18
; AVX512DQ-BW-FCP-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm0 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm24
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [1,1,2,2,2,2,2,2,27,27,27,27,0,28,28,28]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm5, %zmm24, %zmm15
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm1 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm22
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm18 = [1,1,2,2,2,2,2,2,27,27,27,27,0,28,28,28]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm0, %zmm22, %zmm18
; AVX512DQ-BW-FCP-NEXT: movabsq $4760450083537948804, %rax # imm = 0x4210842108421084
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm0 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm23[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm15 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm15, %zmm22, %zmm22
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm16[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm16, %zmm23, %zmm23
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm1 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm23[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm18 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm18, %zmm23, %zmm23
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm24 = zmm19[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm19 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm19, %zmm24, %zmm24
+; AVX512DQ-BW-FCP-NEXT: vporq %zmm23, %zmm24, %zmm23
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm23 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-FCP-NEXT: vporq %zmm22, %zmm23, %zmm23
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm21[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm21 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm21, %zmm22, %zmm22
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm25 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm22 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm22, %zmm13, %zmm13
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm13 = zmm13[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-FCP-NEXT: vporq %zmm25, %zmm13, %zmm13
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm21[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm24 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm24, %zmm21, %zmm21
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm25 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm25, %zmm8, %zmm8
+; AVX512DQ-BW-FCP-NEXT: vporq %zmm21, %zmm8, %zmm8
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,2,3,3,6,6,7,7]
; AVX512DQ-BW-FCP-NEXT: movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm13 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm23 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm5, %zmm24, %zmm23
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm8 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm21 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm0, %zmm22, %zmm21
; AVX512DQ-BW-FCP-NEXT: movabsq $-8925843906633654008, %rax # imm = 0x8421084210842108
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k4
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm13 {%k4}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %ymm23
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm24 = ymm23[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm23[27],zero,zero,ymm23[26],zero,ymm23[28],zero,ymm23[30],zero,zero,ymm23[29],zero,ymm23[31],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm24[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %ymm24
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm26 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm24[27],zero,zero,ymm24[26],zero,ymm24[28],zero,ymm24[30],zero,zero,ymm24[29],zero,ymm24[31],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm18[0],xmm20[0],xmm18[1],xmm20[1],xmm18[2],xmm20[2],xmm18[3],xmm20[3],xmm18[4],xmm20[4],xmm18[5],xmm20[5],xmm18[6],xmm20[6],xmm18[7],xmm20[7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm26 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm26, %xmm18, %xmm18
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm18 = ymm18[0,0,1,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm18, %zmm25, %zmm25
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %ymm18
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm20 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm18[26],zero,ymm18[28],zero,zero,zero,zero,ymm18[29],zero,ymm18[31],zero,zero,ymm18[30]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm27 = ymm20[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %ymm20
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm28 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm20[26],zero,ymm20[28],zero,zero,ymm20[27],zero,ymm20[29],zero,ymm20[31],zero,zero,ymm20[30],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm27, %ymm28, %ymm27
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm17 = xmm17[0],xmm19[0],xmm17[1],xmm19[1],xmm17[2],xmm19[2],xmm17[3],xmm19[3],xmm17[4],xmm19[4],xmm17[5],xmm19[5],xmm17[6],xmm19[6],xmm17[7],xmm19[7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm19 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm17, %xmm17
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm17 = ymm17[0,0,1,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm27, %zmm17
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm25, %zmm17 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm25 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
-; AVX512DQ-BW-FCP-NEXT: vpermd %zmm5, %zmm25, %zmm5
-; AVX512DQ-BW-FCP-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm17 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm5
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm7, %xmm8
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm5, %xmm8, %xmm5
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm26, %xmm6, %xmm6
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm5, %zmm6, %zmm5
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm5[0,0,1,1,4,4,5,5]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm9, %xmm6
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm11, %xmm7
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3],xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm7, %xmm7
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm7, %zmm6
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm6[0,0,1,1,4,4,5,5]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm21, %zmm8 {%k4}
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm11, %xmm14
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm15, %xmm12, %xmm15
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm14, %xmm15, %xmm14
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3],xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm11
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm14, %zmm11, %zmm11
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm14 = zmm11[0,0,1,1,4,4,5,5]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm13, %xmm11
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm20, %xmm16, %xmm15
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm11, %xmm15, %xmm11
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm13[0],xmm16[0],xmm13[1],xmm16[1],xmm13[2],xmm16[2],xmm13[3],xmm16[3],xmm13[4],xmm16[4],xmm13[5],xmm16[5],xmm13[6],xmm16[6],xmm13[7],xmm16[7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm15, %xmm13, %xmm13
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm11, %zmm13, %zmm11
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm11 = zmm11[0,0,1,1,4,4,5,5]
; AVX512DQ-BW-FCP-NEXT: movabsq $-4165393823095705204, %rax # imm = 0xC6318C6318C6318C
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm6 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
-; AVX512DQ-BW-FCP-NEXT: vpermd %zmm5, %zmm7, %zmm7
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k4
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm14, %zmm11 {%k4}
+; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm14 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
+; AVX512DQ-BW-FCP-NEXT: vpermd %zmm13, %zmm14, %zmm14
; AVX512DQ-BW-FCP-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm6 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm21, %ymm24, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm22, %ymm23, %ymm8
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm24, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm2, %ymm23, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm1, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm15, %ymm18, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm16, %ymm20, %ymm7
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k4
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm14, %zmm11 {%k4}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm14
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm14, %ymm6
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %ymm16
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm16, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm24, %ymm14, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm25, %ymm16, %ymm17
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm7, %ymm17, %ymm7
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm2, %ymm7, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpermd %ymm20, %ymm3, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm18, %ymm3 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm1 = [3,3,3,3,0,4,4,4,12,14,13,13,13,13,12,14]
-; AVX512DQ-BW-FCP-NEXT: vpermd %zmm5, %zmm1, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm7
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm18, %ymm7, %ymm17
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %ymm18
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm19, %ymm18, %ymm19
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm17, %ymm19, %ymm17
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm17 = ymm17[2,2,3,3]
+; AVX512DQ-BW-FCP-NEXT: vpermd %ymm18, %ymm9, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm10, %ymm7, %ymm9 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm9, %zmm9
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [3,3,3,3,0,4,4,4,12,14,13,13,13,13,12,14]
+; AVX512DQ-BW-FCP-NEXT: vpermd %zmm13, %zmm6, %zmm6
; AVX512DQ-BW-FCP-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 64(%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, (%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm17, 128(%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, 256(%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, 192(%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm16[27],zero,zero,ymm16[26],zero,ymm16[28],zero,ymm16[30],zero,zero,ymm16[29],zero,ymm16[31],zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,ymm14[26],zero,ymm14[28],zero,ymm14[30],zero,zero,ymm14[29],zero,ymm14[31],zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm6, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm6 = [2,2,3,3,8,8,9,9]
+; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm2, %zmm6, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm15, %xmm2, %xmm2
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm7[26],zero,ymm7[28],zero,zero,zero,zero,ymm7[29],zero,ymm7[31],zero,zero,ymm7[30]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm18[26],zero,ymm18[28],zero,zero,ymm18[27],zero,ymm18[29],zero,ymm18[31],zero,zero,ymm18[30],zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm3, %ymm5, %ymm3
+; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm2, %zmm6, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm3 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm2 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
+; AVX512DQ-BW-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm0
+; AVX512DQ-BW-FCP-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm3 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, 128(%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm11, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, 256(%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm1, 192(%r9)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <64 x i8>, ptr %in.vecptr0, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
index 88144e7..de34e48 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
@@ -3004,428 +3004,412 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-LABEL: store_i8_stride6_vf32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512BW-NEXT: vmovdqa (%rsi), %ymm5
-; AVX512BW-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512BW-NEXT: vmovdqa (%rcx), %ymm4
-; AVX512BW-NEXT: vmovdqa (%r8), %ymm0
-; AVX512BW-NEXT: vmovdqa (%r9), %ymm1
-; AVX512BW-NEXT: vmovdqa (%rsi), %xmm9
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} xmm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-NEXT: vpshufb %xmm6, %xmm9, %xmm7
-; AVX512BW-NEXT: vmovdqa (%rdi), %xmm10
-; AVX512BW-NEXT: vpshufb %xmm6, %xmm10, %xmm6
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm6[0,0,0,1]
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm6 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-NEXT: vmovdqa (%rcx), %xmm11
-; AVX512BW-NEXT: vpshufb %xmm6, %xmm11, %xmm8
-; AVX512BW-NEXT: vmovdqa (%rdx), %xmm12
-; AVX512BW-NEXT: vpshufb %xmm6, %xmm12, %xmm13
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3],xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
-; AVX512BW-NEXT: movw $18724, %cx # imm = 0x4924
+; AVX512BW-NEXT: vmovdqa (%rdi), %ymm9
+; AVX512BW-NEXT: vmovdqa (%rsi), %ymm10
+; AVX512BW-NEXT: vmovdqa (%rdx), %ymm11
+; AVX512BW-NEXT: vmovdqa (%rcx), %ymm12
+; AVX512BW-NEXT: vmovdqa (%r8), %ymm7
+; AVX512BW-NEXT: vmovdqa (%r9), %ymm8
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm9[0],ymm10[0],ymm9[1],ymm10[1],ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[4],ymm10[4],ymm9[5],ymm10[5],ymm9[6],ymm10[6],ymm9[7],ymm10[7],ymm9[16],ymm10[16],ymm9[17],ymm10[17],ymm9[18],ymm10[18],ymm9[19],ymm10[19],ymm9[20],ymm10[20],ymm9[21],ymm10[21],ymm9[22],ymm10[22],ymm9[23],ymm10[23]
+; AVX512BW-NEXT: vmovdqa (%rsi), %xmm1
+; AVX512BW-NEXT: vmovdqa (%rdi), %xmm2
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm3 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,24,27,26,25,24,27,26,25,24,27,26,25,28,29,30,29]
+; AVX512BW-NEXT: vpermw %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vmovdqa (%rcx), %xmm3
+; AVX512BW-NEXT: vmovdqa (%rdx), %xmm4
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm6 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
+; AVX512BW-NEXT: vpermw %ymm5, %ymm6, %ymm5
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm6 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[4],ymm12[4],ymm11[5],ymm12[5],ymm11[6],ymm12[6],ymm11[7],ymm12[7],ymm11[16],ymm12[16],ymm11[17],ymm12[17],ymm11[18],ymm12[18],ymm11[19],ymm12[19],ymm11[20],ymm12[20],ymm11[21],ymm12[21],ymm11[22],ymm12[22],ymm11[23],ymm12[23]
+; AVX512BW-NEXT: vprold $16, %ymm6, %ymm6
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm5
+; AVX512BW-NEXT: movl $613566756, %ecx # imm = 0x24924924
; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu16 %ymm7, %ymm8 {%k1}
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm7
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm13 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512BW-NEXT: vpermw %ymm8, %ymm13, %ymm8
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512BW-NEXT: vprold $16, %xmm13, %xmm13
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
-; AVX512BW-NEXT: movw $9362, %cx # imm = 0x2492
+; AVX512BW-NEXT: vmovdqu16 %zmm5, %zmm0 {%k1}
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[4],ymm8[4],ymm7[5],ymm8[5],ymm7[6],ymm8[6],ymm7[7],ymm8[7],ymm7[16],ymm8[16],ymm7[17],ymm8[17],ymm7[18],ymm8[18],ymm7[19],ymm8[19],ymm7[20],ymm8[20],ymm7[21],ymm8[21],ymm7[22],ymm8[22],ymm7[23],ymm8[23]
+; AVX512BW-NEXT: vmovdqa (%r9), %xmm5
+; AVX512BW-NEXT: vmovdqa (%r8), %xmm6
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm14 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm13, %zmm14, %zmm13
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm14 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
+; AVX512BW-NEXT: movl $1227133513, %ecx # imm = 0x49249249
; AVX512BW-NEXT: kmovd %ecx, %k2
-; AVX512BW-NEXT: vmovdqu16 %ymm13, %ymm8 {%k2}
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm8[0,1,2,3],zmm7[4,5,6,7]
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm8 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512BW-NEXT: vmovdqa (%r9), %xmm13
-; AVX512BW-NEXT: vpshufb %xmm8, %xmm13, %xmm14
-; AVX512BW-NEXT: vmovdqa (%r8), %xmm15
-; AVX512BW-NEXT: vpshufb %xmm8, %xmm15, %xmm16
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm16 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm17 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
-; AVX512BW-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm14, %zmm16, %zmm14
-; AVX512BW-NEXT: movl $613566756, %ecx # imm = 0x24924924
-; AVX512BW-NEXT: kmovd %ecx, %k3
-; AVX512BW-NEXT: vmovdqu16 %zmm14, %zmm7 {%k3}
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm14 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm16 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
-; AVX512BW-NEXT: vpermw %ymm14, %ymm16, %ymm14
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm16 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512BW-NEXT: vprold $16, %ymm16, %ymm16
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm16 = ymm16[2,2,2,3]
-; AVX512BW-NEXT: vmovdqu16 %ymm16, %ymm14 {%k2}
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm14, %zmm0, %zmm14
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
+; AVX512BW-NEXT: vpermw %zmm13, %zmm14, %zmm0 {%k2}
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm13 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-NEXT: vpshufb %ymm13, %ymm10, %ymm14
+; AVX512BW-NEXT: vpshufb %ymm13, %ymm9, %ymm13
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[4],ymm14[4],ymm13[5],ymm14[5],ymm13[6],ymm14[6],ymm13[7],ymm14[7],ymm13[16],ymm14[16],ymm13[17],ymm14[17],ymm13[18],ymm14[18],ymm13[19],ymm14[19],ymm13[20],ymm14[20],ymm13[21],ymm14[21],ymm13[22],ymm14[22],ymm13[23],ymm14[23]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm14 = ymm11[8],ymm12[8],ymm11[9],ymm12[9],ymm11[10],ymm12[10],ymm11[11],ymm12[11],ymm11[12],ymm12[12],ymm11[13],ymm12[13],ymm11[14],ymm12[14],ymm11[15],ymm12[15],ymm11[24],ymm12[24],ymm11[25],ymm12[25],ymm11[26],ymm12[26],ymm11[27],ymm12[27],ymm11[28],ymm12[28],ymm11[29],ymm12[29],ymm11[30],ymm12[30],ymm11[31],ymm12[31]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm15 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512BW-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm14, %zmm13, %zmm13
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm14 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512BW-NEXT: vpshufb %ymm14, %ymm12, %ymm12
+; AVX512BW-NEXT: vpshufb %ymm14, %ymm11, %ymm11
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm11 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[4],ymm12[4],ymm11[5],ymm12[5],ymm11[6],ymm12[6],ymm11[7],ymm12[7],ymm11[16],ymm12[16],ymm11[17],ymm12[17],ymm11[18],ymm12[18],ymm11[19],ymm12[19],ymm11[20],ymm12[20],ymm11[21],ymm12[21],ymm11[22],ymm12[22],ymm11[23],ymm12[23]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm9[8],ymm10[8],ymm9[9],ymm10[9],ymm9[10],ymm10[10],ymm9[11],ymm10[11],ymm9[12],ymm10[12],ymm9[13],ymm10[13],ymm9[14],ymm10[14],ymm9[15],ymm10[15],ymm9[24],ymm10[24],ymm9[25],ymm10[25],ymm9[26],ymm10[26],ymm9[27],ymm10[27],ymm9[28],ymm10[28],ymm9[29],ymm10[29],ymm9[30],ymm10[30],ymm9[31],ymm10[31]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
; AVX512BW-NEXT: vpermw %ymm9, %ymm10, %ymm9
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512BW-NEXT: vpermw %ymm11, %ymm10, %ymm9 {%k1}
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm14[4,5,6,7]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm11 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
-; AVX512BW-NEXT: movl $1227133513, %ecx # imm = 0x49249249
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm9, %zmm11, %zmm9
+; AVX512BW-NEXT: movl $1227114788, %ecx # imm = 0x49244924
; AVX512BW-NEXT: kmovd %ecx, %k2
-; AVX512BW-NEXT: vpermw %zmm10, %zmm11, %zmm9 {%k2}
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm11 = ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15],ymm3[24],ymm5[24],ymm3[25],ymm5[25],ymm3[26],ymm5[26],ymm3[27],ymm5[27],ymm3[28],ymm5[28],ymm3[29],ymm5[29],ymm3[30],ymm5[30],ymm3[31],ymm5[31]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm12 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512BW-NEXT: vpermw %ymm11, %ymm12, %ymm11
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512BW-NEXT: vpermw %ymm10, %ymm12, %ymm11 {%k1}
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm10
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512BW-NEXT: vpshufb %ymm11, %ymm3, %ymm3
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512BW-NEXT: vpshufb %ymm6, %ymm4, %ymm4
-; AVX512BW-NEXT: vpshufb %ymm6, %ymm2, %ymm2
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512BW-NEXT: vmovdqu16 %ymm3, %ymm2 {%k1}
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm10[4,5,6,7]
-; AVX512BW-NEXT: vpshufb %ymm8, %ymm1, %ymm3
-; AVX512BW-NEXT: vpshufb %ymm8, %ymm0, %ymm4
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
-; AVX512BW-NEXT: vpermw %ymm0, %ymm1, %ymm0
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vmovdqu16 %zmm13, %zmm9 {%k2}
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm10 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512BW-NEXT: vpshufb %ymm10, %ymm8, %ymm11
+; AVX512BW-NEXT: vpshufb %ymm10, %ymm7, %ymm12
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm11 = ymm12[0],ymm11[0],ymm12[1],ymm11[1],ymm12[2],ymm11[2],ymm12[3],ymm11[3],ymm12[4],ymm11[4],ymm12[5],ymm11[5],ymm12[6],ymm11[6],ymm12[7],ymm11[7],ymm12[16],ymm11[16],ymm12[17],ymm11[17],ymm12[18],ymm11[18],ymm12[19],ymm11[19],ymm12[20],ymm11[20],ymm12[21],ymm11[21],ymm12[22],ymm11[22],ymm12[23],ymm11[23]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm7[8],ymm8[8],ymm7[9],ymm8[9],ymm7[10],ymm8[10],ymm7[11],ymm8[11],ymm7[12],ymm8[12],ymm7[13],ymm8[13],ymm7[14],ymm8[14],ymm7[15],ymm8[15],ymm7[24],ymm8[24],ymm7[25],ymm8[25],ymm7[26],ymm8[26],ymm7[27],ymm8[27],ymm7[28],ymm8[28],ymm7[29],ymm8[29],ymm7[30],ymm8[30],ymm7[31],ymm8[31]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
+; AVX512BW-NEXT: vpermw %ymm7, %ymm8, %ymm7
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm11, %zmm7
; AVX512BW-NEXT: movl $-1840700270, %ecx # imm = 0x92492492
-; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm2 {%k1}
-; AVX512BW-NEXT: vmovdqa64 %zmm2, 128(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm9, 64(%rax)
+; AVX512BW-NEXT: kmovd %ecx, %k2
+; AVX512BW-NEXT: vmovdqu16 %zmm7, %zmm9 {%k2}
+; AVX512BW-NEXT: vpshufb %xmm14, %xmm3, %xmm7
+; AVX512BW-NEXT: vpshufb %xmm14, %xmm4, %xmm8
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm11 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
+; AVX512BW-NEXT: vpermw %ymm8, %ymm11, %ymm8
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} xmm8 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-NEXT: vpshufb %xmm8, %xmm1, %xmm1
+; AVX512BW-NEXT: vpshufb %xmm8, %xmm2, %xmm2
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX512BW-NEXT: vprold $16, %xmm2, %xmm2
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,0,0,1,4,4,4,5]
+; AVX512BW-NEXT: movl $1227105426, %ecx # imm = 0x49242492
+; AVX512BW-NEXT: kmovd %ecx, %k2
+; AVX512BW-NEXT: vmovdqu16 %zmm1, %zmm7 {%k2}
+; AVX512BW-NEXT: vpshufb %xmm10, %xmm5, %xmm1
+; AVX512BW-NEXT: vpshufb %xmm10, %xmm6, %xmm2
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm3 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
+; AVX512BW-NEXT: vpermw %ymm2, %ymm3, %ymm2
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512BW-NEXT: vmovdqu16 %zmm1, %zmm7 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm9, 128(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm0, 64(%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: store_i8_stride6_vf32:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm5
-; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm5
+; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm6
; AVX512BW-FCP-NEXT: vmovdqa (%r8), %ymm0
; AVX512BW-FCP-NEXT: vmovdqa (%r9), %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm9
-; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm10
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %xmm11
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm4 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm7
+; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm4
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm7[0],ymm4[1],ymm7[1],ymm4[2],ymm7[2],ymm4[3],ymm7[3],ymm4[4],ymm7[4],ymm4[5],ymm7[5],ymm4[6],ymm7[6],ymm4[7],ymm7[7],ymm4[16],ymm7[16],ymm4[17],ymm7[17],ymm4[18],ymm7[18],ymm4[19],ymm7[19],ymm4[20],ymm7[20],ymm4[21],ymm7[21],ymm4[22],ymm7[22],ymm4[23],ymm7[23]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm5[8],ymm6[8],ymm5[9],ymm6[9],ymm5[10],ymm6[10],ymm5[11],ymm6[11],ymm5[12],ymm6[12],ymm5[13],ymm6[13],ymm5[14],ymm6[14],ymm5[15],ymm6[15],ymm5[24],ymm6[24],ymm5[25],ymm6[25],ymm5[26],ymm6[26],ymm5[27],ymm6[27],ymm5[28],ymm6[28],ymm5[29],ymm6[29],ymm5[30],ymm6[30],ymm5[31],ymm6[31]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512BW-FCP-NEXT: vpermw %ymm7, %ymm8, %ymm7
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm4, %zmm7
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm4
+; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm9
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm9[0],ymm4[0],ymm9[1],ymm4[1],ymm9[2],ymm4[2],ymm9[3],ymm4[3],ymm9[4],ymm4[4],ymm9[5],ymm4[5],ymm9[6],ymm4[6],ymm9[7],ymm4[7],ymm9[16],ymm4[16],ymm9[17],ymm4[17],ymm9[18],ymm4[18],ymm9[19],ymm4[19],ymm9[20],ymm4[20],ymm9[21],ymm4[21],ymm9[22],ymm4[22],ymm9[23],ymm4[23]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512BW-FCP-NEXT: vpermw %ymm9, %ymm10, %ymm9
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm4, %zmm4
+; AVX512BW-FCP-NEXT: movl $1227114788, %r10d # imm = 0x49244924
+; AVX512BW-FCP-NEXT: kmovd %r10d, %k1
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm7, %zmm4 {%k1}
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm9
+; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm10
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[4],ymm9[4],ymm10[5],ymm9[5],ymm10[6],ymm9[6],ymm10[7],ymm9[7],ymm10[16],ymm9[16],ymm10[17],ymm9[17],ymm10[18],ymm9[18],ymm10[19],ymm9[19],ymm10[20],ymm9[20],ymm10[21],ymm9[21],ymm10[22],ymm9[22],ymm10[23],ymm9[23]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
+; AVX512BW-FCP-NEXT: vpermw %ymm10, %ymm11, %ymm10
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512BW-FCP-NEXT: movl $-1840700270, %r10d # imm = 0x92492492
+; AVX512BW-FCP-NEXT: kmovd %r10d, %k1
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm9, %zmm4 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm10 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm11
; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512BW-FCP-NEXT: vpermw %ymm7, %ymm8, %ymm8
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm7 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
-; AVX512BW-FCP-NEXT: movw $9362, %cx # imm = 0x2492
-; AVX512BW-FCP-NEXT: kmovd %ecx, %k2
-; AVX512BW-FCP-NEXT: vpermw %ymm6, %ymm7, %ymm8 {%k2}
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm11, %xmm7
-; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm12, %xmm6
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,0,1]
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm13
-; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm10, %xmm14
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
-; AVX512BW-FCP-NEXT: movw $18724, %cx # imm = 0x4924
+; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm12, %xmm10
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm10[8],xmm11[8],xmm10[9],xmm11[9],xmm10[10],xmm11[10],xmm10[11],xmm11[11],xmm10[12],xmm11[12],xmm10[13],xmm11[13],xmm10[14],xmm11[14],xmm10[15],xmm11[15]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
+; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm11
+; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm13
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3],xmm13[4],xmm11[4],xmm13[5],xmm11[5],xmm13[6],xmm11[6],xmm13[7],xmm11[7]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm15 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
+; AVX512BW-FCP-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm14, %zmm10
+; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm11, %xmm14
+; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm8
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3],xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm12[0],xmm9[0],xmm12[1],xmm9[1],xmm12[2],xmm9[2],xmm12[3],xmm9[3],xmm12[4],xmm9[4],xmm12[5],xmm9[5],xmm12[6],xmm9[6],xmm12[7],xmm9[7]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm15 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
+; AVX512BW-FCP-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm14, %zmm8
+; AVX512BW-FCP-NEXT: movl $1227105426, %ecx # imm = 0x49242492
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512BW-FCP-NEXT: vmovdqu16 %ymm6, %ymm13 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm0, %zmm6
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm8[0,1,2,3],zmm6[4,5,6,7]
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512BW-FCP-NEXT: vmovdqa (%r9), %xmm13
-; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm14
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm10, %zmm8 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa (%r9), %xmm10
+; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm10, %xmm14
; AVX512BW-FCP-NEXT: vmovdqa (%r8), %xmm15
-; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm15, %xmm16
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm16 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
-; AVX512BW-FCP-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm14, %zmm16, %zmm14
+; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm15, %xmm7
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3],xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm15[0],xmm10[0],xmm15[1],xmm10[1],xmm15[2],xmm10[2],xmm15[3],xmm10[3],xmm15[4],xmm10[4],xmm15[5],xmm10[5],xmm15[6],xmm10[6],xmm15[7],xmm10[7]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm16 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
+; AVX512BW-FCP-NEXT: vpermw %ymm14, %ymm16, %ymm14
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm14, %zmm7
; AVX512BW-FCP-NEXT: movl $613566756, %ecx # imm = 0x24924924
-; AVX512BW-FCP-NEXT: kmovd %ecx, %k3
-; AVX512BW-FCP-NEXT: vmovdqu16 %zmm14, %zmm6 {%k3}
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm14 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm16 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
-; AVX512BW-FCP-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [9,8,11,10,9,8,11,10,9,8,11,10,13,12,15,14]
-; AVX512BW-FCP-NEXT: vpermw %ymm14, %ymm17, %ymm16 {%k2}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm16, %zmm0, %zmm14
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
-; AVX512BW-FCP-NEXT: vpermw %ymm10, %ymm11, %ymm10
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512BW-FCP-NEXT: vpermw %ymm9, %ymm11, %ymm10 {%k1}
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,2,3],zmm14[4,5,6,7]
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm11 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
+; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm7, %zmm8 {%k1}
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[2],ymm6[2],ymm5[3],ymm6[3],ymm5[4],ymm6[4],ymm5[5],ymm6[5],ymm5[6],ymm6[6],ymm5[7],ymm6[7],ymm5[16],ymm6[16],ymm5[17],ymm6[17],ymm5[18],ymm6[18],ymm5[19],ymm6[19],ymm5[20],ymm6[20],ymm5[21],ymm6[21],ymm5[22],ymm6[22],ymm5[23],ymm6[23]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm13[8],xmm11[8],xmm13[9],xmm11[9],xmm13[10],xmm11[10],xmm13[11],xmm11[11],xmm13[12],xmm11[12],xmm13[13],xmm11[13],xmm13[14],xmm11[14],xmm13[15],xmm11[15]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm12[8],xmm9[8],xmm12[9],xmm9[9],xmm12[10],xmm9[10],xmm12[11],xmm9[11],xmm12[12],xmm9[12],xmm12[13],xmm9[13],xmm12[14],xmm9[14],xmm12[15],xmm9[15]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,24,27,26,25,24,27,26,25,24,27,26,25,28,29,30,29]
+; AVX512BW-FCP-NEXT: vpermw %zmm2, %zmm3, %zmm2
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,25,24,27,26,25,24,27,26,25,24,27,26,29,28,31,30]
+; AVX512BW-FCP-NEXT: vpermw %zmm5, %zmm3, %zmm2 {%k1}
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm15[8],xmm10[8],xmm15[9],xmm10[9],xmm15[10],xmm10[10],xmm15[11],xmm10[11],xmm15[12],xmm10[12],xmm15[13],xmm10[13],xmm15[14],xmm10[14],xmm15[15],xmm10[15]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm1 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
; AVX512BW-FCP-NEXT: movl $1227133513, %ecx # imm = 0x49249249
-; AVX512BW-FCP-NEXT: kmovd %ecx, %k2
-; AVX512BW-FCP-NEXT: vpermw %zmm10, %zmm11, %zmm9 {%k2}
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm11 = ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15],ymm3[24],ymm5[24],ymm3[25],ymm5[25],ymm3[26],ymm5[26],ymm3[27],ymm5[27],ymm3[28],ymm5[28],ymm3[29],ymm5[29],ymm3[30],ymm5[30],ymm3[31],ymm5[31]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512BW-FCP-NEXT: vpermw %ymm11, %ymm12, %ymm11
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512BW-FCP-NEXT: vpermw %ymm10, %ymm12, %ymm11 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm10
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-FCP-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512BW-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm4
-; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm2
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512BW-FCP-NEXT: vmovdqu16 %ymm3, %ymm2 {%k1}
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm10[4,5,6,7]
-; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm1, %ymm3
-; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm4
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm1 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
-; AVX512BW-FCP-NEXT: vpermw %ymm0, %ymm1, %ymm0
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512BW-FCP-NEXT: movl $-1840700270, %ecx # imm = 0x92492492
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512BW-FCP-NEXT: vmovdqu16 %zmm0, %zmm2 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 128(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, (%rax)
+; AVX512BW-FCP-NEXT: vpermw %zmm0, %zmm1, %zmm2 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 64(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, (%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, 128(%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: store_i8_stride6_vf32:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %ymm5
-; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %ymm4
-; AVX512DQ-BW-NEXT: vmovdqa (%r8), %ymm0
-; AVX512DQ-BW-NEXT: vmovdqa (%r9), %ymm1
-; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %xmm9
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} xmm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm9, %xmm7
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm10
-; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm10, %xmm6
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm6[0,0,0,1]
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm6 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %xmm11
-; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm11, %xmm8
-; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm12
-; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm12, %xmm13
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3],xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
-; AVX512DQ-BW-NEXT: movw $18724, %cx # imm = 0x4924
+; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm9
+; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %ymm10
+; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %ymm11
+; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %ymm12
+; AVX512DQ-BW-NEXT: vmovdqa (%r8), %ymm7
+; AVX512DQ-BW-NEXT: vmovdqa (%r9), %ymm8
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm9[0],ymm10[0],ymm9[1],ymm10[1],ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[4],ymm10[4],ymm9[5],ymm10[5],ymm9[6],ymm10[6],ymm9[7],ymm10[7],ymm9[16],ymm10[16],ymm9[17],ymm10[17],ymm9[18],ymm10[18],ymm9[19],ymm10[19],ymm9[20],ymm10[20],ymm9[21],ymm10[21],ymm9[22],ymm10[22],ymm9[23],ymm10[23]
+; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %xmm1
+; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm2
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm3 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,24,27,26,25,24,27,26,25,24,27,26,25,28,29,30,29]
+; AVX512DQ-BW-NEXT: vpermw %zmm0, %zmm3, %zmm0
+; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %xmm3
+; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm4
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm6 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
+; AVX512DQ-BW-NEXT: vpermw %ymm5, %ymm6, %ymm5
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm6 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[4],ymm12[4],ymm11[5],ymm12[5],ymm11[6],ymm12[6],ymm11[7],ymm12[7],ymm11[16],ymm12[16],ymm11[17],ymm12[17],ymm11[18],ymm12[18],ymm11[19],ymm12[19],ymm11[20],ymm12[20],ymm11[21],ymm12[21],ymm11[22],ymm12[22],ymm11[23],ymm12[23]
+; AVX512DQ-BW-NEXT: vprold $16, %ymm6, %ymm6
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm5
+; AVX512DQ-BW-NEXT: movl $613566756, %ecx # imm = 0x24924924
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu16 %ymm7, %ymm8 {%k1}
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm7
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm13 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512DQ-BW-NEXT: vpermw %ymm8, %ymm13, %ymm8
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512DQ-BW-NEXT: vprold $16, %xmm13, %xmm13
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
-; AVX512DQ-BW-NEXT: movw $9362, %cx # imm = 0x2492
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm5, %zmm0 {%k1}
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[4],ymm8[4],ymm7[5],ymm8[5],ymm7[6],ymm8[6],ymm7[7],ymm8[7],ymm7[16],ymm8[16],ymm7[17],ymm8[17],ymm7[18],ymm8[18],ymm7[19],ymm8[19],ymm7[20],ymm8[20],ymm7[21],ymm8[21],ymm7[22],ymm8[22],ymm7[23],ymm8[23]
+; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm5
+; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm6
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm14 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm13, %zmm14, %zmm13
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm14 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
+; AVX512DQ-BW-NEXT: movl $1227133513, %ecx # imm = 0x49249249
; AVX512DQ-BW-NEXT: kmovd %ecx, %k2
-; AVX512DQ-BW-NEXT: vmovdqu16 %ymm13, %ymm8 {%k2}
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm8[0,1,2,3],zmm7[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm8 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm13
-; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm13, %xmm14
-; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm15
-; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm15, %xmm16
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm16 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm17 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
-; AVX512DQ-BW-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm14, %zmm16, %zmm14
-; AVX512DQ-BW-NEXT: movl $613566756, %ecx # imm = 0x24924924
-; AVX512DQ-BW-NEXT: kmovd %ecx, %k3
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm14, %zmm7 {%k3}
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm14 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm16 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
-; AVX512DQ-BW-NEXT: vpermw %ymm14, %ymm16, %ymm14
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm16 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512DQ-BW-NEXT: vprold $16, %ymm16, %ymm16
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm16 = ymm16[2,2,2,3]
-; AVX512DQ-BW-NEXT: vmovdqu16 %ymm16, %ymm14 {%k2}
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm14, %zmm0, %zmm14
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
+; AVX512DQ-BW-NEXT: vpermw %zmm13, %zmm14, %zmm0 {%k2}
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm13 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm10, %ymm14
+; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm9, %ymm13
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[4],ymm14[4],ymm13[5],ymm14[5],ymm13[6],ymm14[6],ymm13[7],ymm14[7],ymm13[16],ymm14[16],ymm13[17],ymm14[17],ymm13[18],ymm14[18],ymm13[19],ymm14[19],ymm13[20],ymm14[20],ymm13[21],ymm14[21],ymm13[22],ymm14[22],ymm13[23],ymm14[23]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm14 = ymm11[8],ymm12[8],ymm11[9],ymm12[9],ymm11[10],ymm12[10],ymm11[11],ymm12[11],ymm11[12],ymm12[12],ymm11[13],ymm12[13],ymm11[14],ymm12[14],ymm11[15],ymm12[15],ymm11[24],ymm12[24],ymm11[25],ymm12[25],ymm11[26],ymm12[26],ymm11[27],ymm12[27],ymm11[28],ymm12[28],ymm11[29],ymm12[29],ymm11[30],ymm12[30],ymm11[31],ymm12[31]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm15 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512DQ-BW-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm14, %zmm13, %zmm13
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm14 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm12, %ymm12
+; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm11, %ymm11
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm11 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[4],ymm12[4],ymm11[5],ymm12[5],ymm11[6],ymm12[6],ymm11[7],ymm12[7],ymm11[16],ymm12[16],ymm11[17],ymm12[17],ymm11[18],ymm12[18],ymm11[19],ymm12[19],ymm11[20],ymm12[20],ymm11[21],ymm12[21],ymm11[22],ymm12[22],ymm11[23],ymm12[23]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm9[8],ymm10[8],ymm9[9],ymm10[9],ymm9[10],ymm10[10],ymm9[11],ymm10[11],ymm9[12],ymm10[12],ymm9[13],ymm10[13],ymm9[14],ymm10[14],ymm9[15],ymm10[15],ymm9[24],ymm10[24],ymm9[25],ymm10[25],ymm9[26],ymm10[26],ymm9[27],ymm10[27],ymm9[28],ymm10[28],ymm9[29],ymm10[29],ymm9[30],ymm10[30],ymm9[31],ymm10[31]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
; AVX512DQ-BW-NEXT: vpermw %ymm9, %ymm10, %ymm9
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512DQ-BW-NEXT: vpermw %ymm11, %ymm10, %ymm9 {%k1}
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm14[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm11 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
-; AVX512DQ-BW-NEXT: movl $1227133513, %ecx # imm = 0x49249249
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm9, %zmm11, %zmm9
+; AVX512DQ-BW-NEXT: movl $1227114788, %ecx # imm = 0x49244924
; AVX512DQ-BW-NEXT: kmovd %ecx, %k2
-; AVX512DQ-BW-NEXT: vpermw %zmm10, %zmm11, %zmm9 {%k2}
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm11 = ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15],ymm3[24],ymm5[24],ymm3[25],ymm5[25],ymm3[26],ymm5[26],ymm3[27],ymm5[27],ymm3[28],ymm5[28],ymm3[29],ymm5[29],ymm3[30],ymm5[30],ymm3[31],ymm5[31]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm12 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512DQ-BW-NEXT: vpermw %ymm11, %ymm12, %ymm11
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512DQ-BW-NEXT: vpermw %ymm10, %ymm12, %ymm11 {%k1}
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm10
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm3, %ymm3
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm4, %ymm4
-; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm2, %ymm2
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512DQ-BW-NEXT: vmovdqu16 %ymm3, %ymm2 {%k1}
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm10[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm1, %ymm3
-; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm0, %ymm4
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
-; AVX512DQ-BW-NEXT: vpermw %ymm0, %ymm1, %ymm0
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm13, %zmm9 {%k2}
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm10 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm8, %ymm11
+; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm7, %ymm12
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm11 = ymm12[0],ymm11[0],ymm12[1],ymm11[1],ymm12[2],ymm11[2],ymm12[3],ymm11[3],ymm12[4],ymm11[4],ymm12[5],ymm11[5],ymm12[6],ymm11[6],ymm12[7],ymm11[7],ymm12[16],ymm11[16],ymm12[17],ymm11[17],ymm12[18],ymm11[18],ymm12[19],ymm11[19],ymm12[20],ymm11[20],ymm12[21],ymm11[21],ymm12[22],ymm11[22],ymm12[23],ymm11[23]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm7[8],ymm8[8],ymm7[9],ymm8[9],ymm7[10],ymm8[10],ymm7[11],ymm8[11],ymm7[12],ymm8[12],ymm7[13],ymm8[13],ymm7[14],ymm8[14],ymm7[15],ymm8[15],ymm7[24],ymm8[24],ymm7[25],ymm8[25],ymm7[26],ymm8[26],ymm7[27],ymm8[27],ymm7[28],ymm8[28],ymm7[29],ymm8[29],ymm7[30],ymm8[30],ymm7[31],ymm8[31]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
+; AVX512DQ-BW-NEXT: vpermw %ymm7, %ymm8, %ymm7
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm11, %zmm7
; AVX512DQ-BW-NEXT: movl $-1840700270, %ecx # imm = 0x92492492
-; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm0, %zmm2 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, 128(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, 64(%rax)
+; AVX512DQ-BW-NEXT: kmovd %ecx, %k2
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm7, %zmm9 {%k2}
+; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm3, %xmm7
+; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm4, %xmm8
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm11 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
+; AVX512DQ-BW-NEXT: vpermw %ymm8, %ymm11, %ymm8
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} xmm8 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm1, %xmm1
+; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm2, %xmm2
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX512DQ-BW-NEXT: vprold $16, %xmm2, %xmm2
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,0,0,1,4,4,4,5]
+; AVX512DQ-BW-NEXT: movl $1227105426, %ecx # imm = 0x49242492
+; AVX512DQ-BW-NEXT: kmovd %ecx, %k2
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm1, %zmm7 {%k2}
+; AVX512DQ-BW-NEXT: vpshufb %xmm10, %xmm5, %xmm1
+; AVX512DQ-BW-NEXT: vpshufb %xmm10, %xmm6, %xmm2
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm3 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
+; AVX512DQ-BW-NEXT: vpermw %ymm2, %ymm3, %ymm2
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm1, %zmm7 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, (%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, 128(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, 64(%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: store_i8_stride6_vf32:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm6
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r8), %ymm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm9
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm10
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %xmm11
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm4 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm7[0],ymm4[1],ymm7[1],ymm4[2],ymm7[2],ymm4[3],ymm7[3],ymm4[4],ymm7[4],ymm4[5],ymm7[5],ymm4[6],ymm7[6],ymm4[7],ymm7[7],ymm4[16],ymm7[16],ymm4[17],ymm7[17],ymm4[18],ymm7[18],ymm4[19],ymm7[19],ymm4[20],ymm7[20],ymm4[21],ymm7[21],ymm4[22],ymm7[22],ymm4[23],ymm7[23]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm5[8],ymm6[8],ymm5[9],ymm6[9],ymm5[10],ymm6[10],ymm5[11],ymm6[11],ymm5[12],ymm6[12],ymm5[13],ymm6[13],ymm5[14],ymm6[14],ymm5[15],ymm6[15],ymm5[24],ymm6[24],ymm5[25],ymm6[25],ymm5[26],ymm6[26],ymm5[27],ymm6[27],ymm5[28],ymm6[28],ymm5[29],ymm6[29],ymm5[30],ymm6[30],ymm5[31],ymm6[31]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm7, %ymm8, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm4, %zmm7
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm9[0],ymm4[0],ymm9[1],ymm4[1],ymm9[2],ymm4[2],ymm9[3],ymm4[3],ymm9[4],ymm4[4],ymm9[5],ymm4[5],ymm9[6],ymm4[6],ymm9[7],ymm4[7],ymm9[16],ymm4[16],ymm9[17],ymm4[17],ymm9[18],ymm4[18],ymm9[19],ymm4[19],ymm9[20],ymm4[20],ymm9[21],ymm4[21],ymm9[22],ymm4[22],ymm9[23],ymm4[23]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm9, %ymm10, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm4, %zmm4
+; AVX512DQ-BW-FCP-NEXT: movl $1227114788, %r10d # imm = 0x49244924
+; AVX512DQ-BW-FCP-NEXT: kmovd %r10d, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm7, %zmm4 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm10
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[4],ymm9[4],ymm10[5],ymm9[5],ymm10[6],ymm9[6],ymm10[7],ymm9[7],ymm10[16],ymm9[16],ymm10[17],ymm9[17],ymm10[18],ymm9[18],ymm10[19],ymm9[19],ymm10[20],ymm9[20],ymm10[21],ymm9[21],ymm10[22],ymm9[22],ymm10[23],ymm9[23]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm10, %ymm11, %ymm10
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512DQ-BW-FCP-NEXT: movl $-1840700270, %r10d # imm = 0x92492492
+; AVX512DQ-BW-FCP-NEXT: kmovd %r10d, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm9, %zmm4 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm10 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm11
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm7, %ymm8, %ymm8
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm7 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
-; AVX512DQ-BW-FCP-NEXT: movw $9362, %cx # imm = 0x2492
-; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k2
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm6, %ymm7, %ymm8 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm11, %xmm7
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm12, %xmm6
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm13
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm10, %xmm14
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
-; AVX512DQ-BW-FCP-NEXT: movw $18724, %cx # imm = 0x4924
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm12, %xmm10
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm10[8],xmm11[8],xmm10[9],xmm11[9],xmm10[10],xmm11[10],xmm10[11],xmm11[11],xmm10[12],xmm11[12],xmm10[13],xmm11[13],xmm10[14],xmm11[14],xmm10[15],xmm11[15]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm11
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm13
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3],xmm13[4],xmm11[4],xmm13[5],xmm11[5],xmm13[6],xmm11[6],xmm13[7],xmm11[7]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm15 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm14, %zmm10
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm11, %xmm14
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm8
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3],xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm12[0],xmm9[0],xmm12[1],xmm9[1],xmm12[2],xmm9[2],xmm12[3],xmm9[3],xmm12[4],xmm9[4],xmm12[5],xmm9[5],xmm12[6],xmm9[6],xmm12[7],xmm9[7]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm15 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm14, %zmm8
+; AVX512DQ-BW-FCP-NEXT: movl $1227105426, %ecx # imm = 0x49242492
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm6, %ymm13 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm0, %zmm6
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm8[0,1,2,3],zmm6[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %xmm13
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm14
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm10, %zmm8 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %xmm10
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm10, %xmm14
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r8), %xmm15
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm15, %xmm16
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm16 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm14, %zmm16, %zmm14
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm15, %xmm7
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3],xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm15[0],xmm10[0],xmm15[1],xmm10[1],xmm15[2],xmm10[2],xmm15[3],xmm10[3],xmm15[4],xmm10[4],xmm15[5],xmm10[5],xmm15[6],xmm10[6],xmm15[7],xmm10[7]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm16 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm14, %ymm16, %ymm14
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm14, %zmm7
; AVX512DQ-BW-FCP-NEXT: movl $613566756, %ecx # imm = 0x24924924
-; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm14, %zmm6 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm14 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm16 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [9,8,11,10,9,8,11,10,9,8,11,10,13,12,15,14]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm14, %ymm17, %ymm16 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm16, %zmm0, %zmm14
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm10, %ymm11, %ymm10
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm9, %ymm11, %ymm10 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,2,3],zmm14[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm11 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
+; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm7, %zmm8 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[2],ymm6[2],ymm5[3],ymm6[3],ymm5[4],ymm6[4],ymm5[5],ymm6[5],ymm5[6],ymm6[6],ymm5[7],ymm6[7],ymm5[16],ymm6[16],ymm5[17],ymm6[17],ymm5[18],ymm6[18],ymm5[19],ymm6[19],ymm5[20],ymm6[20],ymm5[21],ymm6[21],ymm5[22],ymm6[22],ymm5[23],ymm6[23]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm13[8],xmm11[8],xmm13[9],xmm11[9],xmm13[10],xmm11[10],xmm13[11],xmm11[11],xmm13[12],xmm11[12],xmm13[13],xmm11[13],xmm13[14],xmm11[14],xmm13[15],xmm11[15]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm12[8],xmm9[8],xmm12[9],xmm9[9],xmm12[10],xmm9[10],xmm12[11],xmm9[11],xmm12[12],xmm9[12],xmm12[13],xmm9[13],xmm12[14],xmm9[14],xmm12[15],xmm9[15]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,24,27,26,25,24,27,26,25,24,27,26,25,28,29,30,29]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm2, %zmm3, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,25,24,27,26,25,24,27,26,25,24,27,26,29,28,31,30]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm5, %zmm3, %zmm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm15[8],xmm10[8],xmm15[9],xmm10[9],xmm15[10],xmm10[10],xmm15[11],xmm10[11],xmm15[12],xmm10[12],xmm15[13],xmm10[13],xmm15[14],xmm10[14],xmm15[15],xmm10[15]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm1 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
; AVX512DQ-BW-FCP-NEXT: movl $1227133513, %ecx # imm = 0x49249249
-; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k2
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm10, %zmm11, %zmm9 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm11 = ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15],ymm3[24],ymm5[24],ymm3[25],ymm5[25],ymm3[26],ymm5[26],ymm3[27],ymm5[27],ymm3[28],ymm5[28],ymm3[29],ymm5[29],ymm3[30],ymm5[30],ymm3[31],ymm5[31]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm11, %ymm12, %ymm11
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm10, %ymm12, %ymm11 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm10
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm3, %ymm2 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm10[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm1, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm1 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm0, %ymm1, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512DQ-BW-FCP-NEXT: movl $-1840700270, %ecx # imm = 0x92492492
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm0, %zmm2 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 128(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm0, %zmm1, %zmm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 64(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, 128(%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <32 x i8>, ptr %in.vecptr0, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index 0495e24..8b6ba51 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -3689,10 +3689,9 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovdqa {{.*#+}} ymm9 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
; AVX2-NEXT: vpblendvb %ymm9, %ymm0, %ymm8, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm8 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm7[27],zero,ymm7[27,28,29,30],zero,ymm7[28],zero,ymm7[26,27,30,31],zero,ymm7[29]
-; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
; AVX2-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm9 = [255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u]
; AVX2-NEXT: vpblendvb %ymm9, %ymm0, %ymm8, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
@@ -3772,18 +3771,16 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vpblendvb %ymm7, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[18],zero,zero,zero,zero,ymm5[21],zero,ymm5[19],zero,zero,zero,zero,ymm5[22],zero,ymm5[20]
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm7 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22],zero,ymm3[20],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpor %ymm1, %ymm7, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
; AVX2-NEXT: vpblendvb %ymm7, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[20],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22]
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX2-NEXT: vpshufb {{.*#+}} ymm7 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm12[18],zero,zero,zero,zero,ymm12[21],zero,ymm12[19],zero,zero,zero,zero,ymm12[22],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpor %ymm1, %ymm7, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-NEXT: vpshuflw {{.*#+}} ymm7 = ymm11[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
; AVX2-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[0,1,1,3,4,5,5,7]
@@ -3793,23 +3790,20 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0]
; AVX2-NEXT: vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[23],zero,ymm4[27,20,21,26],zero,ymm4[24],zero,ymm4[26,27,26,27],zero,ymm4[25]
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero,ymm6[27],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpor %ymm1, %ymm7, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm7 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm5[25],zero,ymm5[23],zero,zero,zero,zero,ymm5[26],zero,ymm5[24],zero,zero,zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27]
-; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
; AVX2-NEXT: vpor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm8 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
; AVX2-NEXT: vpblendvb %ymm8, %ymm1, %ymm7, %ymm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm8 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm12[25],zero,ymm12[23],zero,zero,zero,zero,ymm12[26],zero,ymm12[24],zero,zero,zero
; AVX2-NEXT: vmovdqa %ymm12, %ymm13
-; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
; AVX2-NEXT: vpor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm8 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
; AVX2-NEXT: vmovdqa %ymm11, %ymm12
; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
@@ -3919,22 +3913,19 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
; AVX2-FP-NEXT: vpblendvb %ymm11, %ymm9, %ymm10, %ymm9
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[17,18,19,30],zero,ymm0[28],zero,ymm0[28,29,30,31],zero,ymm0[29],zero,ymm0[31]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm10, %ymm11, %ymm10
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29],zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm12 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm11, %ymm12, %ymm11
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm10, %ymm11, %ymm10
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm11 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,ymm6[27,28,29,30],zero,ymm6[28],zero,ymm6[26,27,30,31],zero,ymm6[29]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm12 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm8[27],zero,zero,zero,zero,ymm8[30],zero,ymm8[28],zero,zero,zero,zero,ymm8[31],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm11, %ymm12, %ymm11
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u]
; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm10, %ymm11, %ymm10
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm11 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
@@ -3942,22 +3933,19 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm10, %ymm11, %ymm10
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[23],zero,ymm1[27,20,21,26],zero,ymm1[24],zero,ymm1[26,27,26,27],zero,ymm1[25]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm12 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm11, %ymm12, %ymm11
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm12 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm13 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[25],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm12, %ymm13, %ymm12
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm13 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
; AVX2-FP-NEXT: vpblendvb %ymm13, %ymm11, %ymm12, %ymm11
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm12 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm8[25],zero,ymm8[23],zero,zero,zero,zero,ymm8[26],zero,ymm8[24],zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm13 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm12, %ymm13, %ymm12
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm13 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm14 = [0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u]
@@ -3965,22 +3953,19 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm13 = [0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
; AVX2-FP-NEXT: vpblendvb %ymm13, %ymm11, %ymm12, %ymm11
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm12 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22],zero,ymm2[20]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm13 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm12, %ymm13, %ymm12
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm13 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22],zero,ymm3[20],zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm14 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[21],zero,ymm1[19],zero,zero,zero,zero,ymm1[22],zero,ymm1[20],zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm13, %ymm14, %ymm13
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm14 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
; AVX2-FP-NEXT: vpblendvb %ymm14, %ymm12, %ymm13, %ymm12
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm13 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm8[20],zero,ymm8[18],zero,zero,zero,zero,ymm8[21],zero,ymm8[19],zero,zero,zero,zero,ymm8[22]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm14 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm13, %ymm14, %ymm13
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm14 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,18,19,18,19,20,21,18,19,20,21,28,29,30,31]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,2]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm15 = [u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255]
@@ -4089,46 +4074,40 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
; AVX2-FCP-NEXT: vpblendvb %ymm11, %ymm9, %ymm10, %ymm9
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22],zero,ymm2[20]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm10, %ymm11, %ymm10
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22],zero,ymm3[20],zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[21],zero,ymm1[19],zero,zero,zero,zero,ymm1[22],zero,ymm1[20],zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm11, %ymm12, %ymm11
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm10, %ymm11, %ymm10
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} ymm11 = ymm7[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [4,5,4,5,5,7,4,5]
; AVX2-FCP-NEXT: vpermd %ymm11, %ymm12, %ymm11
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm8[20],zero,ymm8[18],zero,zero,zero,zero,ymm8[21],zero,ymm8[19],zero,zero,zero,zero,ymm8[22]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm12, %ymm13, %ymm12
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255]
; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm12, %ymm11, %ymm11
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0]
; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm10, %ymm11, %ymm10
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[17,18,19,30],zero,ymm0[28],zero,ymm0[28,29,30,31],zero,ymm0[29],zero,ymm0[31]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm11, %ymm12, %ymm11
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29],zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm12, %ymm13, %ymm12
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm11, %ymm12, %ymm11
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,ymm6[27,28,29,30],zero,ymm6[28],zero,ymm6[26,27,30,31],zero,ymm6[29]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm8[27],zero,zero,zero,zero,ymm8[30],zero,ymm8[28],zero,zero,zero,zero,ymm8[31],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm12, %ymm13, %ymm12
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u,255,255,255,255,0,0,u]
; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm11, %ymm12, %ymm11
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
@@ -4136,22 +4115,19 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm11, %ymm12, %ymm11
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[23],zero,ymm1[27,20,21,26],zero,ymm1[24],zero,ymm1[26,27,26,27],zero,ymm1[25]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm12, %ymm13, %ymm12
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[25],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm13, %ymm14, %ymm13
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
; AVX2-FCP-NEXT: vpblendvb %ymm14, %ymm12, %ymm13, %ymm12
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm8[25],zero,ymm8[23],zero,zero,zero,zero,ymm8[26],zero,ymm8[24],zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm13, %ymm14, %ymm13
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u]
@@ -4189,153 +4165,138 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512: # %bb.0:
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512-NEXT: vmovdqa (%rdx), %ymm11
-; AVX512-NEXT: vmovdqa (%rcx), %ymm3
-; AVX512-NEXT: vmovdqa (%r8), %ymm5
-; AVX512-NEXT: vmovdqa (%r9), %ymm6
-; AVX512-NEXT: vmovdqa (%r10), %ymm4
-; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,u,u,u,u,26,27,24,25]
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm8 = ymm4[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm9 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
-; AVX512-NEXT: vpermi2d %zmm7, %zmm8, %zmm9
-; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[20],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero
+; AVX512-NEXT: vmovdqa (%rdi), %ymm3
+; AVX512-NEXT: vmovdqa (%rsi), %ymm4
+; AVX512-NEXT: vmovdqa (%rdx), %ymm5
+; AVX512-NEXT: vmovdqa (%rcx), %ymm6
+; AVX512-NEXT: vmovdqa (%r8), %ymm1
+; AVX512-NEXT: vmovdqa (%r9), %ymm2
+; AVX512-NEXT: vmovdqa (%r10), %ymm0
+; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero,ymm3[25]
; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
; AVX512-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm5[18],zero,ymm5[20,21,20,21],zero,ymm5[19],zero,ymm5[19,20,21,22],zero
-; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm5[23],zero,ymm5[23,24,25,26],zero,ymm5[24],zero,ymm5[30,31]
-; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm5[18,19,20,21],zero,ymm5[19],zero,ymm5[25,26,27,22],zero,ymm5[20],zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm4[23,u,u,u],zero,ymm4[26],zero,ymm4[24,u,u,u],zero,ymm4[27],zero
+; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
; AVX512-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,3,2,3,6,7,6,7]
; AVX512-NEXT: vporq %zmm7, %zmm8, %zmm7
-; AVX512-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm7
-; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
-; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm16 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512-NEXT: # ymm16 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT: vpandq %ymm16, %ymm8, %ymm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm11[18,19,20,21],zero,ymm11[19],zero,ymm11[25,26,27,22],zero,ymm11[20],zero
-; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm9, %zmm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[18],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22],zero,ymm3[20]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[21],zero,ymm4[19],zero,zero,zero,zero,ymm4[22],zero,ymm4[20],zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero
+; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,ymm3[19],zero,ymm3[21,20,21,22],zero,ymm3[20],zero,ymm3[22,23]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
; AVX512-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vporq %zmm9, %zmm8, %zmm9
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm8 = ymm2[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[0,0,1,1,4,4,5,5]
-; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm17 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512-NEXT: # ymm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT: vpandq %ymm17, %ymm8, %ymm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm11[23],zero,ymm11[21,22,23,26],zero,ymm11[24],zero,ymm11[28,29,26,27]
-; AVX512-NEXT: vmovdqa64 %ymm11, %ymm20
-; AVX512-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[21],zero,ymm1[19],zero,zero,zero,zero,ymm1[22],zero,ymm1[20],zero,zero
-; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vporq %zmm10, %zmm8, %zmm8
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
-; AVX512-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm8
-; AVX512-NEXT: vmovdqa (%rsi), %xmm11
-; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm11[u],zero,xmm11[7],zero,xmm11[5,u,u,u],zero,xmm11[8],zero,xmm11[6,u,u,u],zero
-; AVX512-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm12[u,7],zero,xmm12[5],zero,xmm12[u,u,u,8],zero,xmm12[6],zero,xmm12[u,u,u,9]
-; AVX512-NEXT: vpor %xmm7, %xmm9, %xmm7
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512-NEXT: vinserti32x4 $2, %xmm7, %zmm9, %zmm7
-; AVX512-NEXT: vpermq {{.*#+}} zmm10 = zmm7[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vmovdqa (%rcx), %xmm7
-; AVX512-NEXT: vpshufb {{.*#+}} xmm13 = xmm7[u,u,u],zero,xmm7[7],zero,xmm7[5,u,u,u],zero,xmm7[8],zero,xmm7[6,u,u]
-; AVX512-NEXT: vmovdqa (%rdx), %xmm9
-; AVX512-NEXT: vpshufb {{.*#+}} xmm14 = xmm9[u,u,u,7],zero,xmm9[5],zero,xmm9[u,u,u,8],zero,xmm9[6],zero,xmm9[u,u]
-; AVX512-NEXT: vpor %xmm13, %xmm14, %xmm13
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3],xmm9[4],xmm7[4],xmm9[5],xmm7[5],xmm9[6],xmm7[6],xmm9[7],xmm7[7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm14 = xmm14[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512-NEXT: vinserti32x4 $2, %xmm13, %zmm14, %zmm13
-; AVX512-NEXT: vpermq {{.*#+}} zmm18 = zmm13[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm18
-; AVX512-NEXT: vmovdqa (%r9), %xmm13
-; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = zero,xmm13[4,u,u,u],zero,xmm13[7],zero,xmm13[5,u,u,u],zero,xmm13[8],zero,xmm13[6]
-; AVX512-NEXT: vmovdqa (%r8), %xmm14
-; AVX512-NEXT: vpshufb {{.*#+}} xmm15 = xmm14[4],zero,xmm14[u,u,u,7],zero,xmm14[5],zero,xmm14[u,u,u,8],zero,xmm14[6],zero
-; AVX512-NEXT: vpor %xmm10, %xmm15, %xmm10
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm15 = xmm15[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512-NEXT: vinserti32x4 $2, %xmm10, %zmm15, %zmm10
-; AVX512-NEXT: vpermq {{.*#+}} zmm19 = zmm10[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vmovdqa (%r10), %xmm15
-; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = xmm15[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm15[1,1,0,0,4,5,6,7]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
-; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
-; AVX512-NEXT: vpermq {{.*#+}} zmm10 = zmm0[0,0,1,0,4,4,5,4]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm19, %zmm10
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm10
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14,u,u],zero,zero,zero,zero,ymm1[15,u,u],zero,zero,zero,zero,ymm1[16,u,u],zero,zero,zero,zero,ymm1[17,u,u],zero,zero,zero,zero,ymm1[18]
+; AVX512-NEXT: vporq %zmm8, %zmm9, %zmm8
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm8
+; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,u,u,u,u,26,27,24,25]
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm9 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512-NEXT: vmovdqa64 %ymm0, %ymm18
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm10 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
+; AVX512-NEXT: vpermi2d %zmm7, %zmm9, %zmm10
+; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[20],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
+; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm1[18],zero,ymm1[20,21,20,21],zero,ymm1[19],zero,ymm1[19,20,21,22],zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
; AVX512-NEXT: vmovdqa64 %ymm1, %ymm19
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[u,u,0,1,14,15],zero,ymm2[u,u,13,2,3,16],zero,ymm2[u,u,28,29,16,17],zero,ymm2[u,u,19,28,29,18],zero
-; AVX512-NEXT: vmovdqa64 %ymm2, %ymm18
+; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm9, %zmm9
+; AVX512-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vporq %zmm7, %zmm9, %zmm7
+; AVX512-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm7
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm7
+; AVX512-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm9[u],zero,xmm9[7],zero,xmm9[5,u,u,u],zero,xmm9[8],zero,xmm9[6,u,u,u],zero
+; AVX512-NEXT: vmovdqa (%rdi), %xmm10
+; AVX512-NEXT: vpshufb {{.*#+}} xmm11 = xmm10[u,7],zero,xmm10[5],zero,xmm10[u,u,u,8],zero,xmm10[6],zero,xmm10[u,u,u,9]
+; AVX512-NEXT: vpor %xmm8, %xmm11, %xmm8
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512-NEXT: vinserti32x4 $2, %xmm8, %zmm11, %zmm8
+; AVX512-NEXT: vpermq {{.*#+}} zmm8 = zmm8[0,1,0,1,4,5,4,5]
+; AVX512-NEXT: vmovdqa (%rcx), %xmm14
+; AVX512-NEXT: vpshufb {{.*#+}} xmm11 = xmm14[u,u,u],zero,xmm14[7],zero,xmm14[5,u,u,u],zero,xmm14[8],zero,xmm14[6,u,u]
+; AVX512-NEXT: vmovdqa (%rdx), %xmm15
+; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm15[u,u,u,7],zero,xmm15[5],zero,xmm15[u,u,u,8],zero,xmm15[6],zero,xmm15[u,u]
+; AVX512-NEXT: vpor %xmm11, %xmm12, %xmm11
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm12 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512-NEXT: vinserti32x4 $2, %xmm11, %zmm12, %zmm11
+; AVX512-NEXT: vpermq {{.*#+}} zmm16 = zmm11[0,1,0,1,4,5,4,5]
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm16
+; AVX512-NEXT: vmovdqa (%r9), %xmm11
+; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm11[4,u,u,u],zero,xmm11[7],zero,xmm11[5,u,u,u],zero,xmm11[8],zero,xmm11[6]
+; AVX512-NEXT: vmovdqa (%r8), %xmm12
+; AVX512-NEXT: vpshufb {{.*#+}} xmm13 = xmm12[4],zero,xmm12[u,u,u,7],zero,xmm12[5],zero,xmm12[u,u,u,8],zero,xmm12[6],zero
+; AVX512-NEXT: vpor %xmm8, %xmm13, %xmm8
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512-NEXT: vinserti32x4 $2, %xmm8, %zmm13, %zmm8
+; AVX512-NEXT: vpermq {{.*#+}} zmm17 = zmm8[0,1,0,1,4,5,4,5]
+; AVX512-NEXT: vmovdqa (%r10), %xmm13
+; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm13[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm13[1,1,0,0,4,5,6,7]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
+; AVX512-NEXT: vpermq {{.*#+}} zmm8 = zmm0[0,0,1,0,4,4,5,4]
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm8
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm8
+; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm4[14,u,u],zero,zero,zero,zero,ymm4[15,u,u],zero,zero,zero,zero,ymm4[16,u,u],zero,zero,zero,zero,ymm4[17,u,u],zero,zero,zero,zero,ymm4[18]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[0,1,14],zero,ymm3[u,u,0,1,14,15],zero,ymm3[u,u,13,2,3,16],zero,ymm3[u,u,28,29,16,17],zero,ymm3[u,u,19,28,29,18],zero
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15]
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm14[8],xmm15[8],xmm14[9],xmm15[9],xmm14[10],xmm15[10],xmm14[11],xmm15[11],xmm14[12],xmm15[12],xmm14[13],xmm15[13],xmm14[14],xmm15[14],xmm14[15],xmm15[15]
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u,u,u]
-; AVX512-NEXT: vmovdqa64 %ymm20, %ymm2
-; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,14],zero,ymm2[u,u,u,u,u,15],zero,ymm2[u,u,u,u,u,16],zero,ymm2[u,u,u,u,u,17],zero,ymm2[u,u,u,u,u]
-; AVX512-NEXT: vpor %ymm1, %ymm7, %ymm1
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm7, %zmm1
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u],zero,ymm6[14,u,u,u,u,u],zero,ymm6[15,u,u,u,u,u],zero,ymm6[16,u,u,u,u,u],zero,ymm6[17,u,u,u,u,u]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm14 = ymm5[u,u,u,u,14],zero,ymm5[u,u,u,u,u,15],zero,ymm5[u,u,u,u,u,16],zero,ymm5[u,u,u,u,u,17],zero,ymm5[u,u,u,u,u]
+; AVX512-NEXT: vpor %ymm1, %ymm14, %ymm1
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm9, %zmm1
; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm5[u,u,u,u,u,14],zero,ymm5[u,u,u,u,u,15],zero,ymm5[u,u,u,u,u,16],zero,ymm5[u,u,u,u,u,17],zero,ymm5[u,u,u]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[13,u,u,u,u,u],zero,ymm6[14,u,u,u,u,u],zero,ymm6[15,u,u,u,u,u],zero,ymm6[16,u,u,u,u,u],zero,ymm6[17,u,u,u]
-; AVX512-NEXT: vpor %ymm0, %ymm7, %ymm0
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm7, %zmm0
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm7 = xmm15[0,1,2,3,4,5,5,6]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm7
-; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = zero,ymm4[13,u,u,u,u],zero,zero,ymm4[14,u,u,u,u],zero,zero,ymm4[15,u,u,u,u],zero,zero,ymm4[16,u,u,u,u],zero,zero,ymm4[17,u,u]
-; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm7, %zmm7
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm7
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm7
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u]
+; AVX512-NEXT: vmovdqa64 %ymm19, %ymm14
+; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm14[u,u,u,u,u,14],zero,ymm14[u,u,u,u,u,15],zero,ymm14[u,u,u,u,u,16],zero,ymm14[u,u,u,u,u,17],zero,ymm14[u,u,u]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[13,u,u,u,u,u],zero,ymm2[14,u,u,u,u,u],zero,ymm2[15,u,u,u,u,u],zero,ymm2[16,u,u,u,u,u],zero,ymm2[17,u,u,u]
+; AVX512-NEXT: vpor %ymm0, %ymm9, %ymm0
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm9, %zmm0
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm9 = xmm13[0,1,2,3,4,5,5,6]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[2,2,3,3]
+; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm9
+; AVX512-NEXT: vmovdqa64 %ymm18, %ymm11
+; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm11[13,u,u,u,u],zero,zero,ymm11[14,u,u,u,u],zero,zero,ymm11[15,u,u,u,u],zero,zero,ymm11[16,u,u,u,u],zero,zero,ymm11[17,u,u]
+; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm9
+; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm6[30],zero,ymm6[28,u,u,u],zero,ymm6[31],zero,ymm6[29,u]
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-NEXT: vpternlogq $248, %ymm16, %ymm0, %ymm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero,ymm14[29]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
+; AVX512-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
-; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
+; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512-NEXT: vpternlogq $248, %ymm17, %ymm1, %ymm2
-; AVX512-NEXT: vmovdqa64 %ymm19, %ymm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
-; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-NEXT: vmovdqa64 %ymm18, %ymm3
-; AVX512-NEXT: vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,2,3,3,6,6,7,7]
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
-; AVX512-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3
-; AVX512-NEXT: vmovdqa %ymm3, 192(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm10, (%rax)
-; AVX512-NEXT: vmovdqa64 %zmm8, 128(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm7, 64(%rax)
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
+; AVX512-NEXT: vmovdqa %ymm2, 192(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm8, (%rax)
+; AVX512-NEXT: vmovdqa64 %zmm7, 128(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm9, 64(%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -4343,12 +4304,12 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm3
; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm4
; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm5
; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm6
; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm1
-; AVX512-FCP-NEXT: vmovdqa (%r9), %ymm3
+; AVX512-FCP-NEXT: vmovdqa (%r9), %ymm2
; AVX512-FCP-NEXT: vmovdqa64 (%r10), %ymm17
; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm8
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm8[u],zero,xmm8[7],zero,xmm8[5,u,u,u],zero,xmm8[8],zero,xmm8[6,u,u,u],zero
@@ -4388,7 +4349,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm7
; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm15, %zmm7
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm4[14,u,u],zero,zero,zero,zero,ymm4[15,u,u],zero,zero,zero,zero,ymm4[16,u,u],zero,zero,zero,zero,ymm4[17,u,u],zero,zero,zero,zero,ymm4[18]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm2[0,1,14],zero,ymm2[u,u,0,1,14,15],zero,ymm2[u,u,13,2,3,16],zero,ymm2[u,u,28,29,16,17],zero,ymm2[u,u,19,28,29,18],zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm3[0,1,14],zero,ymm3[u,u,0,1,14,15],zero,ymm3[u,u,13,2,3,16],zero,ymm3[u,u,28,29,16,17],zero,ymm3[u,u,19,28,29,18],zero
; AVX512-FCP-NEXT: vpor %ymm0, %ymm15, %ymm0
; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
@@ -4403,7 +4364,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm8, %zmm9
; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm1[u,u,u,u,u,14],zero,ymm1[u,u,u,u,u,15],zero,ymm1[u,u,u,u,u,16],zero,ymm1[u,u,u,u,u,17],zero,ymm1[u,u,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[13,u,u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[13,u,u,u,u,u],zero,ymm2[14,u,u,u,u,u],zero,ymm2[15,u,u,u,u,u],zero,ymm2[16,u,u,u,u,u],zero,ymm2[17,u,u,u]
; AVX512-FCP-NEXT: vpor %ymm0, %ymm8, %ymm0
; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm8 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
@@ -4414,74 +4375,67 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: # ymm10 = mem[0,1,0,1]
; AVX512-FCP-NEXT: vpermd %ymm8, %ymm10, %ymm8
; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm8
-; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm13
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm13[13,u,u,u,u],zero,zero,ymm13[14,u,u,u,u],zero,zero,ymm13[15,u,u,u,u],zero,zero,ymm13[16,u,u,u,u],zero,zero,ymm13[17,u,u]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm12
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm12[13,u,u,u,u],zero,zero,ymm12[14,u,u,u,u],zero,zero,ymm12[15,u,u,u,u],zero,zero,ymm12[16,u,u,u,u],zero,zero,ymm12[17,u,u]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm8
; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpand %ymm0, %ymm9, %ymm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm5[18,19,20,21],zero,ymm5[19],zero,ymm5[25,26,27,22],zero,ymm5[20],zero
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm10, %zmm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero,ymm3[25]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm5[18,19,20,21],zero,ymm5[19],zero,ymm5[25,26,27,22],zero,ymm5[20],zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm4[23,u,u,u],zero,ymm4[26],zero,ymm4[24,u,u,u],zero,ymm4[27],zero
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vporq %zmm0, %zmm9, %zmm0
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[21],zero,ymm4[19],zero,zero,zero,zero,ymm4[22],zero,ymm4[20],zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,ymm3[19],zero,ymm3[21,20,21,22],zero,ymm3[20],zero,ymm3[22,23]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm10, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[21],zero,ymm4[19],zero,zero,zero,zero,ymm4[22],zero,ymm4[20],zero,zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero
+; AVX512-FCP-NEXT: vporq %zmm9, %zmm10, %zmm9
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[20],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm1[18],zero,ymm1[20,21,20,21],zero,ymm1[19],zero,ymm1[19,20,21,22],zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,ymm2[19],zero,ymm2[21,20,21,22],zero,ymm2[20],zero,ymm2[22,23]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm11 = zmm11[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm10, %zmm11, %zmm10
+; AVX512-FCP-NEXT: vporq %zmm0, %zmm10, %zmm0
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm10 = ymm12[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [0,5,4,0,5,0,4,0]
+; AVX512-FCP-NEXT: vpermd %ymm10, %ymm11, %ymm10
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm10
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[20],zero,ymm3[18],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm1[18],zero,ymm1[20,21,20,21],zero,ymm1[19],zero,ymm1[19,20,21,22],zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm11 = zmm11[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm0, %zmm11, %zmm0
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm11 = ymm13[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [0,5,4,0,5,0,4,0]
-; AVX512-FCP-NEXT: vpermd %ymm11, %ymm12, %ymm11
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm11
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm10
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm6[30],zero,ymm6[28,u,u,u],zero,ymm6[31],zero,ymm6[29,u]
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm5, %ymm0
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm5
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512-FCP-NEXT: vpor %ymm4, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm2[28],zero,ymm2[30,31,30,31],zero,ymm2[29],zero,ymm2[31,28,29]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512-FCP-NEXT: vpor %ymm0, %ymm2, %ymm0
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm9, %ymm2, %ymm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
-; AVX512-FCP-NEXT: vmovdqa %ymm2, 192(%rax)
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm1
+; AVX512-FCP-NEXT: vmovdqa %ymm1, 192(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm7, (%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm11, 128(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm10, 128(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm8, 64(%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
@@ -4490,153 +4444,138 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm11
-; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm3
-; AVX512DQ-NEXT: vmovdqa (%r8), %ymm5
-; AVX512DQ-NEXT: vmovdqa (%r9), %ymm6
-; AVX512DQ-NEXT: vmovdqa (%r10), %ymm4
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,u,u,u,u,26,27,24,25]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm8 = ymm4[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm9 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
-; AVX512DQ-NEXT: vpermi2d %zmm7, %zmm8, %zmm9
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[20],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero
+; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm3
+; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm4
+; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm5
+; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm6
+; AVX512DQ-NEXT: vmovdqa (%r8), %ymm1
+; AVX512DQ-NEXT: vmovdqa (%r9), %ymm2
+; AVX512DQ-NEXT: vmovdqa (%r10), %ymm0
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero,ymm3[25]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm5[18],zero,ymm5[20,21,20,21],zero,ymm5[19],zero,ymm5[19,20,21,22],zero
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm5[23],zero,ymm5[23,24,25,26],zero,ymm5[24],zero,ymm5[30,31]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm5[18,19,20,21],zero,ymm5[19],zero,ymm5[25,26,27,22],zero,ymm5[20],zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm4[23,u,u,u],zero,ymm4[26],zero,ymm4[24,u,u,u],zero,ymm4[27],zero
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,3,2,3,6,7,6,7]
; AVX512DQ-NEXT: vporq %zmm7, %zmm8, %zmm7
-; AVX512DQ-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm7
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm16 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512DQ-NEXT: # ymm16 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-NEXT: vpandq %ymm16, %ymm8, %ymm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm11[18,19,20,21],zero,ymm11[19],zero,ymm11[25,26,27,22],zero,ymm11[20],zero
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm9, %zmm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[18],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22],zero,ymm3[20]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[21],zero,ymm4[19],zero,zero,zero,zero,ymm4[22],zero,ymm4[20],zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,ymm3[19],zero,ymm3[21,20,21,22],zero,ymm3[20],zero,ymm3[22,23]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vporq %zmm9, %zmm8, %zmm9
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm8 = ymm2[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[0,0,1,1,4,4,5,5]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm17 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512DQ-NEXT: # ymm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-NEXT: vpandq %ymm17, %ymm8, %ymm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm11[23],zero,ymm11[21,22,23,26],zero,ymm11[24],zero,ymm11[28,29,26,27]
-; AVX512DQ-NEXT: vmovdqa64 %ymm11, %ymm20
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[21],zero,ymm1[19],zero,zero,zero,zero,ymm1[22],zero,ymm1[20],zero,zero
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vporq %zmm10, %zmm8, %zmm8
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
-; AVX512DQ-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm8
-; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm11
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm11[u],zero,xmm11[7],zero,xmm11[5,u,u,u],zero,xmm11[8],zero,xmm11[6,u,u,u],zero
-; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm12[u,7],zero,xmm12[5],zero,xmm12[u,u,u,8],zero,xmm12[6],zero,xmm12[u,u,u,9]
-; AVX512DQ-NEXT: vpor %xmm7, %xmm9, %xmm7
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm7, %zmm9, %zmm7
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm10 = zmm7[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm7
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm13 = xmm7[u,u,u],zero,xmm7[7],zero,xmm7[5,u,u,u],zero,xmm7[8],zero,xmm7[6,u,u]
-; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm9
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm14 = xmm9[u,u,u,7],zero,xmm9[5],zero,xmm9[u,u,u,8],zero,xmm9[6],zero,xmm9[u,u]
-; AVX512DQ-NEXT: vpor %xmm13, %xmm14, %xmm13
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3],xmm9[4],xmm7[4],xmm9[5],xmm7[5],xmm9[6],xmm7[6],xmm9[7],xmm7[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm14 = xmm14[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm13, %zmm14, %zmm13
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm18 = zmm13[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm18
-; AVX512DQ-NEXT: vmovdqa (%r9), %xmm13
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = zero,xmm13[4,u,u,u],zero,xmm13[7],zero,xmm13[5,u,u,u],zero,xmm13[8],zero,xmm13[6]
-; AVX512DQ-NEXT: vmovdqa (%r8), %xmm14
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm15 = xmm14[4],zero,xmm14[u,u,u,7],zero,xmm14[5],zero,xmm14[u,u,u,8],zero,xmm14[6],zero
-; AVX512DQ-NEXT: vpor %xmm10, %xmm15, %xmm10
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm15 = xmm15[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm10, %zmm15, %zmm10
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm19 = zmm10[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vmovdqa (%r10), %xmm15
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = xmm15[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm15[1,1,0,0,4,5,6,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm10 = zmm0[0,0,1,0,4,4,5,4]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm19, %zmm10
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm10
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14,u,u],zero,zero,zero,zero,ymm1[15,u,u],zero,zero,zero,zero,ymm1[16,u,u],zero,zero,zero,zero,ymm1[17,u,u],zero,zero,zero,zero,ymm1[18]
+; AVX512DQ-NEXT: vporq %zmm8, %zmm9, %zmm8
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm8
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,u,u,u,u,26,27,24,25]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm9 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm18
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm10 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
+; AVX512DQ-NEXT: vpermi2d %zmm7, %zmm9, %zmm10
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[20],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm1[18],zero,ymm1[20,21,20,21],zero,ymm1[19],zero,ymm1[19,20,21,22],zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm19
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[u,u,0,1,14,15],zero,ymm2[u,u,13,2,3,16],zero,ymm2[u,u,28,29,16,17],zero,ymm2[u,u,19,28,29,18],zero
-; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm18
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm9, %zmm9
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vporq %zmm7, %zmm9, %zmm7
+; AVX512DQ-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm7
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm7
+; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm9[u],zero,xmm9[7],zero,xmm9[5,u,u,u],zero,xmm9[8],zero,xmm9[6,u,u,u],zero
+; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm10
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm11 = xmm10[u,7],zero,xmm10[5],zero,xmm10[u,u,u,8],zero,xmm10[6],zero,xmm10[u,u,u,9]
+; AVX512DQ-NEXT: vpor %xmm8, %xmm11, %xmm8
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm8, %zmm11, %zmm8
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm8 = zmm8[0,1,0,1,4,5,4,5]
+; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm14
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm11 = xmm14[u,u,u],zero,xmm14[7],zero,xmm14[5,u,u,u],zero,xmm14[8],zero,xmm14[6,u,u]
+; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm15
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm15[u,u,u,7],zero,xmm15[5],zero,xmm15[u,u,u,8],zero,xmm15[6],zero,xmm15[u,u]
+; AVX512DQ-NEXT: vpor %xmm11, %xmm12, %xmm11
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm12 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm11, %zmm12, %zmm11
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm16 = zmm11[0,1,0,1,4,5,4,5]
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm16
+; AVX512DQ-NEXT: vmovdqa (%r9), %xmm11
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm11[4,u,u,u],zero,xmm11[7],zero,xmm11[5,u,u,u],zero,xmm11[8],zero,xmm11[6]
+; AVX512DQ-NEXT: vmovdqa (%r8), %xmm12
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm13 = xmm12[4],zero,xmm12[u,u,u,7],zero,xmm12[5],zero,xmm12[u,u,u,8],zero,xmm12[6],zero
+; AVX512DQ-NEXT: vpor %xmm8, %xmm13, %xmm8
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm8, %zmm13, %zmm8
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm17 = zmm8[0,1,0,1,4,5,4,5]
+; AVX512DQ-NEXT: vmovdqa (%r10), %xmm13
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm13[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm13[1,1,0,0,4,5,6,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm8 = zmm0[0,0,1,0,4,4,5,4]
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm8
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm8
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm4[14,u,u],zero,zero,zero,zero,ymm4[15,u,u],zero,zero,zero,zero,ymm4[16,u,u],zero,zero,zero,zero,ymm4[17,u,u],zero,zero,zero,zero,ymm4[18]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[0,1,14],zero,ymm3[u,u,0,1,14,15],zero,ymm3[u,u,13,2,3,16],zero,ymm3[u,u,28,29,16,17],zero,ymm3[u,u,19,28,29,18],zero
; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15]
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm14[8],xmm15[8],xmm14[9],xmm15[9],xmm14[10],xmm15[10],xmm14[11],xmm15[11],xmm14[12],xmm15[12],xmm14[13],xmm15[13],xmm14[14],xmm15[14],xmm14[15],xmm15[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u,u,u]
-; AVX512DQ-NEXT: vmovdqa64 %ymm20, %ymm2
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,14],zero,ymm2[u,u,u,u,u,15],zero,ymm2[u,u,u,u,u,16],zero,ymm2[u,u,u,u,u,17],zero,ymm2[u,u,u,u,u]
-; AVX512DQ-NEXT: vpor %ymm1, %ymm7, %ymm1
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm7, %zmm1
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u],zero,ymm6[14,u,u,u,u,u],zero,ymm6[15,u,u,u,u,u],zero,ymm6[16,u,u,u,u,u],zero,ymm6[17,u,u,u,u,u]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm14 = ymm5[u,u,u,u,14],zero,ymm5[u,u,u,u,u,15],zero,ymm5[u,u,u,u,u,16],zero,ymm5[u,u,u,u,u,17],zero,ymm5[u,u,u,u,u]
+; AVX512DQ-NEXT: vpor %ymm1, %ymm14, %ymm1
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm9, %zmm1
; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm5[u,u,u,u,u,14],zero,ymm5[u,u,u,u,u,15],zero,ymm5[u,u,u,u,u,16],zero,ymm5[u,u,u,u,u,17],zero,ymm5[u,u,u]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[13,u,u,u,u,u],zero,ymm6[14,u,u,u,u,u],zero,ymm6[15,u,u,u,u,u],zero,ymm6[16,u,u,u,u,u],zero,ymm6[17,u,u,u]
-; AVX512DQ-NEXT: vpor %ymm0, %ymm7, %ymm0
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm7, %zmm0
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm7 = xmm15[0,1,2,3,4,5,5,6]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm7
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = zero,ymm4[13,u,u,u,u],zero,zero,ymm4[14,u,u,u,u],zero,zero,ymm4[15,u,u,u,u],zero,zero,ymm4[16,u,u,u,u],zero,zero,ymm4[17,u,u]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm7, %zmm7
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm7
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm7
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u]
+; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm14
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm14[u,u,u,u,u,14],zero,ymm14[u,u,u,u,u,15],zero,ymm14[u,u,u,u,u,16],zero,ymm14[u,u,u,u,u,17],zero,ymm14[u,u,u]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[13,u,u,u,u,u],zero,ymm2[14,u,u,u,u,u],zero,ymm2[15,u,u,u,u,u],zero,ymm2[16,u,u,u,u,u],zero,ymm2[17,u,u,u]
+; AVX512DQ-NEXT: vpor %ymm0, %ymm9, %ymm0
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm9, %zmm0
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm9 = xmm13[0,1,2,3,4,5,5,6]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[2,2,3,3]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm9
+; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm11
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm11[13,u,u,u,u],zero,zero,ymm11[14,u,u,u,u],zero,zero,ymm11[15,u,u,u,u],zero,zero,ymm11[16,u,u,u,u],zero,zero,ymm11[17,u,u]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm9
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm6[30],zero,ymm6[28,u,u,u],zero,ymm6[31],zero,ymm6[29,u]
+; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512DQ-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $248, %ymm16, %ymm0, %ymm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero,ymm14[29]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
+; AVX512DQ-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $248, %ymm17, %ymm1, %ymm2
-; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm3
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,2,3,3,6,6,7,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
-; AVX512DQ-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3
-; AVX512DQ-NEXT: vmovdqa %ymm3, 192(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm10, (%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm8, 128(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm7, 64(%rax)
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
+; AVX512DQ-NEXT: vmovdqa %ymm2, 192(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm8, (%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm7, 128(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm9, 64(%rax)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -4644,12 +4583,12 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm3
; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm4
; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm5
; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm6
; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa64 (%r10), %ymm17
; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm8
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm8[u],zero,xmm8[7],zero,xmm8[5,u,u,u],zero,xmm8[8],zero,xmm8[6,u,u,u],zero
@@ -4689,7 +4628,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm7
; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm15, %zmm7
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm4[14,u,u],zero,zero,zero,zero,ymm4[15,u,u],zero,zero,zero,zero,ymm4[16,u,u],zero,zero,zero,zero,ymm4[17,u,u],zero,zero,zero,zero,ymm4[18]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm2[0,1,14],zero,ymm2[u,u,0,1,14,15],zero,ymm2[u,u,13,2,3,16],zero,ymm2[u,u,28,29,16,17],zero,ymm2[u,u,19,28,29,18],zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm3[0,1,14],zero,ymm3[u,u,0,1,14,15],zero,ymm3[u,u,13,2,3,16],zero,ymm3[u,u,28,29,16,17],zero,ymm3[u,u,19,28,29,18],zero
; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm15, %ymm0
; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
@@ -4704,7 +4643,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm8, %zmm9
; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm1[u,u,u,u,u,14],zero,ymm1[u,u,u,u,u,15],zero,ymm1[u,u,u,u,u,16],zero,ymm1[u,u,u,u,u,17],zero,ymm1[u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[13,u,u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[13,u,u,u,u,u],zero,ymm2[14,u,u,u,u,u],zero,ymm2[15,u,u,u,u,u],zero,ymm2[16,u,u,u,u,u],zero,ymm2[17,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm8, %ymm0
; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm8 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
@@ -4715,74 +4654,67 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: # ymm10 = mem[0,1,0,1]
; AVX512DQ-FCP-NEXT: vpermd %ymm8, %ymm10, %ymm8
; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm8
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm13
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm13[13,u,u,u,u],zero,zero,ymm13[14,u,u,u,u],zero,zero,ymm13[15,u,u,u,u],zero,zero,ymm13[16,u,u,u,u],zero,zero,ymm13[17,u,u]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm12
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm12[13,u,u,u,u],zero,zero,ymm12[14,u,u,u,u],zero,zero,ymm12[15,u,u,u,u],zero,zero,ymm12[16,u,u,u,u],zero,zero,ymm12[17,u,u]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm8
; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512DQ-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpand %ymm0, %ymm9, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm5[18,19,20,21],zero,ymm5[19],zero,ymm5[25,26,27,22],zero,ymm5[20],zero
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm10, %zmm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero,ymm3[25]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm5[18,19,20,21],zero,ymm5[19],zero,ymm5[25,26,27,22],zero,ymm5[20],zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm4[23,u,u,u],zero,ymm4[26],zero,ymm4[24,u,u,u],zero,ymm4[27],zero
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm0, %zmm9, %zmm0
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[21],zero,ymm4[19],zero,zero,zero,zero,ymm4[22],zero,ymm4[20],zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,ymm3[19],zero,ymm3[21,20,21,22],zero,ymm3[20],zero,ymm3[22,23]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm10, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[21],zero,ymm4[19],zero,zero,zero,zero,ymm4[22],zero,ymm4[20],zero,zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vporq %zmm9, %zmm10, %zmm9
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[20],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm1[18],zero,ymm1[20,21,20,21],zero,ymm1[19],zero,ymm1[19,20,21,22],zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,ymm2[19],zero,ymm2[21,20,21,22],zero,ymm2[20],zero,ymm2[22,23]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm11 = zmm11[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm10, %zmm11, %zmm10
+; AVX512DQ-FCP-NEXT: vporq %zmm0, %zmm10, %zmm0
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm10 = ymm12[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [0,5,4,0,5,0,4,0]
+; AVX512DQ-FCP-NEXT: vpermd %ymm10, %ymm11, %ymm10
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm10
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[20],zero,ymm3[18],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm1[18],zero,ymm1[20,21,20,21],zero,ymm1[19],zero,ymm1[19,20,21,22],zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm11 = zmm11[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm0, %zmm11, %zmm0
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm11 = ymm13[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [0,5,4,0,5,0,4,0]
-; AVX512DQ-FCP-NEXT: vpermd %ymm11, %ymm12, %ymm11
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm11
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm10
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm6[30],zero,ymm6[28,u,u,u],zero,ymm6[31],zero,ymm6[29,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm5, %ymm0
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm5
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512DQ-FCP-NEXT: vpor %ymm4, %ymm3, %ymm3
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm2[28],zero,ymm2[30,31,30,31],zero,ymm2[29],zero,ymm2[31,28,29]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm2, %ymm0
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm9, %ymm2, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, 192(%rax)
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, 192(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, (%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, 128(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, 128(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, 64(%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
@@ -4841,33 +4773,29 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vmovdqu8 %zmm9, %zmm0 {%k1}
; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm9
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm9 = zmm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm9[18,19,20,21],zero,zmm9[19],zero,zmm9[25,26,27,22],zero,zmm9[20],zero,zmm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm9[55],zero,zero,zero,zero,zmm9[58],zero,zmm9[56],zero,zero,zero,zero,zmm9[59],zero
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm15
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm15 = zmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm15[18],zero,zero,zero,zero,zmm15[21],zero,zmm15[19],zero,zero,zero,zero,zmm15[22],zero,zmm15[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm15[55],zero,zero,zero,zero,zmm15[58],zero,zmm15[56],zero,zero,zero,zero,zmm15[59],zero,zmm15[57]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: vporq %zmm9, %zmm15, %zmm9
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm15 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,0,1,1,4,4,5,5]
; AVX512BW-NEXT: movl $676341840, %ecx # imm = 0x28502850
; AVX512BW-NEXT: kmovd %ecx, %k1
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm15 {%k1} = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm16 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm1[23],zero,ymm1[21,22,23,26],zero,ymm1[24],zero,ymm1[28,29,26,27]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm16 = ymm16[2,3,2,3]
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm17 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm17 = ymm17[2,3,2,3]
; AVX512BW-NEXT: vporq %ymm16, %ymm17, %ymm16
; AVX512BW-NEXT: vinserti64x4 $1, %ymm16, %zmm15, %zmm15
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: movabsq $-9005497107459067808, %rcx # imm = 0x83060C180C183060
; AVX512BW-NEXT: kmovq %rcx, %k2
; AVX512BW-NEXT: vmovdqu8 %zmm15, %zmm9 {%k2}
; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm15 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
; AVX512BW-NEXT: vpermw %zmm7, %zmm15, %zmm15
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm16 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm6[18],zero,zmm6[20,21,20,21],zero,zmm6[19],zero,zmm6[19,20,21,22],zero,zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm6[55],zero,zmm6[55,56,57,58],zero,zmm6[56],zero,zmm6[62,63]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm17 = zmm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm5[20],zero,zmm5[18],zero,zero,zero,zero,zmm5[21],zero,zmm5[19],zero,zero,zero,zero,zmm5[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm5[57],zero,zmm5[55],zero,zero,zero,zero,zmm5[58],zero,zmm5[56],zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm17 = zmm17[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: vporq %zmm16, %zmm17, %zmm16
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: movabsq $1161999626690365456, %rcx # imm = 0x1020408102040810
; AVX512BW-NEXT: kmovq %rcx, %k2
; AVX512BW-NEXT: vmovdqu8 %zmm15, %zmm16 {%k2}
@@ -4924,10 +4852,9 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: # ymm2 = mem[0,1,0,1]
; AVX512BW-NEXT: vpermw %ymm7, %ymm2, %ymm2
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm4 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX512BW-NEXT: vpor %ymm3, %ymm4, %ymm3
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512BW-NEXT: movl $-2130574328, %ecx # imm = 0x81020408
; AVX512BW-NEXT: kmovd %ecx, %k1
; AVX512BW-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
@@ -4945,8 +4872,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm3
+; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm4
; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm1
; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm2
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[0,1,0,1,14],zero,ymm1[14,15,0,1,14,15],zero,ymm1[13,14,15,16,17,16],zero,ymm1[30,31,30,31,16,17],zero,ymm1[31,28,29,30,31]
@@ -4958,8 +4885,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm5, %zmm5
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[0,1,14],zero,ymm4[12,13,0,1,14,15],zero,ymm4[3,12,13,2,3,16],zero,ymm4[30,31,28,29,16,17],zero,ymm4[31,18,19,28,29,18],zero
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero,zero,zero,ymm3[18]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[0,1,14],zero,ymm3[12,13,0,1,14,15],zero,ymm3[3,12,13,2,3,16],zero,ymm3[30,31,28,29,16,17],zero,ymm3[31,18,19,28,29,18],zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm4[14],zero,zero,zero,zero,zero,zero,ymm4[15],zero,zero,zero,zero,zero,zero,ymm4[16],zero,zero,zero,zero,zero,zero,ymm4[17],zero,zero,zero,zero,zero,zero,ymm4[18]
; AVX512BW-FCP-NEXT: vpor %ymm0, %ymm6, %ymm0
; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm12
; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm14
@@ -4993,30 +4920,27 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: movabsq $4066998693416279096, %rcx # imm = 0x3870E1C3870E1C38
; AVX512BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm10, %zmm0 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm4, %zmm10
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm10
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm10 = zmm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,zmm10[19],zero,zmm10[21,20,21,22],zero,zmm10[20],zero,zmm10[22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57],zero,zmm10[55],zero,zmm10[53,54,55,58],zero,zmm10[56],zero,zmm10[60,61,58,59]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm15
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm15
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm15 = zmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm15[21],zero,zmm15[19],zero,zero,zero,zero,zmm15[22],zero,zmm15[20],zero,zero,zmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm15[57],zero,zmm15[55],zero,zero,zero,zero,zmm15[58],zero,zmm15[56],zero,zero,zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vporq %zmm10, %zmm15, %zmm15
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm10
+; AVX512BW-FCP-NEXT: vporq %zmm10, %zmm15, %zmm10
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm15 = zmm10[2,3,2,3,6,7,6,7]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm10
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm10 = zmm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm10[18,19,20,21],zero,zmm10[19],zero,zmm10[25,26,27,22],zero,zmm10[20],zero,zmm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm10[55],zero,zero,zero,zero,zmm10[58],zero,zmm10[56],zero,zero,zero,zero,zmm10[59],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm16
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm16
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm16 = zmm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm16[18],zero,zero,zero,zero,zmm16[21],zero,zmm16[19],zero,zero,zero,zero,zmm16[22],zero,zmm16[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm16[55],zero,zero,zero,zero,zmm16[58],zero,zmm16[56],zero,zero,zero,zero,zmm16[59],zero,zmm16[57]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: vporq %zmm10, %zmm16, %zmm10
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: movabsq $-9005497107459067808, %rcx # imm = 0x83060C180C183060
; AVX512BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm10 {%k1}
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm15 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
; AVX512BW-FCP-NEXT: vpermw %zmm7, %zmm15, %zmm15
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm16 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm6[18],zero,zmm6[20,21,20,21],zero,zmm6[19],zero,zmm6[19,20,21,22],zero,zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm6[55],zero,zmm6[55,56,57,58],zero,zmm6[56],zero,zmm6[62,63]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm17 = zmm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm5[20],zero,zmm5[18],zero,zero,zero,zero,zmm5[21],zero,zmm5[19],zero,zero,zero,zero,zmm5[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm5[57],zero,zmm5[55],zero,zero,zero,zero,zmm5[58],zero,zmm5[56],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm17 = zmm17[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: vporq %zmm16, %zmm17, %zmm16
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: movabsq $1161999626690365456, %rcx # imm = 0x1020408102040810
; AVX512BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm16 {%k1}
@@ -5055,16 +4979,14 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
; AVX512BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm9, %zmm8 {%k1}
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm4[28],zero,ymm4[30,31,30,31],zero,ymm4[29],zero,ymm4[31,28,29]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29],zero,zero,zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm3, %ymm3
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX512BW-FCP-NEXT: vpor %ymm2, %ymm1, %ymm1
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX512BW-FCP-NEXT: movl $101455920, %ecx # imm = 0x60C1830
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %ymm3, %ymm1 {%k1}
@@ -5072,10 +4994,9 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: # ymm2 = mem[0,1,0,1]
; AVX512BW-FCP-NEXT: vpermw %ymm7, %ymm2, %ymm2
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX512BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512BW-FCP-NEXT: movl $-2130574328, %ecx # imm = 0x81020408
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
@@ -5143,33 +5064,29 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm9, %zmm0 {%k1}
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm9
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm9 = zmm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm9[18,19,20,21],zero,zmm9[19],zero,zmm9[25,26,27,22],zero,zmm9[20],zero,zmm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm9[55],zero,zero,zero,zero,zmm9[58],zero,zmm9[56],zero,zero,zero,zero,zmm9[59],zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm15
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm15 = zmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm15[18],zero,zero,zero,zero,zmm15[21],zero,zmm15[19],zero,zero,zero,zero,zmm15[22],zero,zmm15[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm15[55],zero,zero,zero,zero,zmm15[58],zero,zmm15[56],zero,zero,zero,zero,zmm15[59],zero,zmm15[57]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: vporq %zmm9, %zmm15, %zmm9
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} ymm15 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,0,1,1,4,4,5,5]
; AVX512DQ-BW-NEXT: movl $676341840, %ecx # imm = 0x28502850
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm15 {%k1} = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm16 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm1[23],zero,ymm1[21,22,23,26],zero,ymm1[24],zero,ymm1[28,29,26,27]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm16 = ymm16[2,3,2,3]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm17 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm17 = ymm17[2,3,2,3]
; AVX512DQ-BW-NEXT: vporq %ymm16, %ymm17, %ymm16
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm16, %zmm15, %zmm15
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: movabsq $-9005497107459067808, %rcx # imm = 0x83060C180C183060
; AVX512DQ-BW-NEXT: kmovq %rcx, %k2
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm15, %zmm9 {%k2}
; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm15 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
; AVX512DQ-BW-NEXT: vpermw %zmm7, %zmm15, %zmm15
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm16 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm6[18],zero,zmm6[20,21,20,21],zero,zmm6[19],zero,zmm6[19,20,21,22],zero,zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm6[55],zero,zmm6[55,56,57,58],zero,zmm6[56],zero,zmm6[62,63]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm17 = zmm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm5[20],zero,zmm5[18],zero,zero,zero,zero,zmm5[21],zero,zmm5[19],zero,zero,zero,zero,zmm5[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm5[57],zero,zmm5[55],zero,zero,zero,zero,zmm5[58],zero,zmm5[56],zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm17 = zmm17[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: vporq %zmm16, %zmm17, %zmm16
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: movabsq $1161999626690365456, %rcx # imm = 0x1020408102040810
; AVX512DQ-BW-NEXT: kmovq %rcx, %k2
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm15, %zmm16 {%k2}
@@ -5226,10 +5143,9 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: # ymm2 = mem[0,1,0,1]
; AVX512DQ-BW-NEXT: vpermw %ymm7, %ymm2, %ymm2
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm4 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX512DQ-BW-NEXT: vpor %ymm3, %ymm4, %ymm3
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512DQ-BW-NEXT: movl $-2130574328, %ecx # imm = 0x81020408
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
@@ -5247,8 +5163,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm4
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm1
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm2
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[0,1,0,1,14],zero,ymm1[14,15,0,1,14,15],zero,ymm1[13,14,15,16,17,16],zero,ymm1[30,31,30,31,16,17],zero,ymm1[31,28,29,30,31]
@@ -5260,8 +5176,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm5, %zmm5
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[0,1,14],zero,ymm4[12,13,0,1,14,15],zero,ymm4[3,12,13,2,3,16],zero,ymm4[30,31,28,29,16,17],zero,ymm4[31,18,19,28,29,18],zero
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero,zero,zero,ymm3[18]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[0,1,14],zero,ymm3[12,13,0,1,14,15],zero,ymm3[3,12,13,2,3,16],zero,ymm3[30,31,28,29,16,17],zero,ymm3[31,18,19,28,29,18],zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm4[14],zero,zero,zero,zero,zero,zero,ymm4[15],zero,zero,zero,zero,zero,zero,ymm4[16],zero,zero,zero,zero,zero,zero,ymm4[17],zero,zero,zero,zero,zero,zero,ymm4[18]
; AVX512DQ-BW-FCP-NEXT: vpor %ymm0, %ymm6, %ymm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm12
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm14
@@ -5295,30 +5211,27 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: movabsq $4066998693416279096, %rcx # imm = 0x3870E1C3870E1C38
; AVX512DQ-BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm10, %zmm0 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm4, %zmm10
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm10
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm10 = zmm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,zmm10[19],zero,zmm10[21,20,21,22],zero,zmm10[20],zero,zmm10[22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57],zero,zmm10[55],zero,zmm10[53,54,55,58],zero,zmm10[56],zero,zmm10[60,61,58,59]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm15
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm15
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm15 = zmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm15[21],zero,zmm15[19],zero,zero,zero,zero,zmm15[22],zero,zmm15[20],zero,zero,zmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm15[57],zero,zmm15[55],zero,zero,zero,zero,zmm15[58],zero,zmm15[56],zero,zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm15 = zmm15[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vporq %zmm10, %zmm15, %zmm15
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm10
+; AVX512DQ-BW-FCP-NEXT: vporq %zmm10, %zmm15, %zmm10
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm15 = zmm10[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm10
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm10 = zmm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm10[18,19,20,21],zero,zmm10[19],zero,zmm10[25,26,27,22],zero,zmm10[20],zero,zmm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm10[55],zero,zero,zero,zero,zmm10[58],zero,zmm10[56],zero,zero,zero,zero,zmm10[59],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm16
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm16
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm16 = zmm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm16[18],zero,zero,zero,zero,zmm16[21],zero,zmm16[19],zero,zero,zero,zero,zmm16[22],zero,zmm16[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm16[55],zero,zero,zero,zero,zmm16[58],zero,zmm16[56],zero,zero,zero,zero,zmm16[59],zero,zmm16[57]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: vporq %zmm10, %zmm16, %zmm10
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: movabsq $-9005497107459067808, %rcx # imm = 0x83060C180C183060
; AVX512DQ-BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm10 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm15 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
; AVX512DQ-BW-FCP-NEXT: vpermw %zmm7, %zmm15, %zmm15
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm16 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm6[18],zero,zmm6[20,21,20,21],zero,zmm6[19],zero,zmm6[19,20,21,22],zero,zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm6[55],zero,zmm6[55,56,57,58],zero,zmm6[56],zero,zmm6[62,63]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm17 = zmm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm5[20],zero,zmm5[18],zero,zero,zero,zero,zmm5[21],zero,zmm5[19],zero,zero,zero,zero,zmm5[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm5[57],zero,zmm5[55],zero,zero,zero,zero,zmm5[58],zero,zmm5[56],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm17 = zmm17[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: vporq %zmm16, %zmm17, %zmm16
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: movabsq $1161999626690365456, %rcx # imm = 0x1020408102040810
; AVX512DQ-BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm16 {%k1}
@@ -5357,16 +5270,14 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
; AVX512DQ-BW-FCP-NEXT: kmovq %rcx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm9, %zmm8 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm4[28],zero,ymm4[30,31,30,31],zero,ymm4[29],zero,ymm4[31,28,29]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29],zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm3, %ymm3
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX512DQ-BW-FCP-NEXT: vpor %ymm2, %ymm1, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX512DQ-BW-FCP-NEXT: movl $101455920, %ecx # imm = 0x60C1830
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm3, %ymm1 {%k1}
@@ -5374,10 +5285,9 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: # ymm2 = mem[0,1,0,1]
; AVX512DQ-BW-FCP-NEXT: vpermw %ymm7, %ymm2, %ymm2
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX512DQ-BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX512DQ-BW-FCP-NEXT: movl $-2130574328, %ecx # imm = 0x81020408
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
@@ -7376,8 +7286,8 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT: vmovdqa 32(%rsi), %ymm2
+; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovdqa 32(%rdx), %ymm6
; AVX2-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovdqa 32(%rcx), %ymm7
@@ -7389,15 +7299,13 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovdqa 32(%rax), %ymm3
; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[23],zero,ymm0[27,20,21,26],zero,ymm0[24],zero,ymm0[26,27,26,27],zero,ymm0[25]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero,zero,zero,ymm1[27],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm7[25],zero,ymm7[23],zero,zero,zero,zero,ymm7[26],zero,ymm7[24],zero,zero,zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero,ymm6[27]
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,24,25,26,27,24,25,30,31]
@@ -7415,17 +7323,16 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovdqa (%r8), %ymm0
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,ymm0[27,28,29,30],zero,ymm0[28],zero,ymm0[26,27,30,31],zero,ymm0[29]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-NEXT: vmovdqa (%r9), %ymm1
; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,ymm0[27,28,29,30],zero,ymm0[28],zero,ymm0[26,27,30,31],zero,ymm0[29]
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovdqa (%rax), %ymm1
; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovdqa (%rdx), %ymm1
@@ -7622,12 +7529,11 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: # ymm3 = mem[0,1,0,1]
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vpshufb %ymm3, %ymm0, %ymm2
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128]
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vpshufb %ymm4, %ymm0, %ymm5
-; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
; AVX2-NEXT: vpor %ymm2, %ymm5, %ymm2
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX2-NEXT: vpshuflw $150, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
; AVX2-NEXT: # ymm5 = mem[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[0,1,1,3,4,5,5,7]
@@ -7635,15 +7541,14 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255]
; AVX2-NEXT: vpblendvb %ymm6, %ymm2, %ymm5, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-NEXT: vpshufb %ymm3, %ymm14, %ymm3
-; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-NEXT: vpshufb %ymm3, %ymm8, %ymm3
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
; AVX2-NEXT: vpshufb %ymm4, %ymm13, %ymm4
-; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX2-NEXT: vpor %ymm3, %ymm4, %ymm3
-; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-NEXT: vpshuflw {{.*#+}} ymm4 = ymm8[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX2-NEXT: vpshuflw {{.*#+}} ymm4 = ymm14[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[0,1,1,3,4,5,5,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,2]
; AVX2-NEXT: vpblendvb %ymm6, %ymm3, %ymm4, %ymm0
@@ -7665,24 +7570,22 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vpblendvb %ymm7, %ymm6, %ymm5, %ymm5
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
; AVX2-NEXT: # ymm6 = mem[0,1,0,1]
-; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-NEXT: vpshufb %ymm6, %ymm10, %ymm7
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128]
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm9
-; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
+; AVX2-NEXT: vpshufb %ymm6, %ymm2, %ymm7
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128]
+; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX2-NEXT: vpshufb %ymm3, %ymm10, %ymm9
; AVX2-NEXT: vpor %ymm7, %ymm9, %ymm7
+; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
; AVX2-NEXT: vpblendvb %ymm9, %ymm7, %ymm4, %ymm4
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vpshufb %ymm6, %ymm15, %ymm6
-; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX2-NEXT: vpshufb %ymm3, %ymm12, %ymm7
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-NEXT: vpor %ymm6, %ymm7, %ymm6
+; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
; AVX2-NEXT: vpblendvb %ymm9, %ymm6, %ymm5, %ymm5
; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0]
@@ -7693,26 +7596,22 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vpblendvb %ymm6, %ymm5, %ymm3, %ymm3
; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[23],zero,ymm0[27,20,21,26],zero,ymm0[24],zero,ymm0[26,27,26,27],zero,ymm0[25]
-; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX2-NEXT: vmovdqa %ymm1, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero,zero,zero,ymm1[27],zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
; AVX2-NEXT: vpor %ymm4, %ymm5, %ymm4
; AVX2-NEXT: vpshufb {{.*#+}} ymm5 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm15[25],zero,ymm15[23],zero,zero,zero,zero,ymm15[26],zero,ymm15[24],zero,zero,zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[25],zero,ymm12[23],zero,zero,zero,zero,ymm12[26],zero,ymm12[24],zero,zero,zero,zero,ymm12[27]
-; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
; AVX2-NEXT: vpor %ymm5, %ymm6, %ymm5
+; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
; AVX2-NEXT: vpblendvb %ymm3, %ymm4, %ymm5, %ymm4
-; AVX2-NEXT: vpshufb {{.*#+}} ymm5 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm14[25],zero,ymm14[23],zero,zero,zero,zero,ymm14[26],zero,ymm14[24],zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm5 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm8[25],zero,ymm8[23],zero,zero,zero,zero,ymm8[26],zero,ymm8[24],zero,zero
; AVX2-NEXT: vpshufb {{.*#+}} ymm6 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm13[25],zero,ymm13[23],zero,zero,zero,zero,ymm13[26],zero,ymm13[24],zero,zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
; AVX2-NEXT: vpor %ymm5, %ymm6, %ymm5
-; AVX2-NEXT: vpshufb {{.*#+}} ymm6 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX2-NEXT: vmovdqa %ymm8, %ymm14
+; AVX2-NEXT: vpshufb {{.*#+}} ymm6 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
+; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm7 = [0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u]
; AVX2-NEXT: vpblendvb %ymm7, %ymm5, %ymm6, %ymm5
; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
@@ -7725,10 +7624,11 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vpshufb %ymm7, %ymm4, %ymm8
; AVX2-NEXT: vpor %ymm5, %ymm8, %ymm5
; AVX2-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX2-NEXT: vpshufb %ymm8, %ymm10, %ymm9
-; AVX2-NEXT: vmovdqa %ymm10, %ymm3
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,0,128,14,128,128,128,128,1,128,15,128,128,128,128,128,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128]
-; AVX2-NEXT: vpshufb %ymm10, %ymm2, %ymm11
+; AVX2-NEXT: vpshufb %ymm8, %ymm2, %ymm9
+; AVX2-NEXT: vmovdqa %ymm2, %ymm3
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,0,128,14,128,128,128,128,1,128,15,128,128,128,128,128,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128]
+; AVX2-NEXT: vpshufb %ymm1, %ymm10, %ymm11
+; AVX2-NEXT: vmovdqa %ymm10, %ymm2
; AVX2-NEXT: vpor %ymm9, %ymm11, %ymm9
; AVX2-NEXT: vmovdqa {{.*#+}} ymm11 = [u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255]
; AVX2-NEXT: vpblendvb %ymm11, %ymm5, %ymm9, %ymm5
@@ -7737,7 +7637,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vpshufb %ymm7, %ymm0, %ymm7
; AVX2-NEXT: vpor %ymm6, %ymm7, %ymm6
; AVX2-NEXT: vpshufb %ymm8, %ymm15, %ymm7
-; AVX2-NEXT: vpshufb %ymm10, %ymm12, %ymm8
+; AVX2-NEXT: vpshufb %ymm1, %ymm12, %ymm8
; AVX2-NEXT: vpor %ymm7, %ymm8, %ymm7
; AVX2-NEXT: vpblendvb %ymm11, %ymm6, %ymm7, %ymm6
; AVX2-NEXT: vmovdqa {{.*#+}} ymm7 = [128,1,2,3,0,128,14,128,0,1,0,1,128,15,128,15,128,17,18,19,16,128,30,128,16,17,16,17,128,31,128,31]
@@ -7820,7 +7720,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX2-FP-LABEL: store_i8_stride7_vf64:
; AVX2-FP: # %bb.0:
-; AVX2-FP-NEXT: subq $648, %rsp # imm = 0x288
+; AVX2-FP-NEXT: subq $616, %rsp # imm = 0x268
; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-FP-NEXT: vmovdqa 32(%rsi), %ymm7
@@ -7832,20 +7732,18 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[23],zero,ymm1[27,20,21,26],zero,ymm1[24],zero,ymm1[26,27,26,27],zero,ymm1[25]
; AVX2-FP-NEXT: vmovdqa %ymm1, %ymm8
; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm7[23],zero,zero,zero,zero,ymm7[26],zero,ymm7[24],zero,zero,zero,zero,ymm7[27],zero
; AVX2-FP-NEXT: vmovdqa %ymm7, %ymm9
; AVX2-FP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
; AVX2-FP-NEXT: vmovdqa %ymm2, %ymm7
; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero,ymm6[27]
; AVX2-FP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm2 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,24,25,26,27,24,25,30,31]
@@ -7865,15 +7763,13 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[17,18,19,30],zero,ymm6[28],zero,ymm6[28,29,30,31],zero,ymm6[29],zero,ymm6[31]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm7[30],zero,ymm7[28],zero,zero,zero,zero,ymm7[31],zero,ymm7[29],zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero,ymm9[29],zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[30],zero,ymm8[28],zero,zero,zero,zero,ymm8[31],zero,ymm8[29],zero,zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX2-FP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm2 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,26,27,30,31,30,31,28,29,28,29,28,29,28,29]
@@ -8052,184 +7948,167 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqa (%rdx), %ymm1
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[17,18,19,30],zero,ymm1[28],zero,ymm1[28,29,30,31],zero,ymm1[29],zero,ymm1[31]
-; AVX2-FP-NEXT: vmovdqa %ymm1, %ymm4
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa (%rcx), %ymm2
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero
-; AVX2-FP-NEXT: vmovdqa %ymm2, %ymm10
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-FP-NEXT: vmovdqa %ymm1, %ymm3
+; AVX2-FP-NEXT: vmovdqa (%rcx), %ymm5
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero
; AVX2-FP-NEXT: vpor %ymm0, %ymm1, %ymm2
-; AVX2-FP-NEXT: vmovdqa (%rsi), %ymm1
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
-; AVX2-FP-NEXT: vmovdqa %ymm1, %ymm12
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm3 = ymm0[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm5
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm3, %ymm6, %ymm3
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm0 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
-; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm2, %ymm3, %ymm6
-; AVX2-FP-NEXT: vmovdqa (%r8), %ymm0
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm2 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,ymm0[27,28,29,30],zero,ymm0[28],zero,ymm0[26,27,30,31],zero,ymm0[29]
-; AVX2-FP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX2-FP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
+; AVX2-FP-NEXT: vmovdqa (%rsi), %ymm0
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm4 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29],zero,zero,zero
+; AVX2-FP-NEXT: vmovdqa %ymm0, %ymm11
+; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm13
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[30],zero,ymm13[28],zero,zero,zero,zero,ymm13[31],zero,ymm13[29],zero,zero,zero,zero
+; AVX2-FP-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa (%r9), %ymm0
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm0[27],zero,zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero
-; AVX2-FP-NEXT: vmovdqa %ymm0, %ymm3
-; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm0 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
+; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm2, %ymm4, %ymm6
+; AVX2-FP-NEXT: vmovdqa (%r8), %ymm14
+; AVX2-FP-NEXT: vmovdqa (%r9), %ymm1
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm2 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,ymm14[27,28,29,30],zero,ymm14[28],zero,ymm14[26,27,30,31],zero,ymm14[29]
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm7 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero
+; AVX2-FP-NEXT: vmovdqu %ymm1, (%rsp) # 32-byte Spill
; AVX2-FP-NEXT: vpor %ymm2, %ymm7, %ymm7
; AVX2-FP-NEXT: vmovdqa (%rax), %ymm0
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX2-FP-NEXT: vmovdqa %ymm0, %ymm11
+; AVX2-FP-NEXT: vmovdqa %ymm0, %ymm10
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0]
; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm8 = [255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0]
; AVX2-FP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm0
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[23],zero,ymm5[27,20,21,26],zero,ymm5[24],zero,ymm5[26,27,26,27],zero,ymm5[25]
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[23],zero,ymm13[27,20,21,26],zero,ymm13[24],zero,ymm13[26,27,26,27],zero,ymm13[25]
+; AVX2-FP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm11[23],zero,zero,zero,zero,ymm11[26],zero,ymm11[24],zero,zero,zero,zero,ymm11[27],zero
+; AVX2-FP-NEXT: vmovdqa %ymm11, %ymm2
+; AVX2-FP-NEXT: vpor %ymm6, %ymm8, %ymm6
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm5[25],zero,ymm5[23],zero,zero,zero,zero,ymm5[26],zero,ymm5[24],zero,zero,zero,zero
+; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27]
+; AVX2-FP-NEXT: vpor %ymm8, %ymm9, %ymm8
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm7 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm12[23],zero,zero,zero,zero,ymm12[26],zero,ymm12[24],zero,zero,zero,zero,ymm12[27],zero
-; AVX2-FP-NEXT: vmovdqa %ymm12, %ymm14
-; AVX2-FP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm6, %ymm7, %ymm6
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm7 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm10[25],zero,ymm10[23],zero,zero,zero,zero,ymm10[26],zero,ymm10[24],zero,zero,zero,zero
-; AVX2-FP-NEXT: vmovdqa %ymm10, %ymm13
-; AVX2-FP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa %ymm4, %ymm2
-; AVX2-FP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[25],zero,ymm4[23],zero,zero,zero,zero,ymm4[26],zero,ymm4[24],zero,zero,zero,zero,ymm4[27]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm7, %ymm8, %ymm7
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm0 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
-; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm6, %ymm7, %ymm6
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm7 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero,zero
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm6, %ymm8, %ymm6
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm9 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm14[25],zero,ymm14[23],zero,zero,zero,zero,ymm14[26],zero,ymm14[24],zero,zero,zero
+; AVX2-FP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm9 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u]
-; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm8 = [0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
-; AVX2-FP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm0
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm10 = [0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u]
+; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
+; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm6, %ymm8, %ymm0
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
-; AVX2-FP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
+; AVX2-FP-NEXT: # ymm9 = mem[0,1,0,1]
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm8, %ymm0, %ymm7
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm9, %ymm6, %ymm10
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm7, %ymm10, %ymm7
-; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
-; AVX2-FP-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm10, %ymm4, %ymm11
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128]
+; AVX2-FP-NEXT: vpshufb %ymm9, %ymm0, %ymm8
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm10, %ymm0, %ymm11
+; AVX2-FP-NEXT: vpor %ymm8, %ymm11, %ymm8
+; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
+; AVX2-FP-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm11, %ymm7, %ymm12
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128]
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm12, %ymm1, %ymm15
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm11, %ymm15, %ymm11
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm15 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
-; AVX2-FP-NEXT: vpblendvb %ymm15, %ymm7, %ymm11, %ymm0
-; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpshufb %ymm8, %ymm13, %ymm8
+; AVX2-FP-NEXT: vpshufb %ymm0, %ymm1, %ymm15
+; AVX2-FP-NEXT: vpor %ymm12, %ymm15, %ymm12
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FP-NEXT: vpshufb %ymm9, %ymm2, %ymm9
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm8, %ymm9, %ymm8
-; AVX2-FP-NEXT: vpshufb %ymm10, %ymm14, %ymm9
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX2-FP-NEXT: vpshufb %ymm12, %ymm5, %ymm10
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm15 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
+; AVX2-FP-NEXT: vpblendvb %ymm15, %ymm8, %ymm12, %ymm4
+; AVX2-FP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpshufb %ymm9, %ymm5, %ymm9
+; AVX2-FP-NEXT: vpshufb %ymm10, %ymm3, %ymm10
; AVX2-FP-NEXT: vpor %ymm9, %ymm10, %ymm9
-; AVX2-FP-NEXT: vpblendvb %ymm15, %ymm8, %ymm9, %ymm8
-; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
-; AVX2-FP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm9, %ymm14, %ymm10
+; AVX2-FP-NEXT: vmovdqa %ymm2, %ymm3
+; AVX2-FP-NEXT: vpshufb %ymm11, %ymm2, %ymm10
+; AVX2-FP-NEXT: vpshufb %ymm0, %ymm13, %ymm11
+; AVX2-FP-NEXT: vpor %ymm10, %ymm11, %ymm10
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm11, %ymm3, %ymm12
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm10, %ymm12, %ymm10
-; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [20,21,18,19,18,19,20,21,18,19,20,21,28,29,30,31,20,21,18,19,18,19,20,21,18,19,20,21,28,29,30,31]
-; AVX2-FP-NEXT: # ymm12 = mem[0,1,0,1]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm12, %ymm13, %ymm15
+; AVX2-FP-NEXT: vpblendvb %ymm15, %ymm9, %ymm10, %ymm9
+; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
+; AVX2-FP-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm10, %ymm8, %ymm11
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm12, %ymm4, %ymm13
+; AVX2-FP-NEXT: vpor %ymm11, %ymm13, %ymm11
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
+; AVX2-FP-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [20,21,18,19,18,19,20,21,18,19,20,21,28,29,30,31,20,21,18,19,18,19,20,21,18,19,20,21,28,29,30,31]
+; AVX2-FP-NEXT: # ymm13 = mem[0,1,0,1]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm13, %ymm6, %ymm15
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,2]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm0 = [u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255]
-; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm10, %ymm15, %ymm10
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm9, %ymm15, %ymm9
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX2-FP-NEXT: vmovdqu (%rsp), %ymm2 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm11, %ymm2, %ymm11
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX2-FP-NEXT: vpor %ymm9, %ymm11, %ymm9
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm12, %ymm7, %ymm11
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,2]
-; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm9, %ymm11, %ymm0
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0]
-; AVX2-FP-NEXT: vpblendvb %ymm9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX2-FP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm8, %ymm0, %ymm8
+; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm11, %ymm15, %ymm11
+; AVX2-FP-NEXT: vmovdqu (%rsp), %ymm15 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm10, %ymm15, %ymm10
+; AVX2-FP-NEXT: vpshufb %ymm12, %ymm14, %ymm12
+; AVX2-FP-NEXT: vpor %ymm10, %ymm12, %ymm10
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm13, %ymm2, %ymm12
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,2]
+; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm10, %ymm12, %ymm0
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0]
+; AVX2-FP-NEXT: vpblendvb %ymm10, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
+; AVX2-FP-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm9, %ymm0, %ymm9
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX2-FP-NEXT: vpshufb %ymm0, %ymm4, %ymm9
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm10 = [0,128,14,128,128,128,128,1,128,15,128,128,128,128,2,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128,18,128]
-; AVX2-FP-NEXT: vpshufb %ymm10, %ymm1, %ymm11
-; AVX2-FP-NEXT: vpor %ymm9, %ymm11, %ymm9
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm0, %ymm7, %ymm10
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm11 = [0,128,14,128,128,128,128,1,128,15,128,128,128,128,2,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128,18,128]
; AVX2-FP-NEXT: vpshufb %ymm11, %ymm1, %ymm12
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,0,128,14,128,128,128,128,1,128,15,128,128,128,128,128,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128]
-; AVX2-FP-NEXT: vpshufb %ymm4, %ymm6, %ymm6
-; AVX2-FP-NEXT: vpor %ymm6, %ymm12, %ymm6
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255]
-; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm9, %ymm6, %ymm9
+; AVX2-FP-NEXT: vpor %ymm10, %ymm12, %ymm10
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm0, %ymm1, %ymm0
-; AVX2-FP-NEXT: vpshufb %ymm10, %ymm5, %ymm1
-; AVX2-FP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-FP-NEXT: vpshufb %ymm12, %ymm1, %ymm13
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm14 = [128,128,0,128,14,128,128,128,128,1,128,15,128,128,128,128,128,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm14, %ymm1, %ymm7
+; AVX2-FP-NEXT: vpor %ymm7, %ymm13, %ymm7
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm13 = [u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255]
+; AVX2-FP-NEXT: vpblendvb %ymm13, %ymm10, %ymm7, %ymm10
+; AVX2-FP-NEXT: vpshufb %ymm0, %ymm3, %ymm0
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FP-NEXT: vpshufb %ymm11, %ymm1, %ymm1
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm4, %ymm5, %ymm4
-; AVX2-FP-NEXT: vpor %ymm1, %ymm4, %ymm1
-; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm0, %ymm1, %ymm0
+; AVX2-FP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-FP-NEXT: vpshufb %ymm12, %ymm5, %ymm1
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm14, %ymm3, %ymm3
+; AVX2-FP-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX2-FP-NEXT: vpblendvb %ymm13, %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,1,2,3,0,128,14,128,0,1,0,1,128,15,128,15,128,17,18,19,16,128,30,128,16,17,16,17,128,31,128,31]
-; AVX2-FP-NEXT: vpshufb %ymm1, %ymm3, %ymm4
+; AVX2-FP-NEXT: vpshufb %ymm1, %ymm4, %ymm3
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm5 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX2-FP-NEXT: vpshufb %ymm5, %ymm14, %ymm6
-; AVX2-FP-NEXT: vpor %ymm4, %ymm6, %ymm4
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm6 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX2-FP-NEXT: vpshufb %ymm6, %ymm13, %ymm10
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm11 = [255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u]
-; AVX2-FP-NEXT: vpblendvb %ymm11, %ymm4, %ymm10, %ymm4
-; AVX2-FP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
-; AVX2-FP-NEXT: vpshufb %ymm5, %ymm15, %ymm3
-; AVX2-FP-NEXT: vpor %ymm1, %ymm3, %ymm1
-; AVX2-FP-NEXT: vpshufb %ymm6, %ymm7, %ymm2
-; AVX2-FP-NEXT: vpblendvb %ymm11, %ymm1, %ymm2, %ymm1
+; AVX2-FP-NEXT: vpshufb %ymm5, %ymm8, %ymm7
+; AVX2-FP-NEXT: vpor %ymm3, %ymm7, %ymm3
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm7 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX2-FP-NEXT: vpshufb %ymm7, %ymm6, %ymm11
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u]
+; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm3, %ymm11, %ymm3
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-FP-NEXT: vpshufb %ymm1, %ymm4, %ymm1
+; AVX2-FP-NEXT: vpshufb %ymm5, %ymm15, %ymm4
+; AVX2-FP-NEXT: vpor %ymm1, %ymm4, %ymm1
+; AVX2-FP-NEXT: vpshufb %ymm7, %ymm2, %ymm2
+; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm1, %ymm2, %ymm1
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255]
-; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm9, %ymm4, %ymm3
+; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm10, %ymm3, %ymm3
; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FP-NEXT: vmovdqa %ymm0, 96(%rax)
; AVX2-FP-NEXT: vmovdqa %ymm3, 320(%rax)
-; AVX2-FP-NEXT: vmovdqa %ymm8, 128(%rax)
+; AVX2-FP-NEXT: vmovdqa %ymm9, 128(%rax)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 352(%rax)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -8252,13 +8131,13 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovaps %ymm0, 416(%rax)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 384(%rax)
-; AVX2-FP-NEXT: addq $648, %rsp # imm = 0x288
+; AVX2-FP-NEXT: addq $616, %rsp # imm = 0x268
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
;
; AVX2-FCP-LABEL: store_i8_stride7_vf64:
; AVX2-FCP: # %bb.0:
-; AVX2-FCP-NEXT: subq $648, %rsp # imm = 0x288
+; AVX2-FCP-NEXT: subq $616, %rsp # imm = 0x268
; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovdqa 32(%rsi), %ymm7
@@ -8270,20 +8149,18 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[23],zero,ymm1[27,20,21,26],zero,ymm1[24],zero,ymm1[26,27,26,27],zero,ymm1[25]
; AVX2-FCP-NEXT: vmovdqa %ymm1, %ymm8
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm7[23],zero,zero,zero,zero,ymm7[26],zero,ymm7[24],zero,zero,zero,zero,ymm7[27],zero
; AVX2-FCP-NEXT: vmovdqa %ymm7, %ymm9
; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm7
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero,ymm6[27]
; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,24,25,26,27,24,25,30,31]
@@ -8303,15 +8180,13 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[17,18,19,30],zero,ymm6[28],zero,ymm6[28,29,30,31],zero,ymm6[29],zero,ymm6[31]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm7[30],zero,ymm7[28],zero,zero,zero,zero,ymm7[31],zero,ymm7[29],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero,ymm9[29],zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[30],zero,ymm8[28],zero,zero,zero,zero,ymm8[31],zero,ymm8[29],zero,zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX2-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,26,27,30,31,30,31,28,29,28,29,28,29,28,29]
@@ -8332,7 +8207,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX2-FCP-NEXT: vmovdqa %xmm1, %xmm14
-; AVX2-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
@@ -8349,7 +8224,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm12
; AVX2-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
; AVX2-FCP-NEXT: vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vmovdqa %xmm11, (%rsp) # 16-byte Spill
+; AVX2-FCP-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX2-FCP-NEXT: vmovdqa (%rcx), %xmm2
; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -8445,7 +8320,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm10[8],xmm7[8],xmm10[9],xmm7[9],xmm10[10],xmm7[10],xmm10[11],xmm7[11],xmm10[12],xmm7[12],xmm10[13],xmm7[13],xmm10[14],xmm7[14],xmm10[15],xmm7[15]
-; AVX2-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX2-FCP-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX2-FCP-NEXT: vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm1 = xmm1[8],mem[8],xmm1[9],mem[9],xmm1[10],mem[10],xmm1[11],mem[11],xmm1[12],mem[12],xmm1[13],mem[13],xmm1[14],mem[14],xmm1[15],mem[15]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
@@ -8458,7 +8333,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vpblendvb %ymm6, %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vmovdqa (%rsp), %xmm2 # 16-byte Reload
+; AVX2-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; AVX2-FCP-NEXT: vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm2 = xmm2[8],mem[8],xmm2[9],mem[9],xmm2[10],mem[10],xmm2[11],mem[11],xmm2[12],mem[12],xmm2[13],mem[13],xmm2[14],mem[14],xmm2[15],mem[15]
; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -8488,186 +8363,167 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa (%rdx), %ymm1
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[17,18,19,30],zero,ymm1[28],zero,ymm1[28,29,30,31],zero,ymm1[29],zero,ymm1[31]
-; AVX2-FCP-NEXT: vmovdqa %ymm1, %ymm4
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa (%rcx), %ymm2
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero
-; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm10
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-FCP-NEXT: vmovdqa %ymm1, %ymm3
+; AVX2-FCP-NEXT: vmovdqa (%rcx), %ymm5
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero
; AVX2-FCP-NEXT: vpor %ymm0, %ymm1, %ymm2
-; AVX2-FCP-NEXT: vmovdqa (%rsi), %ymm5
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm0[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm0
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29],zero,zero,zero,zero
-; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm12
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm3, %ymm6, %ymm3
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm2, %ymm3, %ymm6
-; AVX2-FCP-NEXT: vmovdqa (%r8), %ymm0
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,ymm0[27,28,29,30],zero,ymm0[28],zero,ymm0[26,27,30,31],zero,ymm0[29]
-; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa (%rsi), %ymm0
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29],zero,zero,zero
+; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm11
+; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm13
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[30],zero,ymm13[28],zero,zero,zero,zero,ymm13[31],zero,ymm13[29],zero,zero,zero,zero
+; AVX2-FCP-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa (%r9), %ymm0
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm0[27],zero,zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero
-; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm3
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u]
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm2, %ymm4, %ymm6
+; AVX2-FCP-NEXT: vmovdqa (%r8), %ymm14
+; AVX2-FCP-NEXT: vmovdqa (%r9), %ymm1
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,ymm14[27,28,29,30],zero,ymm14[28],zero,ymm14[26,27,30,31],zero,ymm14[29]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero
+; AVX2-FCP-NEXT: vmovdqu %ymm1, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT: vpor %ymm2, %ymm7, %ymm7
; AVX2-FCP-NEXT: vmovdqa (%rax), %ymm0
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm11
-; AVX2-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm10
+; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0]
; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0]
; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[23],zero,ymm12[27,20,21,26],zero,ymm12[24],zero,ymm12[26,27,26,27],zero,ymm12[25]
-; AVX2-FCP-NEXT: vmovdqa %ymm12, %ymm13
-; AVX2-FCP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[23],zero,ymm13[27,20,21,26],zero,ymm13[24],zero,ymm13[26,27,26,27],zero,ymm13[25]
+; AVX2-FCP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm11[23],zero,zero,zero,zero,ymm11[26],zero,ymm11[24],zero,zero,zero,zero,ymm11[27],zero
+; AVX2-FCP-NEXT: vmovdqa %ymm11, %ymm2
+; AVX2-FCP-NEXT: vpor %ymm6, %ymm8, %ymm6
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm5[25],zero,ymm5[23],zero,zero,zero,zero,ymm5[26],zero,ymm5[24],zero,zero,zero,zero
+; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27]
+; AVX2-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm5[23],zero,zero,zero,zero,ymm5[26],zero,ymm5[24],zero,zero,zero,zero,ymm5[27],zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm10[25],zero,ymm10[23],zero,zero,zero,zero,ymm10[26],zero,ymm10[24],zero,zero,zero,zero
-; AVX2-FCP-NEXT: vmovdqa %ymm10, %ymm14
-; AVX2-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa %ymm4, %ymm2
-; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[25],zero,ymm4[23],zero,zero,zero,zero,ymm4[26],zero,ymm4[24],zero,zero,zero,zero,ymm4[27]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0]
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm6, %ymm7, %ymm6
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero,zero
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm6, %ymm8, %ymm6
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm14[25],zero,ymm14[23],zero,zero,zero,zero,ymm14[26],zero,ymm14[24],zero,zero,zero
+; AVX2-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u]
-; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
-; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm0
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u]
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
+; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm6, %ymm8, %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
-; AVX2-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
+; AVX2-FCP-NEXT: # ymm9 = mem[0,1,0,1]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm7
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm6, %ymm10
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm7, %ymm10, %ymm7
-; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
-; AVX2-FCP-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm4, %ymm11
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128]
+; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm8
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm11
+; AVX2-FCP-NEXT: vpor %ymm8, %ymm11, %ymm8
+; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
+; AVX2-FCP-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm7, %ymm12
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm1, %ymm15
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm11, %ymm15, %ymm11
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
-; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm7, %ymm11, %ymm0
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm14, %ymm8
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm1, %ymm15
+; AVX2-FCP-NEXT: vpor %ymm12, %ymm15, %ymm12
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm9
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm5, %ymm9
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm13, %ymm10
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm9, %ymm10, %ymm9
-; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm8, %ymm9, %ymm8
-; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
-; AVX2-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm14, %ymm10
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm12
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm10, %ymm12, %ymm10
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} ymm12 = ymm13[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [4,5,4,5,5,7,4,5]
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm15, %ymm12
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255]
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm10, %ymm12, %ymm10
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm9
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u,u,0,0,255,255,u,u]
+; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm8, %ymm12, %ymm4
+; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm9
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm3, %ymm10
+; AVX2-FCP-NEXT: vpor %ymm9, %ymm10, %ymm9
+; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm3
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm10
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm13, %ymm11
+; AVX2-FCP-NEXT: vpor %ymm10, %ymm11, %ymm10
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
+; AVX2-FCP-NEXT: vpblendvb %ymm15, %ymm9, %ymm10, %ymm9
+; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
+; AVX2-FCP-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm11
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm11
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm13
+; AVX2-FCP-NEXT: vpor %ymm11, %ymm13, %ymm11
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX2-FCP-NEXT: vpor %ymm9, %ymm11, %ymm9
-; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm7 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} ymm11 = ymm7[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX2-FCP-NEXT: vpermd %ymm11, %ymm15, %ymm11
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm9, %ymm11, %ymm0
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0]
-; AVX2-FCP-NEXT: vpblendvb %ymm9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm8, %ymm0, %ymm8
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} ymm13 = ymm6[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [4,5,4,5,5,7,4,5]
+; AVX2-FCP-NEXT: vpermd %ymm13, %ymm15, %ymm13
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255]
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm11, %ymm13, %ymm11
+; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm4 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm4, %ymm10
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm14, %ymm12
+; AVX2-FCP-NEXT: vpor %ymm10, %ymm12, %ymm10
+; AVX2-FCP-NEXT: vpshuflw $150, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm12 = mem[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX2-FCP-NEXT: vpermd %ymm12, %ymm15, %ymm12
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm10, %ymm12, %ymm0
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0]
+; AVX2-FCP-NEXT: vpblendvb %ymm10, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm14 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm9, %ymm0, %ymm9
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm9
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [0,128,14,128,128,128,128,1,128,15,128,128,128,128,2,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128,18,128]
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm11
-; AVX2-FCP-NEXT: vpor %ymm9, %ymm11, %ymm9
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm10
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [0,128,14,128,128,128,128,1,128,15,128,128,128,128,2,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128,18,128]
; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm12
+; AVX2-FCP-NEXT: vpor %ymm10, %ymm12, %ymm10
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm1, %ymm13
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [128,128,0,128,14,128,128,128,128,1,128,15,128,128,128,128,128,128,16,128,30,128,128,128,128,17,128,31,128,128,128,128]
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm6, %ymm6
-; AVX2-FCP-NEXT: vpor %ymm6, %ymm12, %ymm6
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255]
-; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm9, %ymm6, %ymm9
-; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm5, %ymm0
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm1, %ymm7
+; AVX2-FCP-NEXT: vpor %ymm7, %ymm13, %ymm7
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255,255]
+; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm10, %ymm7, %ymm10
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm3, %ymm0
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm4, %ymm4
-; AVX2-FCP-NEXT: vpor %ymm1, %ymm4, %ymm1
-; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm0, %ymm1, %ymm0
+; AVX2-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm5, %ymm1
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,1,2,3,0,128,14,128,0,1,0,1,128,15,128,15,128,17,18,19,16,128,30,128,16,17,16,17,128,31,128,31]
-; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm3, %ymm4
+; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm3
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm14, %ymm6
-; AVX2-FCP-NEXT: vpor %ymm4, %ymm6, %ymm4
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm13, %ymm10
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u]
-; AVX2-FCP-NEXT: vpblendvb %ymm11, %ymm4, %ymm10, %ymm4
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm8, %ymm7
+; AVX2-FCP-NEXT: vpor %ymm3, %ymm7, %ymm3
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm6, %ymm11
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u,u,u,255,255,0,u,u]
+; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm3, %ymm11, %ymm3
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpor %ymm1, %ymm4, %ymm1
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm3
-; AVX2-FCP-NEXT: vpor %ymm1, %ymm3, %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm7, %ymm2
-; AVX2-FCP-NEXT: vpblendvb %ymm11, %ymm1, %ymm2, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm1, %ymm2, %ymm1
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255]
-; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm9, %ymm4, %ymm3
+; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm10, %ymm3, %ymm3
; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FCP-NEXT: vmovdqa %ymm0, 96(%rax)
; AVX2-FCP-NEXT: vmovdqa %ymm3, 320(%rax)
-; AVX2-FCP-NEXT: vmovdqa %ymm8, 128(%rax)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm0, 352(%rax)
+; AVX2-FCP-NEXT: vmovdqa %ymm9, 128(%rax)
+; AVX2-FCP-NEXT: vmovdqa %ymm14, 352(%rax)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 160(%rax)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -8688,1747 +8544,1675 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovaps %ymm0, 416(%rax)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 384(%rax)
-; AVX2-FCP-NEXT: addq $648, %rsp # imm = 0x288
+; AVX2-FCP-NEXT: addq $616, %rsp # imm = 0x268
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
; AVX512-LABEL: store_i8_stride7_vf64:
; AVX512: # %bb.0:
-; AVX512-NEXT: subq $1448, %rsp # imm = 0x5A8
-; AVX512-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,zero,zero,ymm1[18]
-; AVX512-NEXT: vmovdqa %ymm1, %ymm10
-; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: subq $1720, %rsp # imm = 0x6B8
+; AVX512-NEXT: vmovdqa (%rsi), %ymm7
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512-NEXT: vpshufb %ymm2, %ymm7, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm26
; AVX512-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[12,13,0,1,14,15],zero,ymm2[3,12,13,2,3,16],zero,ymm2[30,31,28,29,16,17],zero,ymm2[31,18,19,28,29,18],zero
-; AVX512-NEXT: vmovdqa %ymm2, %ymm14
-; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512-NEXT: vmovdqa64 %ymm3, %ymm25
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm18
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa (%rcx), %ymm6
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512-NEXT: vpshufb %ymm1, %ymm6, %ymm0
-; AVX512-NEXT: vmovdqa64 %ymm1, %ymm23
+; AVX512-NEXT: vmovdqa (%rcx), %ymm14
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512-NEXT: vpshufb %ymm2, %ymm14, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm28
; AVX512-NEXT: vmovdqa (%rdx), %ymm8
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512-NEXT: vpshufb %ymm2, %ymm8, %ymm1
-; AVX512-NEXT: vmovdqa64 %ymm2, %ymm17
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512-NEXT: vpshufb %ymm3, %ymm8, %ymm1
+; AVX512-NEXT: vmovdqa64 %ymm3, %ymm23
+; AVX512-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa (%r8), %ymm0
-; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqa (%r8), %ymm15
; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vmovdqa64 %ymm2, %ymm18
-; AVX512-NEXT: vmovdqa (%r9), %ymm1
-; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vpshufb %ymm2, %ymm15, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm24
+; AVX512-NEXT: vmovdqa (%r9), %ymm2
; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0,13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0]
; AVX512-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; AVX512-NEXT: vmovdqa64 %ymm3, %ymm22
-; AVX512-NEXT: vporq %ymm0, %ymm1, %ymm24
-; AVX512-NEXT: vmovdqa 32(%r9), %ymm11
-; AVX512-NEXT: vmovdqa 32(%r8), %ymm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
-; AVX512-NEXT: vmovdqa %ymm1, %ymm13
-; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm11[25],zero,ymm11[23],zero,zero,zero,zero,ymm11[26],zero,ymm11[24],zero,zero
+; AVX512-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512-NEXT: vmovdqa64 %ymm3, %ymm27
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm19
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqa 32(%rsi), %ymm10
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128]
+; AVX512-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm6, %ymm10, %ymm0
+; AVX512-NEXT: vmovdqa 32(%rdi), %ymm9
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm9[23],zero,zero,zero,zero,ymm9[26],zero,ymm9[24],zero,zero,zero,zero,ymm9[27],zero,ymm9[25]
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [12,13,14,128,12,128,14,15,14,15,128,13,128,15,12,13,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
+; AVX512-NEXT: vpshufb %ymm1, %ymm9, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm1, %ymm16
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm10[23,u,u,u],zero,ymm10[26],zero,ymm10[24,u,u,u],zero,ymm10[27],zero
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa 32(%rcx), %ymm7
-; AVX512-NEXT: vmovdqa 32(%rdx), %ymm9
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero,ymm9[29],zero,zero
-; AVX512-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512-NEXT: vmovdqa 32(%rdx), %ymm5
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128]
+; AVX512-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm1, %ymm5, %ymm2
+; AVX512-NEXT: vmovdqa 32(%rcx), %ymm4
+; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[25],zero,ymm4[23],zero,zero,zero,zero,ymm4[26],zero,ymm4[24],zero,zero,zero,zero
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [27,0,0,0,128,30,128,28,0,0,0,128,31,128,29,0,27,0,0,0,128,30,128,28,0,0,0,128,31,128,29,0]
; AVX512-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm0, %ymm7, %ymm2
-; AVX512-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa 32(%rsi), %ymm5
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero,zero
-; AVX512-NEXT: vmovdqa 32(%rdi), %ymm4
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm3, %ymm4, %ymm2
-; AVX512-NEXT: vmovdqa64 %ymm4, %ymm20
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpshufb %ymm0, %ymm4, %ymm2
+; AVX512-NEXT: vmovdqa64 %ymm0, %ymm20
+; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa 32(%r8), %ymm3
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29]
+; AVX512-NEXT: # ymm0 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm0, %ymm3, %ymm11
+; AVX512-NEXT: vmovdqa64 %ymm0, %ymm21
+; AVX512-NEXT: vmovdqa 32(%r9), %ymm2
+; AVX512-NEXT: vpshufb {{.*#+}} ymm12 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
+; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm12, %zmm11
+; AVX512-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [29,128,27,0,0,0,128,30,128,28,0,0,0,128,31,128,29,128,27,0,0,0,128,30,128,28,0,0,0,128,31,128]
+; AVX512-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm11, %ymm2, %ymm12
+; AVX512-NEXT: vpshufb {{.*#+}} ymm13 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm3[23],zero,ymm3[23,24,25,26],zero,ymm3[24],zero,ymm3[30,31]
+; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT: vmovdqa 32(%rax), %ymm4
-; AVX512-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm2, %ymm4, %ymm4
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm4, %zmm1
-; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshufb %ymm3, %ymm14, %ymm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm10[21],zero,ymm10[19],zero,zero,zero,zero,ymm10[22],zero,ymm10[20],zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
-; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %ymm0, %ymm6, %ymm0
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: vmovdqa 32(%rax), %ymm0
+; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vpshufb {{.*#+}} ymm12 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm13 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpshufb %ymm6, %ymm7, %ymm6
+; AVX512-NEXT: vmovdqa64 %ymm7, %ymm17
+; AVX512-NEXT: vmovdqa64 %ymm18, %ymm13
+; AVX512-NEXT: vmovdqa64 %ymm16, %ymm0
+; AVX512-NEXT: vpshufb %ymm0, %ymm13, %ymm7
+; AVX512-NEXT: vpor %ymm6, %ymm7, %ymm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm8[23],zero,ymm8[21,22,23,26],zero,ymm8[24],zero,ymm8[28,29,26,27]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm8[18,19,20,21],zero,ymm8[19],zero,ymm8[25,26,27,22],zero,ymm8[20],zero
-; AVX512-NEXT: vmovdqa64 %ymm8, %ymm19
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: vpshufb %ymm1, %ymm8, %ymm6
+; AVX512-NEXT: vmovdqa64 %ymm20, %ymm0
+; AVX512-NEXT: vpshufb %ymm0, %ymm14, %ymm7
+; AVX512-NEXT: vmovdqa64 %ymm14, %ymm22
+; AVX512-NEXT: vpor %ymm6, %ymm7, %ymm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa (%rax), %ymm1
-; AVX512-NEXT: vpshufb %ymm2, %ymm1, %ymm0
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm16 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm2 = ymm1[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512-NEXT: vpermi2d %zmm0, %zmm2, %zmm16
-; AVX512-NEXT: vmovdqa 32(%rdx), %xmm3
-; AVX512-NEXT: vmovdqa 32(%rcx), %xmm0
+; AVX512-NEXT: vmovdqa64 %ymm21, %ymm0
+; AVX512-NEXT: vpshufb %ymm0, %ymm15, %ymm6
+; AVX512-NEXT: vmovdqa64 %ymm15, %ymm16
+; AVX512-NEXT: vmovdqa64 %ymm19, %ymm15
+; AVX512-NEXT: vpshufb %ymm11, %ymm15, %ymm7
+; AVX512-NEXT: vpor %ymm6, %ymm7, %ymm0
+; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa 32(%rdx), %xmm0
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vmovdqa64 %xmm2, %xmm29
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512-NEXT: vpshufb %xmm4, %xmm3, %xmm2
-; AVX512-NEXT: vmovdqa64 %xmm4, %xmm31
-; AVX512-NEXT: vmovdqa64 %xmm3, %xmm30
-; AVX512-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vmovdqa 32(%rcx), %xmm1
+; AVX512-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512-NEXT: vpshufb %xmm11, %xmm1, %xmm6
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm12 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512-NEXT: vpshufb %xmm12, %xmm0, %xmm7
+; AVX512-NEXT: vpor %xmm6, %xmm7, %xmm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa 32(%rdi), %xmm3
-; AVX512-NEXT: vmovdqa 32(%rsi), %xmm4
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm12 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512-NEXT: vpshufb %xmm12, %xmm4, %xmm0
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm15 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512-NEXT: vpshufb %xmm15, %xmm3, %xmm2
-; AVX512-NEXT: vmovdqa64 %xmm3, %xmm21
-; AVX512-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vmovdqa 32(%rdi), %xmm0
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa 32(%rsi), %xmm14
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512-NEXT: vpshufb %xmm6, %xmm14, %xmm7
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512-NEXT: vpshufb %xmm1, %xmm0, %xmm8
+; AVX512-NEXT: vmovdqa64 %xmm1, %xmm18
+; AVX512-NEXT: vpor %xmm7, %xmm8, %xmm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm3 = [0,0,0,0,2,3,0,1,0,18,0,19,18,0,19,0]
-; AVX512-NEXT: vmovdqa 32(%rax), %xmm2
-; AVX512-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5,5,6]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512-NEXT: vpermi2d %zmm0, %zmm2, %zmm3
-; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm1 = [0,0,0,0,2,3,0,1,0,18,0,19,18,0,19,0]
+; AVX512-NEXT: vmovdqa 32(%rax), %xmm0
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm7 = xmm0[0,1,2,3,4,5,5,6]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512-NEXT: vpermi2d %zmm7, %zmm8, %zmm1
+; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa 32(%r9), %xmm0
-; AVX512-NEXT: vmovdqa 32(%r8), %xmm2
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512-NEXT: vpshufb %xmm6, %xmm0, %xmm8
-; AVX512-NEXT: vmovdqa64 %xmm0, %xmm28
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm14 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512-NEXT: vpshufb %xmm14, %xmm2, %xmm10
-; AVX512-NEXT: vmovdqa %xmm2, %xmm3
-; AVX512-NEXT: vporq %xmm8, %xmm10, %xmm26
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa 32(%r8), %xmm1
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm8 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512-NEXT: vpshufb %xmm8, %xmm0, %xmm7
+; AVX512-NEXT: vmovdqa64 %xmm8, %xmm20
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512-NEXT: vpshufb %xmm0, %xmm1, %xmm8
+; AVX512-NEXT: vmovdqa64 %xmm0, %xmm30
+; AVX512-NEXT: vmovdqa64 %xmm1, %xmm29
+; AVX512-NEXT: vpor %xmm7, %xmm8, %xmm0
+; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa64 %ymm28, %ymm0
+; AVX512-NEXT: vpshufb %ymm0, %ymm4, %ymm7
; AVX512-NEXT: vmovdqa64 %ymm23, %ymm0
-; AVX512-NEXT: vpshufb %ymm0, %ymm7, %ymm8
-; AVX512-NEXT: vmovdqa64 %ymm17, %ymm0
-; AVX512-NEXT: vpshufb %ymm0, %ymm9, %ymm10
-; AVX512-NEXT: vpor %ymm8, %ymm10, %ymm0
+; AVX512-NEXT: vpshufb %ymm0, %ymm5, %ymm8
+; AVX512-NEXT: vpor %ymm7, %ymm8, %ymm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,ymm5[14],zero,zero,zero,zero,zero,zero,ymm5[15],zero,zero,zero,zero,zero,zero,ymm5[16],zero,zero,zero,zero,zero,zero,ymm5[17],zero,zero,zero,zero,zero,zero,ymm5[18]
-; AVX512-NEXT: vmovdqa64 %ymm5, %ymm23
-; AVX512-NEXT: vmovdqa64 %ymm20, %ymm0
-; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[0,1,14],zero,ymm0[12,13,0,1,14,15],zero,ymm0[3,12,13,2,3,16],zero,ymm0[30,31,28,29,16,17],zero,ymm0[31,18,19,28,29,18],zero
-; AVX512-NEXT: vpor %ymm8, %ymm10, %ymm2
-; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa64 %ymm18, %ymm2
-; AVX512-NEXT: vpshufb %ymm2, %ymm13, %ymm8
-; AVX512-NEXT: vmovdqa64 %ymm22, %ymm2
-; AVX512-NEXT: vpshufb %ymm2, %ymm11, %ymm10
-; AVX512-NEXT: vmovdqa64 %ymm11, %ymm27
-; AVX512-NEXT: vpor %ymm8, %ymm10, %ymm2
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
+; AVX512-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm1, %ymm4, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm1, %ymm21
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
+; AVX512-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm4, %ymm5, %ymm1
+; AVX512-NEXT: vmovdqa64 %ymm4, %ymm19
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqa64 %ymm26, %ymm0
+; AVX512-NEXT: vpshufb %ymm0, %ymm10, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm25, %ymm1
+; AVX512-NEXT: vpshufb %ymm1, %ymm9, %ymm1
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
+; AVX512-NEXT: # ymm7 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm7, %ymm10, %ymm0
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [2,3,4,5,128,3,128,5,4,5,6,128,4,128,6,7,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
+; AVX512-NEXT: vpshufb %ymm1, %ymm9, %ymm4
+; AVX512-NEXT: vpor %ymm0, %ymm4, %ymm0
+; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
+; AVX512-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm10, %ymm2, %ymm4
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
+; AVX512-NEXT: # ymm0 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm0, %ymm3, %ymm5
+; AVX512-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX512-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa64 %ymm24, %ymm4
+; AVX512-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512-NEXT: vmovdqa64 %ymm27, %ymm4
+; AVX512-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX512-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa (%rsi), %xmm2
-; AVX512-NEXT: vpshufb %xmm12, %xmm2, %xmm5
-; AVX512-NEXT: vmovdqa64 %xmm2, %xmm25
-; AVX512-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512-NEXT: vpshufb %xmm15, %xmm12, %xmm9
-; AVX512-NEXT: vporq %xmm5, %xmm9, %xmm22
-; AVX512-NEXT: vmovdqa (%rcx), %xmm13
-; AVX512-NEXT: vmovdqa64 %xmm29, %xmm2
-; AVX512-NEXT: vpshufb %xmm2, %xmm13, %xmm7
-; AVX512-NEXT: vmovdqa (%rdx), %xmm9
-; AVX512-NEXT: vmovdqa64 %xmm31, %xmm2
-; AVX512-NEXT: vpshufb %xmm2, %xmm9, %xmm10
-; AVX512-NEXT: vpor %xmm7, %xmm10, %xmm2
+; AVX512-NEXT: vmovdqa (%rsi), %xmm3
+; AVX512-NEXT: vpshufb %xmm6, %xmm3, %xmm2
+; AVX512-NEXT: vmovdqa64 %xmm3, %xmm31
+; AVX512-NEXT: vmovdqa (%rdi), %xmm4
+; AVX512-NEXT: vmovdqa64 %xmm18, %xmm3
+; AVX512-NEXT: vpshufb %xmm3, %xmm4, %xmm3
+; AVX512-NEXT: vmovdqa64 %xmm4, %xmm28
+; AVX512-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT: vmovdqa (%r9), %xmm2
+; AVX512-NEXT: vmovdqa (%rcx), %xmm3
+; AVX512-NEXT: vpshufb %xmm11, %xmm3, %xmm2
+; AVX512-NEXT: vmovdqa %xmm3, %xmm6
+; AVX512-NEXT: vmovdqa (%rdx), %xmm4
+; AVX512-NEXT: vpshufb %xmm12, %xmm4, %xmm3
+; AVX512-NEXT: vmovdqa64 %xmm4, %xmm18
+; AVX512-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT: vpshufb %xmm6, %xmm2, %xmm6
-; AVX512-NEXT: vmovdqa (%r8), %xmm5
-; AVX512-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT: vpshufb %xmm14, %xmm5, %xmm11
-; AVX512-NEXT: vpor %xmm6, %xmm11, %xmm6
-; AVX512-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vinserti64x4 $1, %ymm24, %zmm0, %zmm6
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512-NEXT: vpshufb %xmm7, %xmm11, %xmm11
-; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm11[0,1,0,1],zmm6[4,5,6,7]
+; AVX512-NEXT: vmovdqa (%r9), %xmm3
+; AVX512-NEXT: vmovdqa64 %xmm20, %xmm2
+; AVX512-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512-NEXT: vmovdqa %xmm3, %xmm8
+; AVX512-NEXT: vmovdqa (%r8), %xmm3
+; AVX512-NEXT: vmovdqa64 %xmm30, %xmm4
+; AVX512-NEXT: vpshufb %xmm4, %xmm3, %xmm4
+; AVX512-NEXT: vmovdqa %xmm3, %xmm9
+; AVX512-NEXT: vpor %xmm2, %xmm4, %xmm2
; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa (%rax), %xmm10
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm11 = xmm10[0,1,2,3,4,5,5,6]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
-; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm11, %ymm11
-; AVX512-NEXT: vpshufb {{.*#+}} ymm14 = zero,ymm1[13],zero,zero,zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm14, %zmm11, %zmm24
-; AVX512-NEXT: vmovdqa64 %ymm19, %ymm2
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
-; AVX512-NEXT: vmovdqa64 %ymm2, %ymm18
-; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm5 = [13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm13[23],zero,zero,zero,zero,ymm13[26],zero,ymm13[24],zero,zero,zero,zero,ymm13[27],zero,ymm13[25]
+; AVX512-NEXT: vmovdqa64 %ymm17, %ymm3
+; AVX512-NEXT: vpshufb %ymm7, %ymm3, %ymm4
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm2
+; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm3[23,u,u,u],zero,ymm3[26],zero,ymm3[24,u,u,u],zero,ymm3[27],zero
+; AVX512-NEXT: vpshufb %ymm1, %ymm13, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa64 %ymm22, %ymm2
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
+; AVX512-NEXT: vmovdqa64 %ymm21, %ymm3
+; AVX512-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512-NEXT: vpshufb %ymm5, %ymm2, %ymm11
-; AVX512-NEXT: vmovdqa64 %ymm5, %ymm29
-; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX512-NEXT: vpshufb {{.*#+}} ymm15 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero,ymm14[29]
-; AVX512-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm31 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800,18446463693966278655,18446742978476179455,18446463693966278655,18446742978476179455]
-; AVX512-NEXT: vpternlogq $248, %ymm31, %ymm11, %ymm15
-; AVX512-NEXT: vmovdqa64 %xmm28, %xmm6
-; AVX512-NEXT: vmovdqa %xmm3, %xmm8
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm15[0,1,2,3],zmm11[0,1,0,1]
-; AVX512-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufhw {{.*#+}} ymm1 = ymm0[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512-NEXT: vmovdqa64 %ymm20, %ymm17
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,2,3,3,6,6,7,7]
-; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm11 = [9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10]
-; AVX512-NEXT: vmovdqa64 %ymm23, %ymm5
-; AVX512-NEXT: vpshufb %ymm11, %ymm5, %ymm15
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm15, %zmm19
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29],zero,zero,zero
-; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %ymm11, %ymm0, %ymm1
-; AVX512-NEXT: vpshuflw $233, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm11 = mem[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[0,0,1,1,4,4,5,5]
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm11, %zmm23
-; AVX512-NEXT: vmovdqa64 %xmm30, %xmm0
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm2[23],zero,ymm2[21,22,23,26],zero,ymm2[24],zero,ymm2[28,29,26,27]
+; AVX512-NEXT: vmovdqa64 %ymm19, %ymm3
+; AVX512-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm30
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm15[25],zero,ymm15[23],zero,zero,zero,zero,ymm15[26],zero,ymm15[24],zero,zero
+; AVX512-NEXT: vmovdqa64 %ymm16, %ymm3
+; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm3[23],zero,ymm3[23,24,25,26],zero,ymm3[24],zero,ymm3[30,31]
+; AVX512-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpshufb %ymm10, %ymm15, %ymm1
+; AVX512-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa (%rax), %ymm4
+; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm15 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm1 = ymm4[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512-NEXT: vpermi2d %zmm0, %zmm1, %zmm15
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm9[8],xmm8[8],xmm9[9],xmm8[9],xmm9[10],xmm8[10],xmm9[11],xmm8[11],xmm9[12],xmm8[12],xmm9[13],xmm8[13],xmm9[14],xmm8[14],xmm9[15],xmm8[15]
+; AVX512-NEXT: vmovdqa64 %xmm9, %xmm21
+; AVX512-NEXT: vmovdqa64 %xmm8, %xmm22
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vmovdqa64 %xmm2, %xmm24
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm1[0,1,0,1],zmm0[4,5,6,7]
+; AVX512-NEXT: vmovdqa (%rax), %xmm11
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm11[0,1,2,3,4,5,5,6]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
+; AVX512-NEXT: vpshufb %ymm9, %ymm4, %ymm2
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm16
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3],xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512-NEXT: vpshufb %xmm0, %xmm2, %xmm2
+; AVX512-NEXT: vmovdqa64 %xmm0, %xmm27
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm12 = zmm0[2,3,2,3],zmm2[0,1,0,1]
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm15 = xmm13[8],xmm9[8],xmm13[9],xmm9[9],xmm13[10],xmm9[10],xmm13[11],xmm9[11],xmm13[12],xmm9[12],xmm13[13],xmm9[13],xmm13[14],xmm9[14],xmm13[15],xmm9[15]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512-NEXT: vpshufb %xmm0, %xmm15, %xmm3
-; AVX512-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512-NEXT: vpshufb %xmm0, %xmm3, %xmm3
+; AVX512-NEXT: vmovdqa64 %xmm0, %xmm26
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm17 = zmm0[2,3,2,3],zmm3[0,1,0,1]
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX512-NEXT: vmovdqa64 %xmm29, %xmm3
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512-NEXT: vpshufb %xmm0, %xmm13, %xmm13
+; AVX512-NEXT: vmovdqa64 %xmm0, %xmm25
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm29 = zmm0[2,3,2,3],zmm13[0,1,0,1]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512-NEXT: vmovdqa64 %ymm0, %ymm20
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX512-NEXT: vmovdqa %xmm6, %xmm8
+; AVX512-NEXT: vmovdqa64 %xmm18, %xmm7
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512-NEXT: vpshufb %xmm1, %xmm4, %xmm2
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm18
+; AVX512-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm30
-; AVX512-NEXT: vmovdqa64 %xmm21, %xmm0
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; AVX512-NEXT: vmovdqa64 %xmm25, %xmm3
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm3[8],xmm12[8],xmm3[9],xmm12[9],xmm3[10],xmm12[10],xmm3[11],xmm12[11],xmm3[12],xmm12[12],xmm3[13],xmm12[13],xmm3[14],xmm12[14],xmm3[15],xmm12[15]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512-NEXT: vpshufb %xmm4, %xmm1, %xmm1
-; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm19
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm14[8],xmm10[8],xmm14[9],xmm10[9],xmm14[10],xmm10[10],xmm14[11],xmm10[11],xmm14[12],xmm10[12],xmm14[13],xmm10[13],xmm14[14],xmm10[14],xmm14[15],xmm10[15]
+; AVX512-NEXT: vmovdqa64 %xmm31, %xmm6
+; AVX512-NEXT: vmovdqa64 %xmm28, %xmm4
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm14 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512-NEXT: vpshufb %xmm0, %xmm14, %xmm13
+; AVX512-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm21
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm8[8],xmm6[8],xmm8[9],xmm6[9],xmm8[10],xmm6[10],xmm8[11],xmm6[11],xmm8[12],xmm6[12],xmm8[13],xmm6[13],xmm8[14],xmm6[14],xmm8[15],xmm6[15]
-; AVX512-NEXT: vpshufb %xmm7, %xmm0, %xmm0
-; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm26 = zmm26[0,1,0,1],zmm0[0,1,0,1]
-; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-NEXT: vpshufb %ymm1, %ymm0, %ymm6
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-NEXT: vpshufb %ymm1, %ymm0, %ymm8
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
-; AVX512-NEXT: vmovdqa64 %ymm0, %ymm28
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm0[23],zero,ymm0[21,22,23,26],zero,ymm0[24],zero,ymm0[28,29,26,27]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm0[18,19,20,21],zero,ymm0[19],zero,ymm0[25,26,27,22],zero,ymm0[20],zero
-; AVX512-NEXT: vmovdqa64 %ymm0, %ymm20
-; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[21],zero,ymm5[19],zero,zero,zero,zero,ymm5[22],zero,ymm5[20],zero,zero
-; AVX512-NEXT: vmovdqa64 %ymm27, %ymm1
-; AVX512-NEXT: vmovdqa64 %ymm29, %ymm0
-; AVX512-NEXT: vpshufb %ymm0, %ymm1, %ymm4
-; AVX512-NEXT: vmovdqa %ymm2, %ymm0
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
-; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
-; AVX512-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %ymm2, %ymm1, %ymm0
-; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-NEXT: vpshufb %ymm2, %ymm0, %ymm1
-; AVX512-NEXT: vpshufb %ymm2, %ymm14, %ymm2
-; AVX512-NEXT: vmovdqa64 %ymm2, %ymm27
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
-; AVX512-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm2, %ymm14, %ymm14
-; AVX512-NEXT: vmovdqa64 %ymm14, %ymm25
-; AVX512-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vmovdqa64 %ymm0, %ymm29
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = zero,ymm0[13],zero,zero,zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,ymm0[17],zero,zero
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm14 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm14 = ymm14[0,1,1,3,4,5,5,7]
-; AVX512-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,2]
-; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm14, %ymm14
-; AVX512-NEXT: vinserti64x4 $1, %ymm14, %zmm2, %zmm2
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm12 = xmm12[0],xmm3[0],xmm12[1],xmm3[1],xmm12[2],xmm3[2],xmm12[3],xmm3[3],xmm12[4],xmm3[4],xmm12[5],xmm3[5],xmm12[6],xmm3[6],xmm12[7],xmm3[7]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512-NEXT: vpshufb %xmm14, %xmm15, %xmm15
-; AVX512-NEXT: vpshufb %xmm14, %xmm12, %xmm12
-; AVX512-NEXT: vinserti32x4 $2, %xmm22, %zmm12, %zmm0
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3],xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm14 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512-NEXT: vpshufb %xmm14, %xmm11, %xmm9
-; AVX512-NEXT: vpshufb %xmm14, %xmm13, %xmm11
-; AVX512-NEXT: vpermq {{.*#+}} ymm13 = ymm6[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm14 = ymm18[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm22 = ymm4[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm8[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm7[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm5[2,3,2,3]
-; AVX512-NEXT: vmovdqa64 %ymm17, %ymm3
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm3 = ymm3[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,0,1,1,4,4,5,5]
-; AVX512-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm12 # 16-byte Folded Reload
-; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX512-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX512-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1],xmm5[2],mem[2],xmm5[3],mem[3],xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512-NEXT: vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm11 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm11 = zmm5[0,1,0,1],mem[0,1,0,1]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm10[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm7 = xmm10[1,1,0,0,4,5,6,7]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,0]
-; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm7, %zmm7
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm10 = mem[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpermq {{.*#+}} zmm5 = zmm19[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpternlogq $236, %zmm31, %zmm10, %zmm5
-; AVX512-NEXT: vpandq %ymm31, %ymm22, %ymm10
-; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm1, %zmm1
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm10 = mem[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vporq %zmm10, %zmm1, %zmm1
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm10 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655,18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512-NEXT: vpand %ymm6, %ymm10, %ymm6
-; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm0
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm6 = mem[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vporq %zmm6, %zmm0, %zmm0
-; AVX512-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm5
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512-NEXT: vpternlogq $226, %zmm1, %zmm0, %zmm5
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm1 = mem[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpermq {{.*#+}} zmm6 = zmm23[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpternlogq $236, %zmm10, %zmm1, %zmm6
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm1 = mem[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm17 = mem[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vporq %zmm1, %zmm17, %zmm1
-; AVX512-NEXT: vpternlogq $226, %zmm6, %zmm0, %zmm1
-; AVX512-NEXT: vpermq {{.*#+}} zmm6 = zmm30[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vpermq {{.*#+}} zmm17 = zmm21[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vpternlogq $226, %zmm6, %zmm0, %zmm17
-; AVX512-NEXT: vpternlogq $248, %ymm10, %ymm13, %ymm14
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm3[2,3,2,3]
-; AVX512-NEXT: vpternlogq $236, %ymm10, %ymm4, %ymm0
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm9[0,1,0,1]
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm14, %zmm3
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm4 = mem[2,3,2,3]
-; AVX512-NEXT: vpshufhw $190, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm6 = mem[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,2,3,3,6,6,7,7]
-; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm6
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm15[0,1,0,1]
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm6, %zmm4
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512-NEXT: vpternlogq $184, %zmm3, %zmm6, %zmm4
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm28[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm20[2,3,2,3]
-; AVX512-NEXT: vpor %ymm3, %ymm8, %ymm3
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm8, %zmm3
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm0
-; AVX512-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX512-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm13 = mem[2,3,2,3]
-; AVX512-NEXT: vpshuflw $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
-; AVX512-NEXT: # xmm14 = mem[1,1,0,0,4,5,6,7]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,0]
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm15 = mem[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm18 = ymm27[2,3,2,3]
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm19 = mem[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm20 = ymm25[2,3,2,3]
-; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm21 = mem[2,3,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm22 = ymm29[2,3,2,3]
-; AVX512-NEXT: vpternlogq $226, %zmm3, %zmm6, %zmm0
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm3 # 32-byte Folded Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm6 # 32-byte Folded Reload
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm6
+; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
+; AVX512-NEXT: vmovdqa64 %xmm24, %xmm2
+; AVX512-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm14 = zmm2[0,1,0,1],zmm1[0,1,0,1]
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX512-NEXT: vpshufb %ymm9, %ymm10, %ymm1
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm9 = ymm10[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm10 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; AVX512-NEXT: vpternlogq $226, %zmm12, %zmm10, %zmm17
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm2 = mem[2,3,2,3]
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm28 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm28 = mem[2,3,2,3]
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm24
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm24
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm14[0,0,1,0]
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm13, %zmm3
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm3
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm3
+; AVX512-NEXT: vinserti64x4 $1, %ymm28, %zmm3, %zmm28
+; AVX512-NEXT: vpternlogq $226, %zmm2, %zmm10, %zmm28
+; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm9[2,2,3,2]
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm10
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; AVX512-NEXT: vmovdqa64 %xmm27, %xmm2
+; AVX512-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm27 # 16-byte Folded Reload
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; AVX512-NEXT: vmovdqa64 %xmm26, %xmm1
+; AVX512-NEXT: vpshufb %xmm1, %xmm2, %xmm2
+; AVX512-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 16-byte Folded Reload
+; AVX512-NEXT: vmovdqa64 %xmm21, %xmm1
+; AVX512-NEXT: vmovdqa64 %xmm22, %xmm3
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX512-NEXT: vmovdqa64 %xmm25, %xmm1
+; AVX512-NEXT: vpshufb %xmm1, %xmm8, %xmm8
+; AVX512-NEXT: vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm8 = zmm8[0,1,0,1],mem[0,1,0,1]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm11[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm5 = xmm11[1,1,0,0,4,5,6,7]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
+; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm5, %zmm5
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm9 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm11 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vporq %zmm9, %zmm11, %zmm9
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm11 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm12 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vporq %zmm11, %zmm12, %zmm11
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm11
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm9 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm12 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vporq %zmm9, %zmm12, %zmm9
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm12 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
+; AVX512-NEXT: vpternlogq $184, %zmm11, %zmm12, %zmm9
+; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm13[0,1,0,1]
+; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm18[0,1,0,1]
+; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm20[2,3,2,3]
+; AVX512-NEXT: vpshuflw $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm1 = mem[1,1,0,0,4,5,6,7]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; AVX512-NEXT: vpermq {{.*#+}} zmm13 = zmm19[0,1,0,1,4,5,4,5]
+; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512-NEXT: vpternlogq $226, %zmm13, %zmm12, %zmm0
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm13 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm21 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vporq %zmm13, %zmm21, %zmm13
+; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm20 = mem[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vpermq {{.*#+}} zmm18 = zmm30[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vporq %zmm20, %zmm18, %zmm18
+; AVX512-NEXT: vpternlogq $226, %zmm13, %zmm12, %zmm18
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm11 # 32-byte Folded Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 32-byte Folded Reload
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm4
+; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm23, %zmm16
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm16
; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
; AVX512-NEXT: # zmm4 = mem[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm4
-; AVX512-NEXT: vporq %ymm15, %ymm18, %ymm5
-; AVX512-NEXT: vporq %ymm19, %ymm20, %ymm6
-; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm5[0,1,2,3]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm16
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm16
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm26
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm26
-; AVX512-NEXT: vporq %ymm21, %ymm22, %ymm1
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm5[0,1,2,3],zmm1[0,1,2,3]
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm4
+; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,0]
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm29, %zmm1
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm1
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm14
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm14
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vshufi64x2 $84, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm0 = zmm0[0,1,2,3],mem[2,3,2,3]
+; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm10
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm28, %zmm10
+; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm27[0,1,0,1,4,5,4,5]
+; AVX512-NEXT: vpermq {{.*#+}} zmm2 = zmm2[0,1,0,1,4,5,4,5]
; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm0 = mem[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vpermq {{.*#+}} zmm1 = zmm12[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm7[0,0,1,0,4,4,5,4]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm0
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm5[0,0,1,0,4,4,5,4]
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm0
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512-NEXT: vshufi64x2 $85, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm2 = zmm2[2,3,2,3],mem[2,3,2,3]
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm15
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm15
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: vmovdqa64 %zmm15, 128(%rax)
; AVX512-NEXT: vmovdqa64 %zmm0, (%rax)
-; AVX512-NEXT: vmovdqa64 %zmm2, 320(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm26, 256(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm16, 128(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm10, 320(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm14, 256(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm1, 192(%rax)
; AVX512-NEXT: vmovdqa64 %zmm4, 384(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm3, 192(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm24, 64(%rax)
-; AVX512-NEXT: addq $1448, %rsp # imm = 0x5A8
+; AVX512-NEXT: vmovdqa64 %zmm16, 64(%rax)
+; AVX512-NEXT: addq $1720, %rsp # imm = 0x6B8
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: store_i8_stride7_vf64:
; AVX512-FCP: # %bb.0:
-; AVX512-FCP-NEXT: subq $1256, %rsp # imm = 0x4E8
-; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
-; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %ymm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
-; AVX512-FCP-NEXT: vmovdqa %ymm1, %ymm14
-; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
-; AVX512-FCP-NEXT: vmovdqa %ymm2, %ymm13
+; AVX512-FCP-NEXT: subq $1432, %rsp # imm = 0x598
+; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %ymm2
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero,zero
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512-FCP-NEXT: # ymm15 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm15, %ymm3, %ymm1
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm16
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm25
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [25,128,23,0,0,0,128,26,128,24,0,0,0,128,27,128,25,128,23,0,0,0,128,26,128,24,0,0,0,128,27,128]
+; AVX512-FCP-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm27
+; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm18
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %ymm2
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
+; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %ymm3
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512-FCP-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm28
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm3[30],zero,ymm3[28,u,u,u],zero,ymm3[31],zero,ymm3[29,u]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm19
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512-FCP-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm24
; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm17
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%r9), %ymm4
-; AVX512-FCP-NEXT: vmovdqa 32(%r8), %ymm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm18
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[25],zero,ymm4[23],zero,zero,zero,zero,ymm4[26],zero,ymm4[24],zero,zero
-; AVX512-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%r8), %ymm3
+; AVX512-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[27],zero,zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29]
+; AVX512-FCP-NEXT: vmovdqa 32(%r9), %ymm2
+; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512-FCP-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm23
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512-FCP-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm22
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-FCP-NEXT: vmovdqa 32(%rax), %ymm1
; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,zero,zero,ymm1[18]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm23
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,14],zero,ymm1[12,13,0,1,14,15],zero,ymm1[3,12,13,2,3,16],zero,ymm1[30,31,28,29,16,17],zero,ymm1[31,18,19,28,29,18],zero
+; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm5
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm5[14],zero,zero,zero,zero,zero,zero,ymm5[15],zero,zero,zero,zero,zero,zero,ymm5[16],zero,zero,zero,zero,zero,zero,ymm5[17],zero,zero,zero,zero,zero,zero,ymm5[18]
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm4
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[0,1,14],zero,ymm4[12,13,0,1,14,15],zero,ymm4[3,12,13,2,3,16],zero,ymm4[30,31,28,29,16,17],zero,ymm4[31,18,19,28,29,18],zero
; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm1
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm26
+; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm3
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero,zero
; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm3
-; AVX512-FCP-NEXT: vpor %ymm0, %ymm3, %ymm0
-; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm0
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,0,1,14],zero,ymm2[14,15,0,1,14,15],zero,ymm2[13,14,15,16,17,16],zero,ymm2[30,31,30,31,16,17],zero,ymm2[31,28,29,30,31]
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm3
-; AVX512-FCP-NEXT: vmovdqa64 %ymm5, %ymm25
-; AVX512-FCP-NEXT: vmovdqa (%r9), %ymm5
-; AVX512-FCP-NEXT: vmovdqu %ymm5, (%rsp) # 32-byte Spill
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0,13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0]
-; AVX512-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm5, %ymm5
-; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm30
-; AVX512-FCP-NEXT: vporq %ymm3, %ymm5, %ymm24
-; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %xmm3
-; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %xmm6
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm5
-; AVX512-FCP-NEXT: vmovdqa64 %xmm6, %xmm28
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm3, %xmm6
-; AVX512-FCP-NEXT: vmovdqa64 %xmm9, %xmm19
-; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm29
-; AVX512-FCP-NEXT: vpor %xmm5, %xmm6, %xmm3
-; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm10
-; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %xmm6
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm6, %xmm5
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm10, %xmm9
-; AVX512-FCP-NEXT: vmovdqa64 %xmm10, %xmm27
-; AVX512-FCP-NEXT: vpor %xmm5, %xmm9, %xmm5
-; AVX512-FCP-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%r9), %xmm15
-; AVX512-FCP-NEXT: vmovdqa 32(%r8), %xmm10
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm15, %xmm9
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm10, %xmm12
-; AVX512-FCP-NEXT: vmovdqa64 %xmm0, %xmm21
-; AVX512-FCP-NEXT: vporq %xmm9, %xmm12, %xmm22
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm13, %ymm7
-; AVX512-FCP-NEXT: vmovdqa64 %ymm13, %ymm20
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm14, %ymm1
-; AVX512-FCP-NEXT: vpor %ymm7, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 %ymm16, %ymm7
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = zero,zero,zero,ymm7[14],zero,zero,zero,zero,zero,zero,ymm7[15],zero,zero,zero,zero,zero,zero,ymm7[16],zero,zero,zero,zero,zero,zero,ymm7[17],zero,zero,zero,zero,zero,zero,ymm7[18]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm7
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[0,1,14],zero,ymm7[12,13,0,1,14,15],zero,ymm7[3,12,13,2,3,16],zero,ymm7[30,31,28,29,16,17],zero,ymm7[31,18,19,28,29,18],zero
-; AVX512-FCP-NEXT: vpor %ymm1, %ymm7, %ymm1
-; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm1
-; AVX512-FCP-NEXT: vmovdqu64 %ymm18, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm7
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm30, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm0
-; AVX512-FCP-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm13
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm13, %xmm0
+; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm1
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero
+; AVX512-FCP-NEXT: vmovdqa (%r9), %ymm0
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[13,u,u,u,u,u],zero,ymm0[14,u,u,u,u,u],zero,ymm0[15,u,u,u,u,u],zero,ymm0[16,u,u,u,u,u],zero,ymm0[17,u,u,u]
+; AVX512-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
+; AVX512-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %xmm9
+; AVX512-FCP-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %xmm7
+; AVX512-FCP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm7
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vpor %xmm7, %xmm9, %xmm7
+; AVX512-FCP-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm11
+; AVX512-FCP-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %xmm9
+; AVX512-FCP-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm11, %xmm11
+; AVX512-FCP-NEXT: vpor %xmm9, %xmm11, %xmm9
+; AVX512-FCP-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%r9), %xmm9
+; AVX512-FCP-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%r8), %xmm14
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm14, %xmm11
+; AVX512-FCP-NEXT: vmovdqa64 %xmm14, %xmm30
+; AVX512-FCP-NEXT: vpor %xmm9, %xmm11, %xmm9
+; AVX512-FCP-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm7
+; AVX512-FCP-NEXT: vmovdqa64 %xmm9, %xmm26
; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm9
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm9, %xmm1
-; AVX512-FCP-NEXT: vporq %xmm0, %xmm1, %xmm31
-; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm14
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm14, %xmm0
-; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm8
-; AVX512-FCP-NEXT: vmovdqa64 %xmm19, %xmm1
-; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm8, %xmm1
-; AVX512-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%r9), %xmm1
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm0
-; AVX512-FCP-NEXT: vmovdqa %xmm1, %xmm3
-; AVX512-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%r8), %xmm4
-; AVX512-FCP-NEXT: vmovdqa64 %xmm21, %xmm1
-; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm4, %xmm1
-; AVX512-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm10
+; AVX512-FCP-NEXT: vmovdqa64 %xmm9, %xmm21
+; AVX512-FCP-NEXT: vpor %xmm7, %xmm10, %xmm7
+; AVX512-FCP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm7
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm6
+; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm10
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm10, %xmm8
+; AVX512-FCP-NEXT: vpor %xmm6, %xmm8, %xmm6
+; AVX512-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa (%r9), %xmm11
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm12
+; AVX512-FCP-NEXT: vmovdqa (%r8), %xmm6
+; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm6, %xmm13
+; AVX512-FCP-NEXT: vmovdqa %xmm6, %xmm9
+; AVX512-FCP-NEXT: vporq %xmm12, %xmm13, %xmm31
+; AVX512-FCP-NEXT: vpshufb %ymm15, %ymm4, %ymm12
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
+; AVX512-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm13
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm6
+; AVX512-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa64 %ymm27, %ymm6
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm12
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
+; AVX512-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm4, %ymm13
+; AVX512-FCP-NEXT: vmovdqa64 %ymm6, %ymm20
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm29
+; AVX512-FCP-NEXT: vmovdqa64 %ymm28, %ymm6
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm12
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
+; AVX512-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm13
+; AVX512-FCP-NEXT: vmovdqa64 %ymm6, %ymm16
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm28
+; AVX512-FCP-NEXT: vmovdqa64 %ymm24, %ymm6
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm13
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
+; AVX512-FCP-NEXT: # ymm12 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm15
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm15, %zmm27
+; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm6
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm13
+; AVX512-FCP-NEXT: vmovdqa64 %ymm22, %ymm6
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm15
+; AVX512-FCP-NEXT: vpor %ymm13, %ymm15, %ymm6
+; AVX512-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
+; AVX512-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm15
+; AVX512-FCP-NEXT: vmovdqa64 %ymm6, %ymm24
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
+; AVX512-FCP-NEXT: # ymm13 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm13, %ymm1, %ymm14
+; AVX512-FCP-NEXT: vpor %ymm15, %ymm14, %ymm6
+; AVX512-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm4[28],zero,ymm4[30,31,30,31],zero,ymm4[29],zero,ymm4[31,28,29]
+; AVX512-FCP-NEXT: vpor %ymm5, %ymm4, %ymm4
+; AVX512-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm3[30],zero,ymm3[28,u,u,u],zero,ymm3[31],zero,ymm3[29,u]
+; AVX512-FCP-NEXT: vporq %ymm2, %ymm3, %ymm23
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm0[27,u,u,u],zero,ymm0[30],zero,ymm0[28,u,u,u],zero,ymm0[31],zero
+; AVX512-FCP-NEXT: vporq %ymm1, %ymm0, %ymm22
+; AVX512-FCP-NEXT: vmovdqa64 %ymm19, %ymm2
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,zero
+; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm3
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[0,1,0,1,14],zero,ymm3[14,15,0,1,14,15],zero,ymm3[13,14,15,16,17,16],zero,ymm3[30,31,30,31,16,17],zero,ymm3[31,28,29,30,31]
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27],zero,ymm0[25]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm12
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm12[21],zero,ymm12[19],zero,zero,zero,zero,ymm12[22],zero,ymm12[20],zero,zero
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-FCP-NEXT: vmovdqa64 %ymm16, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm3, %ymm1
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm3
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero,zero,zero,ymm3[18]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm2
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[12,13,0,1,14,15],zero,ymm2[3,12,13,2,3,16],zero,ymm2[30,31,28,29,16,17],zero,ymm2[31,18,19,28,29,18],zero
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 %ymm26, %ymm11
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm11[25],zero,ymm11[23],zero,zero,zero,zero,ymm11[26],zero,ymm11[24],zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[18],zero,zero,zero,zero,ymm11[21],zero,ymm11[19],zero,zero,zero,zero,ymm11[22],zero,ymm11[20]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm0
+; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm1
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512-FCP-NEXT: vmovdqa64 %ymm24, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512-FCP-NEXT: vpshufb %ymm13, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512-FCP-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
-; AVX512-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm19
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm5, %ymm30
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[13,u,u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u]
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm24, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm9[8],xmm11[8],xmm9[9],xmm11[9],xmm9[10],xmm11[10],xmm9[11],xmm11[11],xmm9[12],xmm11[12],xmm9[13],xmm11[13],xmm9[14],xmm11[14],xmm9[15],xmm11[15]
+; AVX512-FCP-NEXT: vmovdqa %xmm9, %xmm8
+; AVX512-FCP-NEXT: vmovdqa64 %xmm11, %xmm17
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [2,2,3,3,2,2,3,3]
-; AVX512-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [2,2,3,3,2,2,3,3]
+; AVX512-FCP-NEXT: # ymm6 = mem[0,1,0,1]
; AVX512-FCP-NEXT: vmovdqa (%rax), %xmm0
-; AVX512-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512-FCP-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512-FCP-NEXT: vmovdqa (%rax), %ymm4
-; AVX512-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm4
-; AVX512-FCP-NEXT: vmovdqa64 %ymm5, %ymm18
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm24
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
-; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm23
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14]
-; AVX512-FCP-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm25
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm26 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512-FCP-NEXT: # ymm26 = mem[0,1,2,3,0,1,2,3]
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm26, %ymm0, %ymm2
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3],xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm2[0,1,2,3],zmm0[0,1,0,1]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm29, %xmm0
-; AVX512-FCP-NEXT: vmovdqa64 %xmm28, %xmm2
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm14[8],xmm8[8],xmm14[9],xmm8[9],xmm14[10],xmm8[10],xmm14[11],xmm8[11],xmm14[12],xmm8[12],xmm14[13],xmm8[13],xmm14[14],xmm8[14],xmm14[15],xmm8[15]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
-; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm28
-; AVX512-FCP-NEXT: vmovdqa64 %xmm27, %xmm0
-; AVX512-FCP-NEXT: vmovdqa %xmm6, %xmm2
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm13[8],xmm9[8],xmm13[9],xmm9[9],xmm13[10],xmm9[10],xmm13[11],xmm9[11],xmm13[12],xmm9[12],xmm13[13],xmm9[13],xmm13[14],xmm9[14],xmm13[15],xmm9[15]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm0
-; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm0, %zmm27
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm10[8],xmm15[8],xmm10[9],xmm15[9],xmm10[10],xmm15[10],xmm10[11],xmm15[11],xmm10[12],xmm15[12],xmm10[13],xmm15[13],xmm10[14],xmm15[14],xmm10[15],xmm15[15]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm22[0,1,0,1],zmm2[0,1,0,1]
-; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%rax), %xmm0
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm0, %xmm29
-; AVX512-FCP-NEXT: vpermd %ymm2, %ymm1, %ymm0
-; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm11, %ymm5
-; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm22 = ymm1[2,3,2,3]
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512-FCP-NEXT: vmovdqa64 %ymm19, %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm20 = ymm1[2,3,2,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm30, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm4
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm16, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm30
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm19 = ymm1[2,3,2,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20],zero,zero
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm3
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [4,5,4,5,5,7,4,5]
-; AVX512-FCP-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm16
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3],xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm6, %xmm6
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
-; AVX512-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm15
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm17 = ymm15[2,3,2,3]
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm2
-; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm13
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm18 = ymm13[2,3,2,3]
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm31, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3],xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm7, %xmm7
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm0, %xmm13
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm0[23],zero,ymm0[23,24,25,26],zero,ymm0[24],zero,ymm0[30,31]
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,5,6]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm0, %xmm19
+; AVX512-FCP-NEXT: vpermd %ymm1, %ymm6, %ymm1
+; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
+; AVX512-FCP-NEXT: vmovdqa (%rax), %ymm12
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm12[13],zero,zero,zero,zero,zero,zero,ymm12[14],zero,zero,zero,zero,zero,zero,ymm12[15],zero,zero,zero,zero,zero,zero,ymm12[16],zero,zero,zero,zero,zero,zero,ymm12[17],zero,zero
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm20
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm7[8],xmm10[8],xmm7[9],xmm10[9],xmm7[10],xmm10[10],xmm7[11],xmm10[11],xmm7[12],xmm10[12],xmm7[13],xmm10[13],xmm7[14],xmm10[14],xmm7[15],xmm10[15]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm0
; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm25
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm8[2,3,2,3]
-; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm12[30],zero,ymm12[28],zero,zero,zero,zero,ymm12[31],zero,ymm12[29],zero,zero,zero
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm9
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm23[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm11[2,3,2,3]
-; AVX512-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm31 # 16-byte Folded Reload
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
-; AVX512-FCP-NEXT: # ymm11 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm14
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm30, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm13
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm1
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-FCP-NEXT: vpor %ymm12, %ymm9, %ymm9
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm9, %zmm6
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm9, %ymm5, %ymm0
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm6, %zmm0, %zmm7
-; AVX512-FCP-NEXT: vpor %ymm4, %ymm10, %ymm4
+; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX512-FCP-NEXT: vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm16 # 16-byte Reload
+; AVX512-FCP-NEXT: vmovdqa64 %xmm16, %xmm2
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm11[8],xmm2[8],xmm11[9],xmm2[9],xmm11[10],xmm2[10],xmm11[11],xmm2[11],xmm11[12],xmm2[12],xmm11[13],xmm2[13],xmm11[14],xmm2[14],xmm11[15],xmm2[15]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm1
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm4
-; AVX512-FCP-NEXT: vpor %ymm8, %ymm14, %ymm2
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm18
+; AVX512-FCP-NEXT: vmovdqa64 %xmm26, %xmm2
+; AVX512-FCP-NEXT: vmovdqa64 %xmm21, %xmm0
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm3
+; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm21
+; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm13 = xmm14[8],xmm4[8],xmm14[9],xmm4[9],xmm14[10],xmm4[10],xmm14[11],xmm4[11],xmm14[12],xmm4[12],xmm14[13],xmm4[13],xmm14[14],xmm4[14],xmm14[15],xmm4[15]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm13, %xmm1
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm5
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm4, %zmm0, %zmm5
-; AVX512-FCP-NEXT: vpandq %ymm9, %ymm22, %ymm0
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm20, %zmm0
-; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm2 = mem[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm2, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpandq %ymm26, %ymm19, %ymm2
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm2, %zmm2
-; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm4 = mem[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm4, %zmm2, %zmm2
-; AVX512-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512-FCP-NEXT: vpandq %ymm26, %ymm18, %ymm0
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm15, %zmm0
-; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm4 = mem[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm4, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm2, %zmm4, %zmm0
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm28[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm27[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm2, %zmm4, %zmm8
-; AVX512-FCP-NEXT: vpandq %ymm26, %ymm13, %ymm2
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm2 = mem[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm2, %zmm1, %zmm1
-; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm2 = mem[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm6 = mem[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vporq %zmm2, %zmm6, %zmm9
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm1, %zmm4, %zmm9
-; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512-FCP-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX512-FCP-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512-FCP-NEXT: vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm18 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm18 = zmm1[0,1,0,1],mem[0,1,0,1]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm29, %xmm3
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[1,1,0,0,4,5,6,7]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,1,0,1,2,0,0,1]
-; AVX512-FCP-NEXT: vpermd %ymm2, %ymm4, %ymm19
-; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm1[1,1,0,0,4,5,6,7]
-; AVX512-FCP-NEXT: vpermd %ymm6, %ymm4, %ymm17
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm10
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm6
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
-; AVX512-FCP-NEXT: # ymm11 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm12
-; AVX512-FCP-NEXT: vmovdqu (%rsp), %ymm1 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm11
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm14 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
-; AVX512-FCP-NEXT: # ymm14 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm1, %ymm15
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm2[23],zero,ymm2[23,24,25,26],zero,ymm2[24],zero,ymm2[30,31]
-; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm14
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm4 = ymm3[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [4,5,4,5,5,7,4,5]
-; AVX512-FCP-NEXT: vpermd %ymm4, %ymm2, %ymm20
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm22 = mem[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm22
-; AVX512-FCP-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm23 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,0]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm13
+; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX512-FCP-NEXT: vmovdqa64 %xmm30, %xmm5
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm5[8],xmm15[8],xmm5[9],xmm15[9],xmm5[10],xmm15[10],xmm5[11],xmm15[11],xmm5[12],xmm15[12],xmm5[13],xmm15[13],xmm5[14],xmm15[14],xmm5[15],xmm15[15]
+; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[0,1,0,1],zmm1[0,1,0,1]
+; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%rax), %xmm3
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,4,5,5,6]
+; AVX512-FCP-NEXT: vpermd %ymm1, %ymm6, %ymm26
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX512-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm24 # 16-byte Folded Reload
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm10[0],xmm7[0],xmm10[1],xmm7[1],xmm10[2],xmm7[2],xmm10[3],xmm7[3],xmm10[4],xmm7[4],xmm10[5],xmm7[5],xmm10[6],xmm7[6],xmm10[7],xmm7[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
+; AVX512-FCP-NEXT: vmovdqa64 %xmm17, %xmm2
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3],xmm8[4],xmm2[4],xmm8[5],xmm2[5],xmm8[6],xmm2[6],xmm8[7],xmm2[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm8, %xmm8
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,0,1],zmm31[0,1,0,1]
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm14, %xmm1
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm14 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm31 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vporq %zmm14, %zmm31, %zmm14
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm31 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm30 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vporq %zmm31, %zmm30, %zmm30
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm30
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm14 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm31 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vporq %zmm14, %zmm31, %zmm14
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm31 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm30, %zmm31, %zmm14
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm18[0,1,0,1,4,5,4,5]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm13 = zmm13[0,1,0,1,4,5,4,5]
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm7, %zmm31, %zmm13
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm7 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm29 = zmm29[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vporq %zmm7, %zmm29, %zmm7
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm28 = zmm28[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm27 = zmm27[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vporq %zmm28, %zmm27, %zmm27
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm7, %zmm31, %zmm27
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm2[2,3,2,3],zmm1[0,1,0,1]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm16, %xmm2
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm7
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm23[2,3,2,3],zmm7[0,1,0,1]
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm5[0],xmm15[0],xmm5[1],xmm15[1],xmm5[2],xmm15[2],xmm5[3],xmm15[3],xmm5[4],xmm15[4],xmm5[5],xmm15[5],xmm5[6],xmm15[6],xmm5[7],xmm15[7]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm10, %xmm10
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm22[2,3,2,3],zmm10[0,1,0,1]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm19, %xmm2
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm2[1,1,0,0,4,5,6,7]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm17 = [0,1,0,1,2,0,0,1]
+; AVX512-FCP-NEXT: vpermd %ymm11, %ymm17, %ymm28
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm6
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm3[1,1,0,0,4,5,6,7]
+; AVX512-FCP-NEXT: vpermd %ymm4, %ymm17, %ymm17
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm2 = ymm12[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm23 = [4,5,4,5,5,7,4,5]
+; AVX512-FCP-NEXT: vpermd %ymm2, %ymm23, %ymm2
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = zero,ymm15[13],zero,zero,zero,zero,zero,zero,ymm15[14],zero,zero,zero,zero,zero,zero,ymm15[15],zero,zero,zero,zero,zero,zero,ymm15[16],zero,zero,zero,zero,zero,zero,ymm15[17],zero,zero
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm15 = ymm15[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512-FCP-NEXT: vpermd %ymm15, %ymm23, %ymm15
+; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm15
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm11, %zmm11
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm15 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm1, %zmm15, %zmm7
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: # ymm1 = mem[2,3,2,3]
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: # ymm18 = mem[2,3,2,3]
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm18, %zmm3, %zmm18
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm21[0,1,0,1]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm19 = ymm25[0,1,0,1]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,0]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm23 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm23
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm24
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm23, %zmm24
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm19, %zmm2, %zmm0
-; AVX512-FCP-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm21
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm21
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm0
-; AVX512-FCP-NEXT: vpor %ymm12, %ymm15, %ymm2
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm7[0,1,2,3],zmm2[0,1,2,3]
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm16
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm16
-; AVX512-FCP-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm2 = mem[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm31[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm5
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm17, %zmm2
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm2
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm2
-; AVX512-FCP-NEXT: vpor %ymm1, %ymm13, %ymm1
-; AVX512-FCP-NEXT: vpor %ymm11, %ymm14, %ymm5
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm5[0,1,2,3],zmm1[0,1,2,3]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm20, %zmm4
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm4
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm4
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm1, %zmm15, %zmm18
+; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm1 = mem[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm1
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm14 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm14
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm20
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm20
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm26, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm0
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm0
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm24[0,1,0,1,4,5,4,5]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm9[0,1,0,1,4,5,4,5]
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm9
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm28, %zmm5
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm5
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm5
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vshufi64x2 $85, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm6 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm6 = zmm3[2,3,2,3],mem[2,3,2,3]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm2, %zmm2
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm2
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm27, %zmm2
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm4, %zmm4
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm22, %zmm4
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm4
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vshufi64x2 $84, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm6 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm6 = zmm3[0,1,2,3],mem[2,3,2,3]
+; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm11
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm11
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vmovdqa64 %zmm4, 128(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm2, (%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm16, 320(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm11, 320(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm4, 192(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm2, 128(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm5, (%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 256(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm21, 192(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm24, 64(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm22, 384(%rax)
-; AVX512-FCP-NEXT: addq $1256, %rsp # imm = 0x4E8
+; AVX512-FCP-NEXT: vmovdqa64 %zmm20, 64(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 384(%rax)
+; AVX512-FCP-NEXT: addq $1432, %rsp # imm = 0x598
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: store_i8_stride7_vf64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: subq $1448, %rsp # imm = 0x5A8
-; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,zero,zero,ymm1[18]
-; AVX512DQ-NEXT: vmovdqa %ymm1, %ymm10
-; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: subq $1720, %rsp # imm = 0x6B8
+; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm7
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm7, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm26
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[12,13,0,1,14,15],zero,ymm2[3,12,13,2,3,16],zero,ymm2[30,31,28,29,16,17],zero,ymm2[31,18,19,28,29,18],zero
-; AVX512DQ-NEXT: vmovdqa %ymm2, %ymm14
-; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm25
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm18
; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm6
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm6, %ymm0
-; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm23
+; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm14
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm14, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm28
; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm8
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm8, %ymm1
-; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm17
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm8, %ymm1
+; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm23
+; AVX512DQ-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa (%r8), %ymm0
-; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vmovdqa (%r8), %ymm15
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm18
-; AVX512DQ-NEXT: vmovdqa (%r9), %ymm1
-; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm15, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm24
+; AVX512DQ-NEXT: vmovdqa (%r9), %ymm2
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0,13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0]
; AVX512DQ-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm22
-; AVX512DQ-NEXT: vporq %ymm0, %ymm1, %ymm24
-; AVX512DQ-NEXT: vmovdqa 32(%r9), %ymm11
-; AVX512DQ-NEXT: vmovdqa 32(%r8), %ymm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
-; AVX512DQ-NEXT: vmovdqa %ymm1, %ymm13
-; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm11[25],zero,ymm11[23],zero,zero,zero,zero,ymm11[26],zero,ymm11[24],zero,zero
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm27
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm19
+; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm10
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128]
+; AVX512DQ-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm6, %ymm10, %ymm0
+; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm9
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm9[23],zero,zero,zero,zero,ymm9[26],zero,ymm9[24],zero,zero,zero,zero,ymm9[27],zero,ymm9[25]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa 32(%rcx), %ymm7
-; AVX512DQ-NEXT: vmovdqa 32(%rdx), %ymm9
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero,ymm9[29],zero,zero
-; AVX512DQ-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [12,13,14,128,12,128,14,15,14,15,128,13,128,15,12,13,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm9, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm16
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm10[23,u,u,u],zero,ymm10[26],zero,ymm10[24,u,u,u],zero,ymm10[27],zero
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa 32(%rdx), %ymm5
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128]
+; AVX512DQ-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm5, %ymm2
+; AVX512DQ-NEXT: vmovdqa 32(%rcx), %ymm4
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[25],zero,ymm4[23],zero,zero,zero,zero,ymm4[26],zero,ymm4[24],zero,zero,zero,zero
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [27,0,0,0,128,30,128,28,0,0,0,128,31,128,29,0,27,0,0,0,128,30,128,28,0,0,0,128,31,128,29,0]
; AVX512DQ-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm7, %ymm2
-; AVX512DQ-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm5
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero,zero
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm4
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512DQ-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm2
-; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm20
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm2
+; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm20
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa 32(%r8), %ymm3
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29]
+; AVX512DQ-NEXT: # ymm0 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm3, %ymm11
+; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm21
+; AVX512DQ-NEXT: vmovdqa 32(%r9), %ymm2
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm12 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm12, %zmm11
+; AVX512DQ-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [29,128,27,0,0,0,128,30,128,28,0,0,0,128,31,128,29,128,27,0,0,0,128,30,128,28,0,0,0,128,31,128]
+; AVX512DQ-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm2, %ymm12
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm13 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm3[23],zero,ymm3[23,24,25,26],zero,ymm3[24],zero,ymm3[30,31]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512DQ-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-NEXT: vmovdqa 32(%rax), %ymm4
-; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512DQ-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm4, %ymm4
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm4, %zmm1
-; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm3, %ymm14, %ymm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm10[21],zero,ymm10[19],zero,zero,zero,zero,ymm10[22],zero,ymm10[20],zero,zero
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
-; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm6, %ymm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-NEXT: vmovdqa 32(%rax), %ymm0
+; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm12 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm13 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512DQ-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpshufb %ymm6, %ymm7, %ymm6
+; AVX512DQ-NEXT: vmovdqa64 %ymm7, %ymm17
+; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm13
+; AVX512DQ-NEXT: vmovdqa64 %ymm16, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm13, %ymm7
+; AVX512DQ-NEXT: vpor %ymm6, %ymm7, %ymm0
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm8[23],zero,ymm8[21,22,23,26],zero,ymm8[24],zero,ymm8[28,29,26,27]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm8[18,19,20,21],zero,ymm8[19],zero,ymm8[25,26,27,22],zero,ymm8[20],zero
-; AVX512DQ-NEXT: vmovdqa64 %ymm8, %ymm19
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm8, %ymm6
+; AVX512DQ-NEXT: vmovdqa64 %ymm20, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm14, %ymm7
+; AVX512DQ-NEXT: vmovdqa64 %ymm14, %ymm22
+; AVX512DQ-NEXT: vpor %ymm6, %ymm7, %ymm0
+; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa64 %ymm21, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm15, %ymm6
+; AVX512DQ-NEXT: vmovdqa64 %ymm15, %ymm16
+; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm15
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm15, %ymm7
+; AVX512DQ-NEXT: vpor %ymm6, %ymm7, %ymm0
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa (%rax), %ymm1
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm1, %ymm0
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm16 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm2 = ymm1[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm2, %zmm16
-; AVX512DQ-NEXT: vmovdqa 32(%rdx), %xmm3
-; AVX512DQ-NEXT: vmovdqa 32(%rcx), %xmm0
+; AVX512DQ-NEXT: vmovdqa 32(%rdx), %xmm0
; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm2 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512DQ-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512DQ-NEXT: vmovdqa64 %xmm2, %xmm29
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm4 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512DQ-NEXT: vpshufb %xmm4, %xmm3, %xmm2
-; AVX512DQ-NEXT: vmovdqa64 %xmm4, %xmm31
-; AVX512DQ-NEXT: vmovdqa64 %xmm3, %xmm30
-; AVX512DQ-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX512DQ-NEXT: vmovdqa 32(%rcx), %xmm1
+; AVX512DQ-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512DQ-NEXT: vpshufb %xmm11, %xmm1, %xmm6
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm12 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512DQ-NEXT: vpshufb %xmm12, %xmm0, %xmm7
+; AVX512DQ-NEXT: vpor %xmm6, %xmm7, %xmm0
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm3
-; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm4
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm12 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512DQ-NEXT: vpshufb %xmm12, %xmm4, %xmm0
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm15 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm3, %xmm2
-; AVX512DQ-NEXT: vmovdqa64 %xmm3, %xmm21
-; AVX512DQ-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm0
+; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm14
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm6 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512DQ-NEXT: vpshufb %xmm6, %xmm14, %xmm7
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm0, %xmm8
+; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm18
+; AVX512DQ-NEXT: vpor %xmm7, %xmm8, %xmm0
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm3 = [0,0,0,0,2,3,0,1,0,18,0,19,18,0,19,0]
-; AVX512DQ-NEXT: vmovdqa 32(%rax), %xmm2
-; AVX512DQ-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5,5,6]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm2, %zmm3
-; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm1 = [0,0,0,0,2,3,0,1,0,18,0,19,18,0,19,0]
+; AVX512DQ-NEXT: vmovdqa 32(%rax), %xmm0
+; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm7 = xmm0[0,1,2,3,4,5,5,6]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512DQ-NEXT: vpermi2d %zmm7, %zmm8, %zmm1
+; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vmovdqa 32(%r9), %xmm0
-; AVX512DQ-NEXT: vmovdqa 32(%r8), %xmm2
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm6 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512DQ-NEXT: vpshufb %xmm6, %xmm0, %xmm8
-; AVX512DQ-NEXT: vmovdqa64 %xmm0, %xmm28
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm14 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm2, %xmm10
-; AVX512DQ-NEXT: vmovdqa %xmm2, %xmm3
-; AVX512DQ-NEXT: vporq %xmm8, %xmm10, %xmm26
+; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-NEXT: vmovdqa 32(%r8), %xmm1
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm8 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512DQ-NEXT: vpshufb %xmm8, %xmm0, %xmm7
+; AVX512DQ-NEXT: vmovdqa64 %xmm8, %xmm20
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm1, %xmm8
+; AVX512DQ-NEXT: vmovdqa64 %xmm0, %xmm30
+; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm29
+; AVX512DQ-NEXT: vpor %xmm7, %xmm8, %xmm0
+; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa64 %ymm28, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm7
; AVX512DQ-NEXT: vmovdqa64 %ymm23, %ymm0
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm7, %ymm8
-; AVX512DQ-NEXT: vmovdqa64 %ymm17, %ymm0
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm9, %ymm10
-; AVX512DQ-NEXT: vpor %ymm8, %ymm10, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm5, %ymm8
+; AVX512DQ-NEXT: vpor %ymm7, %ymm8, %ymm0
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,ymm5[14],zero,zero,zero,zero,zero,zero,ymm5[15],zero,zero,zero,zero,zero,zero,ymm5[16],zero,zero,zero,zero,zero,zero,ymm5[17],zero,zero,zero,zero,zero,zero,ymm5[18]
-; AVX512DQ-NEXT: vmovdqa64 %ymm5, %ymm23
-; AVX512DQ-NEXT: vmovdqa64 %ymm20, %ymm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[0,1,14],zero,ymm0[12,13,0,1,14,15],zero,ymm0[3,12,13,2,3,16],zero,ymm0[30,31,28,29,16,17],zero,ymm0[31,18,19,28,29,18],zero
-; AVX512DQ-NEXT: vpor %ymm8, %ymm10, %ymm2
-; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm2
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm13, %ymm8
-; AVX512DQ-NEXT: vmovdqa64 %ymm22, %ymm2
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm11, %ymm10
-; AVX512DQ-NEXT: vmovdqa64 %ymm11, %ymm27
-; AVX512DQ-NEXT: vpor %ymm8, %ymm10, %ymm2
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
+; AVX512DQ-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm21
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
+; AVX512DQ-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm5, %ymm1
+; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm19
+; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vmovdqa64 %ymm26, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm10, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm25, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm9, %ymm1
+; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
+; AVX512DQ-NEXT: # ymm7 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm7, %ymm10, %ymm0
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [2,3,4,5,128,3,128,5,4,5,6,128,4,128,6,7,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm9, %ymm4
+; AVX512DQ-NEXT: vpor %ymm0, %ymm4, %ymm0
+; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
+; AVX512DQ-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm10, %ymm2, %ymm4
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
+; AVX512DQ-NEXT: # ymm0 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm3, %ymm5
+; AVX512DQ-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX512DQ-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa64 %ymm24, %ymm4
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vmovdqa64 %ymm27, %ymm4
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm2
-; AVX512DQ-NEXT: vpshufb %xmm12, %xmm2, %xmm5
-; AVX512DQ-NEXT: vmovdqa64 %xmm2, %xmm25
-; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm12, %xmm9
-; AVX512DQ-NEXT: vporq %xmm5, %xmm9, %xmm22
-; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm13
-; AVX512DQ-NEXT: vmovdqa64 %xmm29, %xmm2
-; AVX512DQ-NEXT: vpshufb %xmm2, %xmm13, %xmm7
-; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm9
-; AVX512DQ-NEXT: vmovdqa64 %xmm31, %xmm2
-; AVX512DQ-NEXT: vpshufb %xmm2, %xmm9, %xmm10
-; AVX512DQ-NEXT: vpor %xmm7, %xmm10, %xmm2
+; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm3
+; AVX512DQ-NEXT: vpshufb %xmm6, %xmm3, %xmm2
+; AVX512DQ-NEXT: vmovdqa64 %xmm3, %xmm31
+; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm4
+; AVX512DQ-NEXT: vmovdqa64 %xmm18, %xmm3
+; AVX512DQ-NEXT: vpshufb %xmm3, %xmm4, %xmm3
+; AVX512DQ-NEXT: vmovdqa64 %xmm4, %xmm28
+; AVX512DQ-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512DQ-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT: vmovdqa (%r9), %xmm2
+; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm3
+; AVX512DQ-NEXT: vpshufb %xmm11, %xmm3, %xmm2
+; AVX512DQ-NEXT: vmovdqa %xmm3, %xmm6
+; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm4
+; AVX512DQ-NEXT: vpshufb %xmm12, %xmm4, %xmm3
+; AVX512DQ-NEXT: vmovdqa64 %xmm4, %xmm18
+; AVX512DQ-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512DQ-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT: vpshufb %xmm6, %xmm2, %xmm6
-; AVX512DQ-NEXT: vmovdqa (%r8), %xmm5
-; AVX512DQ-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm5, %xmm11
-; AVX512DQ-NEXT: vpor %xmm6, %xmm11, %xmm6
-; AVX512DQ-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm24, %zmm0, %zmm6
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm7 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512DQ-NEXT: vpshufb %xmm7, %xmm11, %xmm11
-; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm11[0,1,0,1],zmm6[4,5,6,7]
+; AVX512DQ-NEXT: vmovdqa (%r9), %xmm3
+; AVX512DQ-NEXT: vmovdqa64 %xmm20, %xmm2
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512DQ-NEXT: vmovdqa %xmm3, %xmm8
+; AVX512DQ-NEXT: vmovdqa (%r8), %xmm3
+; AVX512DQ-NEXT: vmovdqa64 %xmm30, %xmm4
+; AVX512DQ-NEXT: vpshufb %xmm4, %xmm3, %xmm4
+; AVX512DQ-NEXT: vmovdqa %xmm3, %xmm9
+; AVX512DQ-NEXT: vpor %xmm2, %xmm4, %xmm2
; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa (%rax), %xmm10
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm11 = xmm10[0,1,2,3,4,5,5,6]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
-; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm11, %ymm11
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm14 = zero,ymm1[13],zero,zero,zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm14, %zmm11, %zmm24
-; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm2
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
-; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm18
-; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} ymm5 = [13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm13[23],zero,zero,zero,zero,ymm13[26],zero,ymm13[24],zero,zero,zero,zero,ymm13[27],zero,ymm13[25]
+; AVX512DQ-NEXT: vmovdqa64 %ymm17, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm7, %ymm3, %ymm4
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm2
+; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm3[23,u,u,u],zero,ymm3[26],zero,ymm3[24,u,u,u],zero,ymm3[27],zero
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm13, %ymm1
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa64 %ymm22, %ymm2
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
+; AVX512DQ-NEXT: vmovdqa64 %ymm21, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb %ymm5, %ymm2, %ymm11
-; AVX512DQ-NEXT: vmovdqa64 %ymm5, %ymm29
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm15 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero,ymm14[29]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm31 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800,18446463693966278655,18446742978476179455,18446463693966278655,18446742978476179455]
-; AVX512DQ-NEXT: vpternlogq $248, %ymm31, %ymm11, %ymm15
-; AVX512DQ-NEXT: vmovdqa64 %xmm28, %xmm6
-; AVX512DQ-NEXT: vmovdqa %xmm3, %xmm8
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm15[0,1,2,3],zmm11[0,1,0,1]
-; AVX512DQ-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm1 = ymm0[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512DQ-NEXT: vmovdqa64 %ymm20, %ymm17
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,2,3,3,6,6,7,7]
-; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} ymm11 = [9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10]
-; AVX512DQ-NEXT: vmovdqa64 %ymm23, %ymm5
-; AVX512DQ-NEXT: vpshufb %ymm11, %ymm5, %ymm15
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm15, %zmm19
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29],zero,zero,zero
-; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm11, %ymm0, %ymm1
-; AVX512DQ-NEXT: vpshuflw $233, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm11 = mem[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[0,0,1,1,4,4,5,5]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm11, %zmm23
-; AVX512DQ-NEXT: vmovdqa64 %xmm30, %xmm0
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm2[23],zero,ymm2[21,22,23,26],zero,ymm2[24],zero,ymm2[28,29,26,27]
+; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm30
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm15[25],zero,ymm15[23],zero,zero,zero,zero,ymm15[26],zero,ymm15[24],zero,zero
+; AVX512DQ-NEXT: vmovdqa64 %ymm16, %ymm3
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm3[23],zero,ymm3[23,24,25,26],zero,ymm3[24],zero,ymm3[30,31]
+; AVX512DQ-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpshufb %ymm10, %ymm15, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512DQ-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa (%rax), %ymm4
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm15 = [0,5,4,0,5,0,4,0,20,21,0,23,0,21,0,23]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm1 = ymm4[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm1, %zmm15
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm9[8],xmm8[8],xmm9[9],xmm8[9],xmm9[10],xmm8[10],xmm9[11],xmm8[11],xmm9[12],xmm8[12],xmm9[13],xmm8[13],xmm9[14],xmm8[14],xmm9[15],xmm8[15]
+; AVX512DQ-NEXT: vmovdqa64 %xmm9, %xmm21
+; AVX512DQ-NEXT: vmovdqa64 %xmm8, %xmm22
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm2 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512DQ-NEXT: vmovdqa64 %xmm2, %xmm24
+; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm1[0,1,0,1],zmm0[4,5,6,7]
+; AVX512DQ-NEXT: vmovdqa (%rax), %xmm11
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm11[0,1,2,3,4,5,5,6]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm9 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
+; AVX512DQ-NEXT: vpshufb %ymm9, %ymm4, %ymm2
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm16
+; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3],xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm2, %xmm2
+; AVX512DQ-NEXT: vmovdqa64 %xmm0, %xmm27
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm12 = zmm0[2,3,2,3],zmm2[0,1,0,1]
+; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm15 = xmm13[8],xmm9[8],xmm13[9],xmm9[9],xmm13[10],xmm9[10],xmm13[11],xmm9[11],xmm13[12],xmm9[12],xmm13[13],xmm9[13],xmm13[14],xmm9[14],xmm13[15],xmm9[15]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512DQ-NEXT: vpshufb %xmm0, %xmm15, %xmm3
-; AVX512DQ-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %xmm0, %xmm1, %xmm0
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm3, %xmm3
+; AVX512DQ-NEXT: vmovdqa64 %xmm0, %xmm26
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm17 = zmm0[2,3,2,3],zmm3[0,1,0,1]
+; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX512DQ-NEXT: vmovdqa64 %xmm29, %xmm3
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm13, %xmm13
+; AVX512DQ-NEXT: vmovdqa64 %xmm0, %xmm25
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm29 = zmm0[2,3,2,3],zmm13[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm20
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX512DQ-NEXT: vmovdqa %xmm6, %xmm8
+; AVX512DQ-NEXT: vmovdqa64 %xmm18, %xmm7
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm4, %xmm2
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm18
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm30
-; AVX512DQ-NEXT: vmovdqa64 %xmm21, %xmm0
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; AVX512DQ-NEXT: vmovdqa64 %xmm25, %xmm3
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm3[8],xmm12[8],xmm3[9],xmm12[9],xmm3[10],xmm12[10],xmm3[11],xmm12[11],xmm3[12],xmm12[12],xmm3[13],xmm12[13],xmm3[14],xmm12[14],xmm3[15],xmm12[15]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm4 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-NEXT: vpshufb %xmm4, %xmm1, %xmm1
-; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm19
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm14[8],xmm10[8],xmm14[9],xmm10[9],xmm14[10],xmm10[10],xmm14[11],xmm10[11],xmm14[12],xmm10[12],xmm14[13],xmm10[13],xmm14[14],xmm10[14],xmm14[15],xmm10[15]
+; AVX512DQ-NEXT: vmovdqa64 %xmm31, %xmm6
+; AVX512DQ-NEXT: vmovdqa64 %xmm28, %xmm4
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm14 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm14, %xmm13
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm21
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm8[8],xmm6[8],xmm8[9],xmm6[9],xmm8[10],xmm6[10],xmm8[11],xmm6[11],xmm8[12],xmm6[12],xmm8[13],xmm6[13],xmm8[14],xmm6[14],xmm8[15],xmm6[15]
-; AVX512DQ-NEXT: vpshufb %xmm7, %xmm0, %xmm0
-; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm26 = zmm26[0,1,0,1],zmm0[0,1,0,1]
-; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm0, %ymm6
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm0, %ymm8
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
-; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm28
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm0[23],zero,ymm0[21,22,23,26],zero,ymm0[24],zero,ymm0[28,29,26,27]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm0[18,19,20,21],zero,ymm0[19],zero,ymm0[25,26,27,22],zero,ymm0[20],zero
-; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm20
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[21],zero,ymm5[19],zero,zero,zero,zero,ymm5[22],zero,ymm5[20],zero,zero
-; AVX512DQ-NEXT: vmovdqa64 %ymm27, %ymm1
-; AVX512DQ-NEXT: vmovdqa64 %ymm29, %ymm0
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm1, %ymm4
-; AVX512DQ-NEXT: vmovdqa %ymm2, %ymm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
-; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
-; AVX512DQ-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm1, %ymm0
-; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512DQ-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm0, %ymm1
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm14, %ymm2
-; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm27
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
-; AVX512DQ-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm14, %ymm14
-; AVX512DQ-NEXT: vmovdqa64 %ymm14, %ymm25
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm29
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = zero,ymm0[13],zero,zero,zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,ymm0[17],zero,zero
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm14 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm14 = ymm14[0,1,1,3,4,5,5,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,2]
-; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm14, %ymm14
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm14, %zmm2, %zmm2
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm12 = xmm12[0],xmm3[0],xmm12[1],xmm3[1],xmm12[2],xmm3[2],xmm12[3],xmm3[3],xmm12[4],xmm3[4],xmm12[5],xmm3[5],xmm12[6],xmm3[6],xmm12[7],xmm3[7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm15, %xmm15
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm12, %xmm12
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm22, %zmm12, %zmm0
-; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3],xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm14 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm11, %xmm9
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm13, %xmm11
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm13 = ymm6[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm14 = ymm18[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm22 = ymm4[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm8[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm7[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm5[2,3,2,3]
-; AVX512DQ-NEXT: vmovdqa64 %ymm17, %ymm3
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm3 = ymm3[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,0,1,1,4,4,5,5]
-; AVX512DQ-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm12 # 16-byte Folded Reload
-; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX512DQ-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX512DQ-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1],xmm5[2],mem[2],xmm5[3],mem[3],xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-NEXT: vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm11 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm11 = zmm5[0,1,0,1],mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm10[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm7 = xmm10[1,1,0,0,4,5,6,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,0]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm7, %zmm7
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm10 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm5 = zmm19[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpternlogq $236, %zmm31, %zmm10, %zmm5
-; AVX512DQ-NEXT: vpandq %ymm31, %ymm22, %ymm10
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm1, %zmm1
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm10 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vporq %zmm10, %zmm1, %zmm1
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm10 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655,18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512DQ-NEXT: vpand %ymm6, %ymm10, %ymm6
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm6 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vporq %zmm6, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm5
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm1, %zmm0, %zmm5
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm1 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm6 = zmm23[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpternlogq $236, %zmm10, %zmm1, %zmm6
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm1 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm17 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vporq %zmm1, %zmm17, %zmm1
-; AVX512DQ-NEXT: vpternlogq $226, %zmm6, %zmm0, %zmm1
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm6 = zmm30[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm17 = zmm21[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm6, %zmm0, %zmm17
-; AVX512DQ-NEXT: vpternlogq $248, %ymm10, %ymm13, %ymm14
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm3[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $236, %ymm10, %ymm4, %ymm0
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm9[0,1,0,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm14, %zmm3
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm4 = mem[2,3,2,3]
-; AVX512DQ-NEXT: vpshufhw $190, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm6 = mem[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,2,3,3,6,6,7,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm6
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm15[0,1,0,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm6, %zmm4
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm6 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512DQ-NEXT: vpternlogq $184, %zmm3, %zmm6, %zmm4
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm28[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm20[2,3,2,3]
-; AVX512DQ-NEXT: vpor %ymm3, %ymm8, %ymm3
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm8, %zmm3
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm0
-; AVX512DQ-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm13 = mem[2,3,2,3]
-; AVX512DQ-NEXT: vpshuflw $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
-; AVX512DQ-NEXT: # xmm14 = mem[1,1,0,0,4,5,6,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,0]
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm15 = mem[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm18 = ymm27[2,3,2,3]
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm19 = mem[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm20 = ymm25[2,3,2,3]
-; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm21 = mem[2,3,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm22 = ymm29[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm3, %zmm6, %zmm0
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm3 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm6 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm6
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
+; AVX512DQ-NEXT: vmovdqa64 %xmm24, %xmm2
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm14 = zmm2[0,1,0,1],zmm1[0,1,0,1]
+; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX512DQ-NEXT: vpshufb %ymm9, %ymm10, %ymm1
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm9 = ymm10[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[0,1,1,3,4,5,5,7]
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm10 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; AVX512DQ-NEXT: vpternlogq $226, %zmm12, %zmm10, %zmm17
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm2 = mem[2,3,2,3]
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm28 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm28 = mem[2,3,2,3]
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm24
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm24
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm14[0,0,1,0]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm13, %zmm3
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm3
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm3
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm28, %zmm3, %zmm28
+; AVX512DQ-NEXT: vpternlogq $226, %zmm2, %zmm10, %zmm28
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm9[2,2,3,2]
+; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm10
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; AVX512DQ-NEXT: vmovdqa64 %xmm27, %xmm2
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512DQ-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm27 # 16-byte Folded Reload
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; AVX512DQ-NEXT: vmovdqa64 %xmm26, %xmm1
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm2, %xmm2
+; AVX512DQ-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 16-byte Folded Reload
+; AVX512DQ-NEXT: vmovdqa64 %xmm21, %xmm1
+; AVX512DQ-NEXT: vmovdqa64 %xmm22, %xmm3
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX512DQ-NEXT: vmovdqa64 %xmm25, %xmm1
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm8, %xmm8
+; AVX512DQ-NEXT: vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm8 = zmm8[0,1,0,1],mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm11[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm5 = xmm11[1,1,0,0,4,5,6,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm5, %zmm5
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm9 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm11 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vporq %zmm9, %zmm11, %zmm9
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm11 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm12 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vporq %zmm11, %zmm12, %zmm11
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm11
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm9 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm12 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vporq %zmm9, %zmm12, %zmm9
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm12 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
+; AVX512DQ-NEXT: vpternlogq $184, %zmm11, %zmm12, %zmm9
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm13[0,1,0,1]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm18[0,1,0,1]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm20[2,3,2,3]
+; AVX512DQ-NEXT: vpshuflw $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX512DQ-NEXT: # xmm1 = mem[1,1,0,0,4,5,6,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm13 = zmm19[0,1,0,1,4,5,4,5]
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512DQ-NEXT: vpternlogq $226, %zmm13, %zmm12, %zmm0
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm13 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm21 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vporq %zmm13, %zmm21, %zmm13
+; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm20 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm18 = zmm30[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vporq %zmm20, %zmm18, %zmm18
+; AVX512DQ-NEXT: vpternlogq $226, %zmm13, %zmm12, %zmm18
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm11 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm4
+; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm23, %zmm16
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm16
; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
; AVX512DQ-NEXT: # zmm4 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm4
-; AVX512DQ-NEXT: vporq %ymm15, %ymm18, %ymm5
-; AVX512DQ-NEXT: vporq %ymm19, %ymm20, %ymm6
-; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm5[0,1,2,3]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm16
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm16
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm26
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm26
-; AVX512DQ-NEXT: vporq %ymm21, %ymm22, %ymm1
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm5[0,1,2,3],zmm1[0,1,2,3]
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm4
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,0]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm29, %zmm1
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm1
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm14
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm14
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-NEXT: vshufi64x2 $84, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm0 = zmm0[0,1,2,3],mem[2,3,2,3]
+; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm10
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm28, %zmm10
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm27[0,1,0,1,4,5,4,5]
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm2 = zmm2[0,1,0,1,4,5,4,5]
; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512DQ-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm0 = mem[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm1 = zmm12[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm7[0,0,1,0,4,4,5,4]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm0
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm5[0,0,1,0,4,4,5,4]
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm0
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-NEXT: vshufi64x2 $85, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm2 = zmm2[2,3,2,3],mem[2,3,2,3]
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm15
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm15
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQ-NEXT: vmovdqa64 %zmm15, 128(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm0, (%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm2, 320(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm26, 256(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm16, 128(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm10, 320(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm14, 256(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm1, 192(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm4, 384(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm3, 192(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm24, 64(%rax)
-; AVX512DQ-NEXT: addq $1448, %rsp # imm = 0x5A8
+; AVX512DQ-NEXT: vmovdqa64 %zmm16, 64(%rax)
+; AVX512DQ-NEXT: addq $1720, %rsp # imm = 0x6B8
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: store_i8_stride7_vf64:
; AVX512DQ-FCP: # %bb.0:
-; AVX512DQ-FCP-NEXT: subq $1256, %rsp # imm = 0x4E8
-; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, %ymm14
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, %ymm13
+; AVX512DQ-FCP-NEXT: subq $1432, %rsp # imm = 0x598
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero,zero
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512DQ-FCP-NEXT: # ymm15 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm3, %ymm1
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm16
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm25
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [25,128,23,0,0,0,128,26,128,24,0,0,0,128,27,128,25,128,23,0,0,0,128,26,128,24,0,0,0,128,27,128]
+; AVX512DQ-FCP-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm27
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm18
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %ymm3
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512DQ-FCP-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm28
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm3[30],zero,ymm3[28,u,u,u],zero,ymm3[31],zero,ymm3[29,u]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm19
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512DQ-FCP-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm24
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm17
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %ymm4
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm18
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[25],zero,ymm4[23],zero,zero,zero,zero,ymm4[26],zero,ymm4[24],zero,zero
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[27],zero,zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29]
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512DQ-FCP-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm23
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512DQ-FCP-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm22
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rax), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,zero,zero,ymm1[18]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm23
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,14],zero,ymm1[12,13,0,1,14,15],zero,ymm1[3,12,13,2,3,16],zero,ymm1[30,31,28,29,16,17],zero,ymm1[31,18,19,28,29,18],zero
+; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm5
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm5[14],zero,zero,zero,zero,zero,zero,ymm5[15],zero,zero,zero,zero,zero,zero,ymm5[16],zero,zero,zero,zero,zero,zero,ymm5[17],zero,zero,zero,zero,zero,zero,ymm5[18]
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm4
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[0,1,14],zero,ymm4[12,13,0,1,14,15],zero,ymm4[3,12,13,2,3,16],zero,ymm4[30,31,28,29,16,17],zero,ymm4[31,18,19,28,29,18],zero
; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm26
+; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero,zero
; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm3
-; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm3, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,0,1,14],zero,ymm2[14,15,0,1,14,15],zero,ymm2[13,14,15,16,17,16],zero,ymm2[30,31,30,31,16,17],zero,ymm2[31,28,29,30,31]
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm25
-; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm5, (%rsp) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0,13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0]
-; AVX512DQ-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm5, %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm30
-; AVX512DQ-FCP-NEXT: vporq %ymm3, %ymm5, %ymm24
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %xmm6
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm5
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm6, %xmm28
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm3, %xmm6
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm9, %xmm19
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm29
-; AVX512DQ-FCP-NEXT: vpor %xmm5, %xmm6, %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm10
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %xmm6
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm6, %xmm5
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm10, %xmm9
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm10, %xmm27
-; AVX512DQ-FCP-NEXT: vpor %xmm5, %xmm9, %xmm5
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %xmm15
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %xmm10
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm15, %xmm9
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm10, %xmm12
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm0, %xmm21
-; AVX512DQ-FCP-NEXT: vporq %xmm9, %xmm12, %xmm22
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm13, %ymm7
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm13, %ymm20
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm14, %ymm1
-; AVX512DQ-FCP-NEXT: vpor %ymm7, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm16, %ymm7
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = zero,zero,zero,ymm7[14],zero,zero,zero,zero,zero,zero,ymm7[15],zero,zero,zero,zero,zero,zero,ymm7[16],zero,zero,zero,zero,zero,zero,ymm7[17],zero,zero,zero,zero,zero,zero,ymm7[18]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm7
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[0,1,14],zero,ymm7[12,13,0,1,14,15],zero,ymm7[3,12,13,2,3,16],zero,ymm7[30,31,28,29,16,17],zero,ymm7[31,18,19,28,29,18],zero
-; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm7, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqu64 %ymm18, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm7
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm0
-; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm13
-; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm13, %xmm0
+; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[13,u,u,u,u,u],zero,ymm0[14,u,u,u,u,u],zero,ymm0[15,u,u,u,u,u],zero,ymm0[16,u,u,u,u,u],zero,ymm0[17,u,u,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %xmm9
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm9, %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm11
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %xmm9
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm11, %xmm11
+; AVX512DQ-FCP-NEXT: vpor %xmm9, %xmm11, %xmm9
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %xmm9
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %xmm14
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm14, %xmm11
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm14, %xmm30
+; AVX512DQ-FCP-NEXT: vpor %xmm9, %xmm11, %xmm9
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm9, %xmm26
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm9
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm9, %xmm1
-; AVX512DQ-FCP-NEXT: vporq %xmm0, %xmm1, %xmm31
-; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm14
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm14, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm8
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, %xmm1
-; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm8, %xmm1
-; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %xmm1
-; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm1, %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm21, %xmm1
-; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm4, %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm10
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm9, %xmm21
+; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm10, %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm7
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm6
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm10
+; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm10, %xmm8
+; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm8, %xmm6
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %xmm11
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm12
+; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %xmm6
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm6, %xmm13
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm6, %xmm9
+; AVX512DQ-FCP-NEXT: vporq %xmm12, %xmm13, %xmm31
+; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm4, %ymm12
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
+; AVX512DQ-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm13
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm6
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm27, %ymm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm12
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
+; AVX512DQ-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm4, %ymm13
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm20
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm29
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm28, %ymm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm12
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
+; AVX512DQ-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm13
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm16
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm28
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm24, %ymm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm13
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
+; AVX512DQ-FCP-NEXT: # ymm12 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm15
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm15, %zmm27
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm13
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm22, %ymm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm15
+; AVX512DQ-FCP-NEXT: vpor %ymm13, %ymm15, %ymm6
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
+; AVX512DQ-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm15
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm24
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
+; AVX512DQ-FCP-NEXT: # ymm13 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm1, %ymm14
+; AVX512DQ-FCP-NEXT: vpor %ymm15, %ymm14, %ymm6
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm4[28],zero,ymm4[30,31,30,31],zero,ymm4[29],zero,ymm4[31,28,29]
+; AVX512DQ-FCP-NEXT: vpor %ymm5, %ymm4, %ymm4
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm3[30],zero,ymm3[28,u,u,u],zero,ymm3[31],zero,ymm3[29,u]
+; AVX512DQ-FCP-NEXT: vporq %ymm2, %ymm3, %ymm23
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm0[27,u,u,u],zero,ymm0[30],zero,ymm0[28,u,u,u],zero,ymm0[31],zero
+; AVX512DQ-FCP-NEXT: vporq %ymm1, %ymm0, %ymm22
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm19, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[0,1,0,1,14],zero,ymm3[14,15,0,1,14,15],zero,ymm3[13,14,15,16,17,16],zero,ymm3[30,31,30,31,16,17],zero,ymm3[31,28,29,30,31]
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27],zero,ymm0[25]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm12
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm12[21],zero,ymm12[19],zero,zero,zero,zero,ymm12[22],zero,ymm12[20],zero,zero
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm16, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero,zero,zero,ymm3[18]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[12,13,0,1,14,15],zero,ymm2[3,12,13,2,3,16],zero,ymm2[30,31,28,29,16,17],zero,ymm2[31,18,19,28,29,18],zero
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm26, %ymm11
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm11[25],zero,ymm11[23],zero,zero,zero,zero,ymm11[26],zero,ymm11[24],zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[18],zero,zero,zero,zero,ymm11[21],zero,ymm11[19],zero,zero,zero,zero,ymm11[22],zero,ymm11[20]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm24, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512DQ-FCP-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
-; AVX512DQ-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm19
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm30
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[13,u,u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm24, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm9[8],xmm11[8],xmm9[9],xmm11[9],xmm9[10],xmm11[10],xmm9[11],xmm11[11],xmm9[12],xmm11[12],xmm9[13],xmm11[13],xmm9[14],xmm11[14],xmm9[15],xmm11[15]
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm9, %xmm8
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm11, %xmm17
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [2,2,3,3,2,2,3,3]
-; AVX512DQ-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [2,2,3,3,2,2,3,3]
+; AVX512DQ-FCP-NEXT: # ymm6 = mem[0,1,0,1]
; AVX512DQ-FCP-NEXT: vmovdqa (%rax), %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512DQ-FCP-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa (%rax), %ymm4
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm18
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm24
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm23
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14]
-; AVX512DQ-FCP-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm25
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm26 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512DQ-FCP-NEXT: # ymm26 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm26, %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3],xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm2[0,1,2,3],zmm0[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm29, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm28, %xmm2
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm14[8],xmm8[8],xmm14[9],xmm8[9],xmm14[10],xmm8[10],xmm14[11],xmm8[11],xmm14[12],xmm8[12],xmm14[13],xmm8[13],xmm14[14],xmm8[14],xmm14[15],xmm8[15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm28
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm27, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm6, %xmm2
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm13[8],xmm9[8],xmm13[9],xmm9[9],xmm13[10],xmm9[10],xmm13[11],xmm9[11],xmm13[12],xmm9[12],xmm13[13],xmm9[13],xmm13[14],xmm9[14],xmm13[15],xmm9[15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm0, %zmm27
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm10[8],xmm15[8],xmm10[9],xmm15[9],xmm10[10],xmm15[10],xmm10[11],xmm15[11],xmm10[12],xmm15[12],xmm10[13],xmm15[13],xmm10[14],xmm15[14],xmm10[15],xmm15[15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm22[0,1,0,1],zmm2[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rax), %xmm0
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm0, %xmm29
-; AVX512DQ-FCP-NEXT: vpermd %ymm2, %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm11, %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm22 = ymm1[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm19, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm20 = ymm1[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm4
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm16, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm30
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm19 = ymm1[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20],zero,zero
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm3
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [4,5,4,5,5,7,4,5]
-; AVX512DQ-FCP-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm16
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3],xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm6, %xmm6
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
-; AVX512DQ-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm15
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm17 = ymm15[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm2
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm13
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm18 = ymm13[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm31, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3],xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm7, %xmm7
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm0, %xmm13
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm0[23],zero,ymm0[23,24,25,26],zero,ymm0[24],zero,ymm0[30,31]
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,5,6]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm0, %xmm19
+; AVX512DQ-FCP-NEXT: vpermd %ymm1, %ymm6, %ymm1
+; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa (%rax), %ymm12
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm12[13],zero,zero,zero,zero,zero,zero,ymm12[14],zero,zero,zero,zero,zero,zero,ymm12[15],zero,zero,zero,zero,zero,zero,ymm12[16],zero,zero,zero,zero,zero,zero,ymm12[17],zero,zero
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm20
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm7[8],xmm10[8],xmm7[9],xmm10[9],xmm7[10],xmm10[10],xmm7[11],xmm10[11],xmm7[12],xmm10[12],xmm7[13],xmm10[13],xmm7[14],xmm10[14],xmm7[15],xmm10[15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm25
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm8[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm12[30],zero,ymm12[28],zero,zero,zero,zero,ymm12[31],zero,ymm12[29],zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm9
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm23[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm11[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm31 # 16-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
-; AVX512DQ-FCP-NEXT: # ymm11 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm14
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm13
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm1
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpor %ymm12, %ymm9, %ymm9
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm9, %zmm6
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512DQ-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm9, %ymm5, %ymm0
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm6, %zmm0, %zmm7
-; AVX512DQ-FCP-NEXT: vpor %ymm4, %ymm10, %ymm4
+; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm16 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm16, %xmm2
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm11[8],xmm2[8],xmm11[9],xmm2[9],xmm11[10],xmm2[10],xmm11[11],xmm2[11],xmm11[12],xmm2[12],xmm11[13],xmm2[13],xmm11[14],xmm2[14],xmm11[15],xmm2[15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm1
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm4
-; AVX512DQ-FCP-NEXT: vpor %ymm8, %ymm14, %ymm2
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm18
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm26, %xmm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm21, %xmm0
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm21
+; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm13 = xmm14[8],xmm4[8],xmm14[9],xmm4[9],xmm14[10],xmm4[10],xmm14[11],xmm4[11],xmm14[12],xmm4[12],xmm14[13],xmm4[13],xmm14[14],xmm4[14],xmm14[15],xmm4[15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm13, %xmm1
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm5
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm4, %zmm0, %zmm5
-; AVX512DQ-FCP-NEXT: vpandq %ymm9, %ymm22, %ymm0
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm20, %zmm0
-; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm2 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm2, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpandq %ymm26, %ymm19, %ymm2
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm2, %zmm2
-; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm4 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm4, %zmm2, %zmm2
-; AVX512DQ-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512DQ-FCP-NEXT: vpandq %ymm26, %ymm18, %ymm0
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm15, %zmm0
-; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm4 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm4, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm2, %zmm4, %zmm0
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm28[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm27[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm2, %zmm4, %zmm8
-; AVX512DQ-FCP-NEXT: vpandq %ymm26, %ymm13, %ymm2
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm2 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm2, %zmm1, %zmm1
-; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm2 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm6 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vporq %zmm2, %zmm6, %zmm9
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm1, %zmm4, %zmm9
-; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-FCP-NEXT: vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm18 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm18 = zmm1[0,1,0,1],mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm29, %xmm3
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[1,1,0,0,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,1,0,1,2,0,0,1]
-; AVX512DQ-FCP-NEXT: vpermd %ymm2, %ymm4, %ymm19
-; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm1[1,1,0,0,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermd %ymm6, %ymm4, %ymm17
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm10
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm6
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
-; AVX512DQ-FCP-NEXT: # ymm11 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm12
-; AVX512DQ-FCP-NEXT: vmovdqu (%rsp), %ymm1 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm11
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm14 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
-; AVX512DQ-FCP-NEXT: # ymm14 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm1, %ymm15
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm2[23],zero,ymm2[23,24,25,26],zero,ymm2[24],zero,ymm2[30,31]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm14
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm4 = ymm3[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [4,5,4,5,5,7,4,5]
-; AVX512DQ-FCP-NEXT: vpermd %ymm4, %ymm2, %ymm20
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm22 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm22
-; AVX512DQ-FCP-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm23 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,0]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm13
+; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm30, %xmm5
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm5[8],xmm15[8],xmm5[9],xmm15[9],xmm5[10],xmm15[10],xmm5[11],xmm15[11],xmm5[12],xmm15[12],xmm5[13],xmm15[13],xmm5[14],xmm15[14],xmm5[15],xmm15[15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[0,1,0,1],zmm1[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rax), %xmm3
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,4,5,5,6]
+; AVX512DQ-FCP-NEXT: vpermd %ymm1, %ymm6, %ymm26
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm24 # 16-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm10[0],xmm7[0],xmm10[1],xmm7[1],xmm10[2],xmm7[2],xmm10[3],xmm7[3],xmm10[4],xmm7[4],xmm10[5],xmm7[5],xmm10[6],xmm7[6],xmm10[7],xmm7[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm17, %xmm2
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3],xmm8[4],xmm2[4],xmm8[5],xmm2[5],xmm8[6],xmm2[6],xmm8[7],xmm2[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm8, %xmm8
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,0,1],zmm31[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm14, %xmm1
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm14 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm31 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm14, %zmm31, %zmm14
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm31 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm30 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm31, %zmm30, %zmm30
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm30
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm14 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm31 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm14, %zmm31, %zmm14
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm31 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm30, %zmm31, %zmm14
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm18[0,1,0,1,4,5,4,5]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm13 = zmm13[0,1,0,1,4,5,4,5]
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm7, %zmm31, %zmm13
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm7 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm29 = zmm29[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm7, %zmm29, %zmm7
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm28 = zmm28[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm27 = zmm27[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vporq %zmm28, %zmm27, %zmm27
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm7, %zmm31, %zmm27
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm2[2,3,2,3],zmm1[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm16, %xmm2
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm7
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm23[2,3,2,3],zmm7[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm5[0],xmm15[0],xmm5[1],xmm15[1],xmm5[2],xmm15[2],xmm5[3],xmm15[3],xmm5[4],xmm15[4],xmm5[5],xmm15[5],xmm5[6],xmm15[6],xmm5[7],xmm15[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm10, %xmm10
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm22[2,3,2,3],zmm10[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, %xmm2
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm2[1,1,0,0,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm17 = [0,1,0,1,2,0,0,1]
+; AVX512DQ-FCP-NEXT: vpermd %ymm11, %ymm17, %ymm28
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm6
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm3[1,1,0,0,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermd %ymm4, %ymm17, %ymm17
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm2 = ymm12[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm23 = [4,5,4,5,5,7,4,5]
+; AVX512DQ-FCP-NEXT: vpermd %ymm2, %ymm23, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = zero,ymm15[13],zero,zero,zero,zero,zero,zero,ymm15[14],zero,zero,zero,zero,zero,zero,ymm15[15],zero,zero,zero,zero,zero,zero,ymm15[16],zero,zero,zero,zero,zero,zero,ymm15[17],zero,zero
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm15 = ymm15[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpermd %ymm15, %ymm23, %ymm15
+; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm15
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm11, %zmm11
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm15 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm1, %zmm15, %zmm7
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm1 = mem[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm18 = mem[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm18, %zmm3, %zmm18
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm21[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm19 = ymm25[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,0]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm23 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm23
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm24
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm23, %zmm24
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm19, %zmm2, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm21
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm21
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm0
-; AVX512DQ-FCP-NEXT: vpor %ymm12, %ymm15, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm7[0,1,2,3],zmm2[0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm16
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm16
-; AVX512DQ-FCP-NEXT: vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm2 = mem[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm31[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm5
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm17, %zmm2
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm2
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm2
-; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm13, %ymm1
-; AVX512DQ-FCP-NEXT: vpor %ymm11, %ymm14, %ymm5
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm5[0,1,2,3],zmm1[0,1,2,3]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm20, %zmm4
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm4
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm1, %zmm15, %zmm18
+; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm1 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm1
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm14 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm14
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm20
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm20
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm26, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm0
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm24[0,1,0,1,4,5,4,5]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm9 = zmm9[0,1,0,1,4,5,4,5]
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm9
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm28, %zmm5
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm5
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm5
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vshufi64x2 $85, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm6 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm6 = zmm3[2,3,2,3],mem[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm2, %zmm2
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm2
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm27, %zmm2
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm4, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm22, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm4
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vshufi64x2 $84, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm6 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm6 = zmm3[0,1,2,3],mem[2,3,2,3]
+; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm11
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm11
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, 128(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, (%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, 320(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, 320(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, 192(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, 128(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, (%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 256(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm21, 192(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm24, 64(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm22, 384(%rax)
-; AVX512DQ-FCP-NEXT: addq $1256, %rsp # imm = 0x4E8
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm20, 64(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 384(%rax)
+; AVX512DQ-FCP-NEXT: addq $1432, %rsp # imm = 0x598
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -10436,8 +10220,8 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-NEXT: vmovdqa (%rax), %ymm13
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm27 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512BW-NEXT: vpshufb %ymm27, %ymm13, %ymm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm26 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512BW-NEXT: vpshufb %ymm26, %ymm13, %ymm0
; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
; AVX512BW-NEXT: # ymm1 = mem[0,1,0,1]
; AVX512BW-NEXT: vpermw %ymm13, %ymm1, %ymm1
@@ -10446,12 +10230,13 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm17 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
; AVX512BW-NEXT: vpshufb %ymm17, %ymm9, %ymm1
; AVX512BW-NEXT: vmovdqa (%r8), %ymm10
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm22 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512BW-NEXT: vpshufb %ymm22, %ymm10, %ymm2
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm21 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512BW-NEXT: vpshufb %ymm21, %ymm10, %ymm2
; AVX512BW-NEXT: vpor %ymm1, %ymm2, %ymm2
-; AVX512BW-NEXT: vmovdqa (%r9), %xmm12
-; AVX512BW-NEXT: vmovdqa (%r8), %xmm3
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm12[8],xmm3[9],xmm12[9],xmm3[10],xmm12[10],xmm3[11],xmm12[11],xmm3[12],xmm12[12],xmm3[13],xmm12[13],xmm3[14],xmm12[14],xmm3[15],xmm12[15]
+; AVX512BW-NEXT: vmovdqa (%r9), %xmm1
+; AVX512BW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-NEXT: vmovdqa (%r8), %xmm12
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm12[8],xmm1[8],xmm12[9],xmm1[9],xmm12[10],xmm1[10],xmm12[11],xmm1[11],xmm12[12],xmm1[12],xmm12[13],xmm1[13],xmm12[14],xmm1[14],xmm12[15],xmm1[15]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
; AVX512BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm8
@@ -10461,270 +10246,257 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vmovdqa (%rdx), %ymm14
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm0 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
; AVX512BW-NEXT: vpshufb %ymm0, %ymm14, %ymm2
-; AVX512BW-NEXT: vmovdqa64 (%rcx), %ymm16
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm23 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512BW-NEXT: vpshufb %ymm23, %ymm16, %ymm4
+; AVX512BW-NEXT: vmovdqa (%rcx), %ymm15
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm20 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512BW-NEXT: vpshufb %ymm20, %ymm15, %ymm4
; AVX512BW-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512BW-NEXT: vmovdqa (%rdx), %xmm4
; AVX512BW-NEXT: vmovdqa (%rcx), %xmm5
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
; AVX512BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm6, %zmm20
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm6, %zmm22
; AVX512BW-NEXT: vmovdqa64 (%rdi), %ymm18
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm24 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
; AVX512BW-NEXT: vpshufb %ymm24, %ymm18, %ymm2
; AVX512BW-NEXT: vmovdqa64 (%rsi), %ymm19
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm26 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512BW-NEXT: vpshufb %ymm26, %ymm19, %ymm6
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm25 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512BW-NEXT: vpshufb %ymm25, %ymm19, %ymm6
; AVX512BW-NEXT: vpor %ymm2, %ymm6, %ymm2
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm6
; AVX512BW-NEXT: vmovdqa (%rsi), %xmm7
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm21 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm21 = xmm21[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm21 = ymm21[0,1,0,1]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm21, %zmm2
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm23 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm23 = xmm23[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm23 = ymm23[0,1,0,1]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm23, %zmm3
; AVX512BW-NEXT: movabsq $435749860008887046, %r10 # imm = 0x60C183060C18306
; AVX512BW-NEXT: kmovq %r10, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm20, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovdqu8 %zmm22, %zmm3 {%k1}
; AVX512BW-NEXT: movabsq $4066998693416279096, %r10 # imm = 0x3870E1C3870E1C38
; AVX512BW-NEXT: kmovq %r10, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm8, %zmm2 {%k1}
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm20 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512BW-NEXT: vmovdqu8 %zmm8, %zmm3 {%k1}
; AVX512BW-NEXT: vmovdqa64 32(%rdx), %ymm29
+; AVX512BW-NEXT: vpshufb %ymm0, %ymm29, %ymm0
+; AVX512BW-NEXT: vmovdqa64 32(%rcx), %ymm30
+; AVX512BW-NEXT: vpshufb %ymm20, %ymm30, %ymm8
+; AVX512BW-NEXT: vpor %ymm0, %ymm8, %ymm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm20 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
; AVX512BW-NEXT: vpshufb %ymm20, %ymm29, %ymm8
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm22 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512BW-NEXT: vpshufb %ymm22, %ymm30, %ymm23
+; AVX512BW-NEXT: vporq %ymm8, %ymm23, %ymm8
; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm21 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
-; AVX512BW-NEXT: vmovdqa64 32(%rcx), %ymm30
-; AVX512BW-NEXT: vpshufb %ymm21, %ymm30, %ymm25
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512BW-NEXT: vporq %ymm8, %ymm25, %ymm8
-; AVX512BW-NEXT: vpshufb %ymm0, %ymm29, %ymm0
-; AVX512BW-NEXT: vpshufb %ymm23, %ymm30, %ymm23
-; AVX512BW-NEXT: vporq %ymm0, %ymm23, %ymm0
; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 32(%rsi), %ymm28
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm15
-; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm8 = ymm15[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512BW-NEXT: vmovdqa64 32(%rdi), %ymm16
+; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm8 = ymm16[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[0,0,1,1,4,4,5,5]
-; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm25 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm23 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
; AVX512BW-NEXT: movl $676341840, %r10d # imm = 0x28502850
; AVX512BW-NEXT: kmovd %r10d, %k1
-; AVX512BW-NEXT: vpshufb %ymm25, %ymm28, %ymm8 {%k1}
+; AVX512BW-NEXT: vpshufb %ymm23, %ymm28, %ymm8 {%k1}
; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512BW-NEXT: vpshufb %ymm24, %ymm15, %ymm23
-; AVX512BW-NEXT: vpshufb %ymm26, %ymm28, %ymm24
-; AVX512BW-NEXT: vporq %ymm23, %ymm24, %ymm23
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm23, %zmm8
+; AVX512BW-NEXT: vpshufb %ymm24, %ymm16, %ymm24
+; AVX512BW-NEXT: vpshufb %ymm25, %ymm28, %ymm25
+; AVX512BW-NEXT: vporq %ymm24, %ymm25, %ymm24
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm24, %zmm8
; AVX512BW-NEXT: movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
; AVX512BW-NEXT: kmovq %r10, %k2
; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm8 {%k2}
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm23 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512BW-NEXT: vmovdqa64 32(%r8), %ymm31
-; AVX512BW-NEXT: vpshufb %ymm23, %ymm31, %ymm0
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm26 = ymm0[2,3,2,3]
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm24 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512BW-NEXT: vmovdqa 32(%r9), %ymm0
-; AVX512BW-NEXT: vpshufb %ymm24, %ymm0, %ymm11
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512BW-NEXT: vporq %ymm26, %ymm11, %ymm11
-; AVX512BW-NEXT: vpshufb %ymm17, %ymm0, %ymm17
-; AVX512BW-NEXT: vpshufb %ymm22, %ymm31, %ymm22
-; AVX512BW-NEXT: vporq %ymm17, %ymm22, %ymm17
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm17, %zmm11
+; AVX512BW-NEXT: vmovdqa64 32(%r9), %ymm31
+; AVX512BW-NEXT: vpshufb %ymm17, %ymm31, %ymm17
+; AVX512BW-NEXT: vmovdqa 32(%r8), %ymm1
+; AVX512BW-NEXT: vpshufb %ymm21, %ymm1, %ymm21
+; AVX512BW-NEXT: vporq %ymm17, %ymm21, %ymm17
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm24 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512BW-NEXT: vpshufb %ymm24, %ymm1, %ymm21
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm25 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512BW-NEXT: vpshufb %ymm25, %ymm31, %ymm27
+; AVX512BW-NEXT: vporq %ymm21, %ymm27, %ymm21
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm21 = ymm21[2,3,2,3]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm21, %zmm17, %zmm21
; AVX512BW-NEXT: vmovdqa64 32(%rax), %ymm17
-; AVX512BW-NEXT: vpshufb %ymm27, %ymm17, %ymm22
+; AVX512BW-NEXT: vpshufb %ymm26, %ymm17, %ymm27
; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm26 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
-; AVX512BW-NEXT: vpermw %ymm17, %ymm26, %ymm27
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm27, %zmm22, %zmm22
+; AVX512BW-NEXT: vpermw %ymm17, %ymm26, %ymm11
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm27, %zmm11
; AVX512BW-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
; AVX512BW-NEXT: kmovq %r10, %k3
-; AVX512BW-NEXT: vmovdqu8 %zmm22, %zmm11 {%k3}
+; AVX512BW-NEXT: vmovdqu8 %zmm11, %zmm21 {%k3}
; AVX512BW-NEXT: movabsq $-4357498600088870461, %r10 # imm = 0xC3870E1C3870E1C3
; AVX512BW-NEXT: kmovq %r10, %k3
-; AVX512BW-NEXT: vmovdqu8 %zmm11, %zmm8 {%k3}
-; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm15[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512BW-NEXT: vmovdqu8 %zmm21, %zmm8 {%k3}
+; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm16[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[2,2,3,3,6,6,7,7]
; AVX512BW-NEXT: movl $338170920, %r10d # imm = 0x14281428
; AVX512BW-NEXT: kmovd %r10d, %k4
; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm27 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
; AVX512BW-NEXT: vpshufb %ymm27, %ymm28, %ymm11 {%k4}
-; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512BW-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT: vpshufb %ymm1, %ymm28, %ymm22
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm22 = ymm22[2,3,2,3]
+; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512BW-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512BW-NEXT: vpshufb %ymm2, %ymm28, %ymm21
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm28 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512BW-NEXT: vpshufb %ymm28, %ymm15, %ymm15
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX512BW-NEXT: vporq %ymm22, %ymm15, %ymm15
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm15, %zmm15
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm29[24,25],zero,ymm29[23],zero,ymm29[21,22,23,26],zero,ymm29[24],zero,ymm29[28,29,26,27]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm22 = ymm30[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm30[25],zero,ymm30[23],zero,zero,zero,zero,ymm30[26],zero,ymm30[24],zero,zero,zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm22 = ymm22[2,3,2,3]
-; AVX512BW-NEXT: vporq %ymm11, %ymm22, %ymm22
+; AVX512BW-NEXT: vpshufb %ymm28, %ymm16, %ymm16
+; AVX512BW-NEXT: vporq %ymm21, %ymm16, %ymm16
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm16, %zmm16
+; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm29[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512BW-NEXT: vpshufd {{.*#+}} ymm21 = ymm11[0,2,3,3,4,6,7,7]
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm11 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512BW-NEXT: vpshufb %ymm11, %ymm30, %ymm30
-; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm29 = ymm29[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512BW-NEXT: vpshufd {{.*#+}} ymm29 = ymm29[0,2,3,3,4,6,7,7]
-; AVX512BW-NEXT: vmovdqu8 %ymm29, %ymm30 {%k1}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm29 = ymm30[2,3,2,3]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm29, %zmm22, %zmm22
+; AVX512BW-NEXT: vpshufb %ymm11, %ymm30, %ymm0
+; AVX512BW-NEXT: vmovdqu8 %ymm21, %ymm0 {%k1}
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm21 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm29[24,25],zero,ymm29[23],zero,ymm29[21,22,23,26],zero,ymm29[24],zero,ymm29[28,29,26,27]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm29 = ymm30[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm30[25],zero,ymm30[23],zero,zero,zero,zero,ymm30[26],zero,ymm30[24],zero,zero,zero,zero
+; AVX512BW-NEXT: vporq %ymm21, %ymm29, %ymm21
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm21, %zmm0
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: movabsq $1742999440035548184, %r10 # imm = 0x183060C183060C18
; AVX512BW-NEXT: kmovq %r10, %k3
-; AVX512BW-NEXT: vmovdqu8 %zmm22, %zmm15 {%k3}
-; AVX512BW-NEXT: vmovdqa64 (%r9), %zmm29
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm31[0,1,2,3],zmm29[4,5,6,7]
-; AVX512BW-NEXT: vpshufb {{.*#+}} zmm22 = zmm22[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm22[23],zero,zmm22[23,24,25,26],zero,zmm22[24],zero,zmm22[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm22[59],zero,zero,zero,zero,zmm22[62],zero,zmm22[60],zero,zero,zero,zero,zmm22[63],zero
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,3,2,3,6,7,6,7]
-; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm30
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm30[4,5,6,7]
-; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm0[25],zero,zmm0[23],zero,zero,zero,zero,zmm0[26],zero,zmm0[24],zero,zero,zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm0[59],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero,zmm0[61]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512BW-NEXT: vporq %zmm22, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm16 {%k3}
+; AVX512BW-NEXT: vmovdqa64 (%r9), %zmm0
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm0[4,5,6,7]
+; AVX512BW-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm1[23],zero,zmm1[23,24,25,26],zero,zmm1[24],zero,zmm1[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm1[59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero
+; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm29
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm31[0,1,2,3],zmm29[4,5,6,7]
+; AVX512BW-NEXT: vpshufb {{.*#+}} zmm21 = zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm21[25],zero,zmm21[23],zero,zero,zero,zero,zmm21[26],zero,zmm21[24],zero,zero,zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm21[59],zero,zero,zero,zero,zmm21[62],zero,zmm21[60],zero,zero,zero,zero,zmm21[63],zero,zmm21[61]
+; AVX512BW-NEXT: vporq %zmm1, %zmm21, %zmm1
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: movabsq $6971997760142192736, %r10 # imm = 0x60C183060C183060
; AVX512BW-NEXT: kmovq %r10, %k3
-; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm15 {%k3}
-; AVX512BW-NEXT: vmovdqa64 (%rax), %zmm22
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm0 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
-; AVX512BW-NEXT: vpermi2w %zmm22, %zmm17, %zmm0
+; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm16 {%k3}
+; AVX512BW-NEXT: vmovdqa64 (%rax), %zmm21
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm1 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
+; AVX512BW-NEXT: vpermi2w %zmm21, %zmm17, %zmm1
; AVX512BW-NEXT: movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
; AVX512BW-NEXT: kmovq %rax, %k5
-; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm15 {%k5}
-; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm0 = ymm18[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512BW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
-; AVX512BW-NEXT: vpshufb %ymm25, %ymm19, %ymm0 {%k1}
-; AVX512BW-NEXT: vpshufb %ymm1, %ymm19, %ymm1
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512BW-NEXT: vpshufb %ymm28, %ymm18, %ymm25
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512BW-NEXT: vporq %ymm1, %ymm25, %ymm1
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqa64 (%rdx), %zmm1
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm14, %zmm1, %zmm1
-; AVX512BW-NEXT: vpshufb %zmm20, %zmm1, %zmm1
+; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm16 {%k5}
+; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm1 = ymm18[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512BW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,1,1,4,4,5,5]
+; AVX512BW-NEXT: vpshufb %ymm23, %ymm19, %ymm1 {%k1}
+; AVX512BW-NEXT: vpshufb %ymm2, %ymm19, %ymm2
+; AVX512BW-NEXT: vpshufb %ymm28, %ymm18, %ymm23
+; AVX512BW-NEXT: vporq %ymm2, %ymm23, %ymm2
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vmovdqa64 (%rdx), %zmm2
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm14, %zmm2, %zmm2
+; AVX512BW-NEXT: vpshufb %zmm20, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 (%rcx), %zmm20
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm16, %zmm20, %zmm20
-; AVX512BW-NEXT: vpshufb %zmm21, %zmm20, %zmm20
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm15, %zmm20, %zmm20
+; AVX512BW-NEXT: vpshufb %zmm22, %zmm20, %zmm20
+; AVX512BW-NEXT: vporq %zmm2, %zmm20, %zmm2
; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm20 = zmm20[2,3,2,3,6,7,6,7]
-; AVX512BW-NEXT: vporq %zmm1, %zmm20, %zmm20
-; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm20 {%k3}
-; AVX512BW-NEXT: vmovdqa64 32(%r9), %xmm21
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm10, %zmm30, %zmm0
-; AVX512BW-NEXT: vpshufb %zmm23, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqa64 32(%r8), %xmm23
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm9, %zmm29, %zmm1
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm20 = zmm2[2,3,2,3,6,7,6,7]
+; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm20 {%k3}
+; AVX512BW-NEXT: vmovdqa64 32(%r9), %xmm22
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm10, %zmm29, %zmm1
; AVX512BW-NEXT: vpshufb %zmm24, %zmm1, %zmm1
-; AVX512BW-NEXT: vmovdqa64 32(%rdx), %xmm24
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512BW-NEXT: vmovdqa64 32(%r8), %xmm23
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512BW-NEXT: vpshufb %zmm25, %zmm0, %zmm2
+; AVX512BW-NEXT: vmovdqa 32(%rdx), %xmm0
+; AVX512BW-NEXT: vporq %zmm1, %zmm2, %zmm1
+; AVX512BW-NEXT: vmovdqa 32(%rcx), %xmm2
; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm1
-; AVX512BW-NEXT: vmovdqa64 32(%rcx), %xmm25
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm13, %zmm22, %zmm13
-; AVX512BW-NEXT: vpermw %zmm13, %zmm26, %zmm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm13, %zmm21, %zmm13
+; AVX512BW-NEXT: vpermw %zmm13, %zmm26, %zmm24
; AVX512BW-NEXT: movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
; AVX512BW-NEXT: kmovq %rax, %k5
-; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k5}
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %xmm0
+; AVX512BW-NEXT: vmovdqu8 %zmm24, %zmm1 {%k5}
+; AVX512BW-NEXT: vmovdqa64 32(%rdi), %xmm24
; AVX512BW-NEXT: movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
; AVX512BW-NEXT: kmovq %rax, %k5
; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm20 {%k5}
; AVX512BW-NEXT: vmovdqa 32(%rsi), %xmm1
; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm18 = ymm18[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512BW-NEXT: vpshufd {{.*#+}} ymm26 = ymm18[2,2,3,3,6,6,7,7]
-; AVX512BW-NEXT: vpshufb %ymm27, %ymm19, %ymm26 {%k4}
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm18[2,2,3,3,6,6,7,7]
+; AVX512BW-NEXT: vpshufb %ymm27, %ymm19, %ymm25 {%k4}
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm24[0],xmm1[0],xmm24[1],xmm1[1],xmm24[2],xmm1[2],xmm24[3],xmm1[3],xmm24[4],xmm1[4],xmm24[5],xmm1[5],xmm24[6],xmm1[6],xmm24[7],xmm1[7]
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm18 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
; AVX512BW-NEXT: vpshufb %xmm18, %xmm19, %xmm19
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm19 = zmm26[2,3,2,3],zmm19[0,1,0,1]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm26 = xmm24[0],xmm25[0],xmm24[1],xmm25[1],xmm24[2],xmm25[2],xmm24[3],xmm25[3],xmm24[4],xmm25[4],xmm24[5],xmm25[5],xmm24[6],xmm25[6],xmm24[7],xmm25[7]
-; AVX512BW-NEXT: vpshufb %ymm11, %ymm16, %ymm11
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm16 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512BW-NEXT: vpshufb %xmm16, %xmm26, %xmm26
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm25 = zmm25[2,3,2,3],zmm19[0,1,0,1]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm26 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX512BW-NEXT: vpshufb %ymm11, %ymm15, %ymm11
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm19 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512BW-NEXT: vpshufb %xmm19, %xmm26, %xmm15
; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm14 = ymm14[0,2,3,3,4,6,7,7]
; AVX512BW-NEXT: vmovdqu8 %ymm14, %ymm11 {%k1}
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm14 = zmm11[2,3,2,3],zmm26[0,1,0,1]
-; AVX512BW-NEXT: vmovdqu8 %zmm19, %zmm14 {%k2}
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm14 = zmm11[2,3,2,3],zmm15[0,1,0,1]
+; AVX512BW-NEXT: vmovdqu8 %zmm25, %zmm14 {%k2}
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm23[0],xmm22[0],xmm23[1],xmm22[1],xmm23[2],xmm22[2],xmm23[3],xmm22[3],xmm23[4],xmm22[4],xmm23[5],xmm22[5],xmm23[6],xmm22[6],xmm23[7],xmm22[7]
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm15 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512BW-NEXT: vpshufb %xmm15, %xmm11, %xmm11
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm9[27],zero,zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm10[27],zero,zero,zero,zero,ymm10[30],zero,ymm10[28],zero,zero,zero,zero,ymm10[31],zero,ymm10[29]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512BW-NEXT: vpor %ymm9, %ymm10, %ymm10
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm23[0],xmm21[0],xmm23[1],xmm21[1],xmm23[2],xmm21[2],xmm23[3],xmm21[3],xmm23[4],xmm21[4],xmm23[5],xmm21[5],xmm23[6],xmm21[6],xmm23[7],xmm21[7]
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm9 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512BW-NEXT: vpshufb %xmm9, %xmm11, %xmm11
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm11 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512BW-NEXT: vpermw %zmm22, %zmm11, %zmm11
+; AVX512BW-NEXT: vpor %ymm9, %ymm10, %ymm9
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[2,3,2,3],zmm11[0,1,0,1]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm10 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
+; AVX512BW-NEXT: vpermw %zmm21, %zmm10, %zmm10
; AVX512BW-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm11, %zmm10 {%k1}
+; AVX512BW-NEXT: vmovdqu8 %zmm10, %zmm9 {%k1}
; AVX512BW-NEXT: movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm10, %zmm14 {%k1}
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512BW-NEXT: vpshufb %xmm11, %xmm25, %xmm10
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm19 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512BW-NEXT: vpshufb %xmm19, %xmm24, %xmm26
-; AVX512BW-NEXT: vporq %xmm10, %xmm26, %xmm10
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm24 = xmm25[8],xmm24[8],xmm25[9],xmm24[9],xmm25[10],xmm24[10],xmm25[11],xmm24[11],xmm25[12],xmm24[12],xmm25[13],xmm24[13],xmm25[14],xmm24[14],xmm25[15],xmm24[15]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm24 = xmm24[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-NEXT: vinserti32x4 $2, %xmm24, %zmm10, %zmm10
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm24 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512BW-NEXT: vpshufb %xmm24, %xmm1, %xmm25
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm26 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512BW-NEXT: vpshufb %xmm26, %xmm0, %xmm27
-; AVX512BW-NEXT: vporq %xmm25, %xmm27, %xmm25
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm10[0,1,0,1,4,5,4,5]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm25, %zmm0
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm10 = zmm0[0,1,0,1,4,5,4,5]
-; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm10 {%k3}
+; AVX512BW-NEXT: vmovdqu8 %zmm9, %zmm14 {%k1}
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm10 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512BW-NEXT: vpshufb %xmm10, %xmm2, %xmm9
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512BW-NEXT: vpshufb %xmm11, %xmm0, %xmm25
+; AVX512BW-NEXT: vporq %xmm9, %xmm25, %xmm9
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm9, %zmm0
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm9
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm25 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512BW-NEXT: vpshufb %xmm25, %xmm24, %xmm26
+; AVX512BW-NEXT: vporq %xmm9, %xmm26, %xmm9
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm24[8],xmm1[9],xmm24[9],xmm1[10],xmm24[10],xmm1[11],xmm24[11],xmm1[12],xmm24[12],xmm1[13],xmm24[13],xmm1[14],xmm24[14],xmm1[15],xmm24[15]
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512BW-NEXT: vinserti32x4 $2, %xmm1, %zmm9, %zmm1
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm9 = zmm1[0,1,0,1,4,5,4,5]
+; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm9 {%k3}
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm0 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512BW-NEXT: vpshufb %xmm0, %xmm21, %xmm1
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm25 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
-; AVX512BW-NEXT: vpermi2w %zmm22, %zmm17, %zmm25
+; AVX512BW-NEXT: vpshufb %xmm0, %xmm22, %xmm1
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm24 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
+; AVX512BW-NEXT: vpermi2w %zmm21, %zmm17, %zmm24
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512BW-NEXT: vpshufb %xmm17, %xmm23, %xmm22
-; AVX512BW-NEXT: vporq %xmm1, %xmm22, %xmm1
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm21 = xmm23[8],xmm21[8],xmm23[9],xmm21[9],xmm23[10],xmm21[10],xmm23[11],xmm21[11],xmm23[12],xmm21[12],xmm23[13],xmm21[13],xmm23[14],xmm21[14],xmm23[15],xmm21[15]
+; AVX512BW-NEXT: vpshufb %xmm17, %xmm23, %xmm21
+; AVX512BW-NEXT: vporq %xmm1, %xmm21, %xmm1
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm21 = xmm23[8],xmm22[8],xmm23[9],xmm22[9],xmm23[10],xmm22[10],xmm23[11],xmm22[11],xmm23[12],xmm22[12],xmm23[13],xmm22[13],xmm23[14],xmm22[14],xmm23[15],xmm22[15]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm21 = xmm21[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
; AVX512BW-NEXT: vinserti32x4 $2, %xmm21, %zmm1, %zmm1
; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
; AVX512BW-NEXT: movabsq $290499906672591364, %rax # imm = 0x408102040810204
; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm25, %zmm1 {%k1}
+; AVX512BW-NEXT: vmovdqu8 %zmm24, %zmm1 {%k1}
; AVX512BW-NEXT: movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm10 {%k1}
-; AVX512BW-NEXT: vpshufb %xmm11, %xmm5, %xmm1
-; AVX512BW-NEXT: vpshufb %xmm19, %xmm4, %xmm11
-; AVX512BW-NEXT: vpor %xmm1, %xmm11, %xmm1
+; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm9 {%k1}
+; AVX512BW-NEXT: vpshufb %xmm10, %xmm5, %xmm1
+; AVX512BW-NEXT: vpshufb %xmm11, %xmm4, %xmm10
+; AVX512BW-NEXT: vpor %xmm1, %xmm10, %xmm1
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX512BW-NEXT: vpshufb %xmm16, %xmm4, %xmm4
+; AVX512BW-NEXT: vpshufb %xmm19, %xmm4, %xmm4
; AVX512BW-NEXT: vinserti32x4 $2, %xmm1, %zmm4, %zmm1
-; AVX512BW-NEXT: vpshufb %xmm24, %xmm7, %xmm4
-; AVX512BW-NEXT: vpshufb %xmm26, %xmm6, %xmm5
-; AVX512BW-NEXT: vpor %xmm4, %xmm5, %xmm4
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
-; AVX512BW-NEXT: vpshufb %xmm18, %xmm5, %xmm5
-; AVX512BW-NEXT: vinserti32x4 $2, %xmm4, %zmm5, %zmm4
+; AVX512BW-NEXT: vpshufb %xmm2, %xmm7, %xmm2
+; AVX512BW-NEXT: vpshufb %xmm25, %xmm6, %xmm4
+; AVX512BW-NEXT: vpor %xmm2, %xmm4, %xmm2
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512BW-NEXT: vpshufb %xmm18, %xmm4, %xmm4
+; AVX512BW-NEXT: vinserti32x4 $2, %xmm2, %zmm4, %zmm2
; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm4 = zmm4[0,1,0,1,4,5,4,5]
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm2 = zmm2[0,1,0,1,4,5,4,5]
; AVX512BW-NEXT: movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm4 {%k1}
-; AVX512BW-NEXT: vpshufb %xmm0, %xmm12, %xmm0
-; AVX512BW-NEXT: vpshufb %xmm17, %xmm3, %xmm1
+; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512BW-NEXT: vpshufb %xmm0, %xmm4, %xmm0
+; AVX512BW-NEXT: vpshufb %xmm17, %xmm12, %xmm1
; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm12[0],xmm3[1],xmm12[1],xmm3[2],xmm12[2],xmm3[3],xmm12[3],xmm3[4],xmm12[4],xmm3[5],xmm12[5],xmm3[6],xmm12[6],xmm3[7],xmm12[7]
-; AVX512BW-NEXT: vpshufb %xmm9, %xmm1, %xmm1
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3],xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; AVX512BW-NEXT: vpshufb %xmm15, %xmm1, %xmm1
; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm1 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
; AVX512BW-NEXT: vpermw %zmm13, %zmm1, %zmm1
@@ -10734,334 +10506,319 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm4 {%k1}
+; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT: vmovdqa64 %zmm4, (%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm2, (%rax)
; AVX512BW-NEXT: vmovdqa64 %zmm8, 320(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm10, 256(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm9, 256(%rax)
; AVX512BW-NEXT: vmovdqa64 %zmm14, 192(%rax)
; AVX512BW-NEXT: vmovdqa64 %zmm20, 128(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm2, 64(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm15, 384(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm3, 64(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm16, 384(%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: store_i8_stride7_vf64:
; AVX512BW-FCP: # %bb.0:
-; AVX512BW-FCP-NEXT: subq $40, %rsp
+; AVX512BW-FCP-NEXT: subq $104, %rsp
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FCP-NEXT: vmovdqa (%rax), %ymm0
-; AVX512BW-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm28 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512BW-FCP-NEXT: vpshufb %ymm28, %ymm0, %ymm1
-; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512BW-FCP-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpermw %ymm0, %ymm2, %ymm2
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm2
-; AVX512BW-FCP-NEXT: vmovdqa (%r9), %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm3
-; AVX512BW-FCP-NEXT: vmovdqa64 %ymm0, %ymm19
-; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %ymm20
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm22 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm22, %ymm20, %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa (%rax), %ymm2
+; AVX512BW-FCP-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm20 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512BW-FCP-NEXT: vpshufb %ymm20, %ymm2, %ymm0
+; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512BW-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpermw %ymm2, %ymm1, %ymm1
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm2
+; AVX512BW-FCP-NEXT: vmovdqa (%r9), %ymm1
+; AVX512BW-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm1, %ymm3
+; AVX512BW-FCP-NEXT: vmovdqa (%r8), %ymm4
+; AVX512BW-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm4, %ymm4
; AVX512BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
-; AVX512BW-FCP-NEXT: vmovdqa (%r9), %xmm0
-; AVX512BW-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512BW-FCP-NEXT: vmovdqa (%r8), %xmm4
-; AVX512BW-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; AVX512BW-FCP-NEXT: vmovdqa64 (%r9), %xmm16
+; AVX512BW-FCP-NEXT: vmovdqa (%r8), %xmm15
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm15[8],xmm16[8],xmm15[9],xmm16[9],xmm15[10],xmm16[10],xmm15[11],xmm16[11],xmm15[12],xmm16[12],xmm15[13],xmm16[13],xmm15[14],xmm16[14],xmm15[15],xmm16[15]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm4, %zmm3
; AVX512BW-FCP-NEXT: movabsq $2323999253380730912, %r10 # imm = 0x2040810204081020
; AVX512BW-FCP-NEXT: kmovq %r10, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm2
-; AVX512BW-FCP-NEXT: vmovdqa %ymm0, %ymm9
-; AVX512BW-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm5
-; AVX512BW-FCP-NEXT: vmovdqa64 %ymm0, %ymm18
-; AVX512BW-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-FCP-NEXT: vpor %ymm2, %ymm5, %ymm2
-; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm14
-; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm15
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm15[8],xmm14[8],xmm15[9],xmm14[9],xmm15[10],xmm14[10],xmm15[11],xmm14[11],xmm15[12],xmm14[12],xmm15[13],xmm14[13],xmm15[14],xmm14[14],xmm15[15],xmm14[15]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm5, %zmm2
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm5
-; AVX512BW-FCP-NEXT: vmovdqa %ymm0, %ymm6
-; AVX512BW-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %ymm24
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm21 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512BW-FCP-NEXT: vpshufb %ymm21, %ymm24, %ymm16
-; AVX512BW-FCP-NEXT: vporq %ymm5, %ymm16, %ymm5
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %xmm16
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm17
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm25 = xmm17[8],xmm16[8],xmm17[9],xmm16[9],xmm17[10],xmm16[10],xmm17[11],xmm16[11],xmm17[12],xmm16[12],xmm17[13],xmm16[13],xmm17[14],xmm16[14],xmm17[15],xmm16[15]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm25 = xmm25[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm25[0,1,0,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm25, %zmm13
+; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm4
+; AVX512BW-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512BW-FCP-NEXT: vpshufb %ymm2, %ymm4, %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm6
+; AVX512BW-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm5, %ymm6, %ymm6
+; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm6, %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %xmm17
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %xmm19
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm19[8],xmm17[8],xmm19[9],xmm17[9],xmm19[10],xmm17[10],xmm19[11],xmm17[11],xmm19[12],xmm17[12],xmm19[13],xmm17[13],xmm19[14],xmm17[14],xmm19[15],xmm17[15]
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm29 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm29, %xmm6, %xmm6
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm6, %zmm4
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm6
+; AVX512BW-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm9, %ymm6, %ymm6
+; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm7
+; AVX512BW-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm23 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512BW-FCP-NEXT: vpshufb %ymm23, %ymm7, %ymm21
+; AVX512BW-FCP-NEXT: vporq %ymm6, %ymm21, %ymm6
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %xmm21
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm22
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm24 = xmm22[8],xmm21[8],xmm22[9],xmm21[9],xmm22[10],xmm21[10],xmm22[11],xmm21[11],xmm22[12],xmm21[12],xmm22[13],xmm21[13],xmm22[14],xmm21[14],xmm22[15],xmm21[15]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm24, %xmm24
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm24 = ymm24[0,1,0,1]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm24, %zmm14
; AVX512BW-FCP-NEXT: movabsq $435749860008887046, %r10 # imm = 0x60C183060C18306
; AVX512BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm13 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm14 {%k1}
; AVX512BW-FCP-NEXT: movabsq $4066998693416279096, %r10 # imm = 0x3870E1C3870E1C38
; AVX512BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm13 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm2
-; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm3
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm3[2,3,2,3]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm3
-; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm3, %ymm26
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,3,2,3]
-; AVX512BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm4
-; AVX512BW-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm11
-; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm11, %ymm4
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm25, %zmm4, %zmm4
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm11
-; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm11, %ymm25
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm27
-; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm27, %ymm26
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,3,2,3]
-; AVX512BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512BW-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm12
-; AVX512BW-FCP-NEXT: vpshufb %ymm21, %ymm27, %ymm21
-; AVX512BW-FCP-NEXT: vporq %ymm12, %ymm21, %ymm12
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm25, %zmm12, %zmm12
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm14 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm3
+; AVX512BW-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
+; AVX512BW-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm5
+; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512BW-FCP-NEXT: vpshufb %ymm5, %ymm3, %ymm5
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm24
+; AVX512BW-FCP-NEXT: vporq %ymm5, %ymm24, %ymm5
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512BW-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm9
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm25
+; AVX512BW-FCP-NEXT: vpshufb %ymm23, %ymm25, %ymm23
+; AVX512BW-FCP-NEXT: vporq %ymm9, %ymm23, %ymm9
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm23
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm25, %ymm24
+; AVX512BW-FCP-NEXT: vporq %ymm23, %ymm24, %ymm23
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm23 = ymm23[2,3,2,3]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm23, %zmm9, %zmm9
; AVX512BW-FCP-NEXT: movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
-; AVX512BW-FCP-NEXT: kmovq %r10, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm12 {%k2}
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512BW-FCP-NEXT: vmovdqa 32(%r8), %ymm4
-; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm21
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm21[2,3,2,3]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512BW-FCP-NEXT: vmovdqa 32(%r9), %ymm0
-; AVX512BW-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm26
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,3,2,3]
-; AVX512BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm1
-; AVX512BW-FCP-NEXT: vpshufb %ymm22, %ymm4, %ymm22
-; AVX512BW-FCP-NEXT: vporq %ymm1, %ymm22, %ymm1
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm25, %zmm1, %zmm1
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rax), %ymm31
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm22 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
-; AVX512BW-FCP-NEXT: vpermw %ymm31, %ymm22, %ymm22
-; AVX512BW-FCP-NEXT: vpshufb %ymm28, %ymm31, %ymm25
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm25, %zmm22
-; AVX512BW-FCP-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
; AVX512BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm1 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm9 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa 32(%r9), %ymm4
+; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm28
+; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm28, %ymm1
+; AVX512BW-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm28, %ymm1
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm4, %ymm23
+; AVX512BW-FCP-NEXT: vporq %ymm1, %ymm23, %ymm1
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rax), %ymm6
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm23 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
+; AVX512BW-FCP-NEXT: vpermw %ymm6, %ymm23, %ymm23
+; AVX512BW-FCP-NEXT: vpshufb %ymm20, %ymm6, %ymm20
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm23, %zmm20, %zmm20
+; AVX512BW-FCP-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
+; AVX512BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm20, %zmm1 {%k2}
; AVX512BW-FCP-NEXT: movabsq $-4357498600088870461, %r10 # imm = 0xC3870E1C3870E1C3
-; AVX512BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm12 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm25
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm2[0,1,2,3],zmm25[4,5,6,7]
+; AVX512BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm9 {%k2}
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm23
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[0,1,2,3],zmm23[4,5,6,7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,zmm1[23],zero,zmm1[21,22,23,26],zero,zmm1[24],zero,zmm1[28,29,26,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero,zmm1[61],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm26
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm3[0,1,2,3],zmm26[4,5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm24
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm24[4,5,6,7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm2[25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zero,zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[62],zero,zmm2[60],zero,zero,zero,zero,zmm2[63],zero,zmm2[61],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm2[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: vporq %zmm1, %zmm2, %zmm1
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm28
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm27[0,1,2,3],zmm28[4,5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm26
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm25[0,1,2,3],zmm26[4,5,6,7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zero,zero,zmm2[27],zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,60,61,62],zero,zmm2[60],zero,zmm2[62,63,62,63],zero,zmm2[61],zero,zmm2[63,60,61]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm2[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm29
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm11[0,1,2,3],zmm29[4,5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm27
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm5[0,1,2,3],zmm27[4,5,6,7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm3[23],zero,zero,zero,zero,zmm3[26],zero,zmm3[24],zero,zero,zero,zero,zmm3[27],zero,zmm3[25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zmm3[62],zero,zmm3[60],zero,zero,zero,zero,zmm3[63],zero,zmm3[61],zero,zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vporq %zmm2, %zmm3, %zmm22
+; AVX512BW-FCP-NEXT: vporq %zmm2, %zmm3, %zmm2
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm20 = zmm2[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: movabsq $1742999440035548184, %r10 # imm = 0x183060C183060C18
-; AVX512BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm22 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 (%r9), %zmm27
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm4[0,1,2,3],zmm27[4,5,6,7]
+; AVX512BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm20 {%k2}
+; AVX512BW-FCP-NEXT: vmovdqa64 (%r9), %zmm25
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm28[0,1,2,3],zmm25[4,5,6,7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm1[23],zero,zmm1[23,24,25,26],zero,zmm1[24],zero,zmm1[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm1[59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero
+; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %zmm28
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm4[0,1,2,3],zmm28[4,5,6,7]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm2[59],zero,zero,zero,zero,zmm2[62],zero,zmm2[60],zero,zero,zero,zero,zmm2[63],zero,zmm2[61]
+; AVX512BW-FCP-NEXT: vporq %zmm1, %zmm2, %zmm1
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %zmm30
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm30[4,5,6,7]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm0[25],zero,zmm0[23],zero,zero,zero,zero,zmm0[26],zero,zmm0[24],zero,zero,zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm0[59],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero,zmm0[61]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512BW-FCP-NEXT: movabsq $6971997760142192736, %r10 # imm = 0x60C183060C183060
-; AVX512BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm22 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rax), %zmm10
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm0 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
-; AVX512BW-FCP-NEXT: vpermi2w %zmm10, %zmm31, %zmm0
+; AVX512BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm20 {%k2}
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rax), %zmm31
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm1 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
+; AVX512BW-FCP-NEXT: vpermi2w %zmm31, %zmm6, %zmm1
; AVX512BW-FCP-NEXT: movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm22 {%k3}
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm6[28],zero,ymm6[30,31,30,31],zero,ymm6[29],zero,ymm6[31,28,29]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm24[30],zero,ymm24[28],zero,zero,zero,zero,ymm24[31],zero,ymm24[29],zero,zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpor %ymm0, %ymm1, %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm8
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm7
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm6
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm18[30],zero,ymm18[28],zero,zero,zero,zero,ymm18[31],zero,ymm18[29],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero,ymm9[29],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpor %ymm1, %ymm4, %ymm4
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm2
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %xmm1
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm18 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512BW-FCP-NEXT: vpshufb %xmm18, %xmm0, %xmm0
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm9
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k2}
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm19[27],zero,zero,zero,zero,ymm19[30],zero,ymm19[28],zero,zero,zero,zero,ymm19[31],zero
-; AVX512BW-FCP-NEXT: vmovdqa64 %ymm19, %ymm21
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm20[27],zero,zero,zero,zero,ymm20[30],zero,ymm20[28],zero,zero,zero,zero,ymm20[31],zero,ymm20[29]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpor %ymm0, %ymm6, %ymm3
-; AVX512BW-FCP-NEXT: vmovdqa 32(%r9), %xmm5
-; AVX512BW-FCP-NEXT: vmovdqa 32(%r8), %xmm4
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm19 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm0, %xmm0
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512BW-FCP-NEXT: vpermw %zmm10, %zmm3, %zmm3
-; AVX512BW-FCP-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
-; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm0 {%k2}
-; AVX512BW-FCP-NEXT: movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
-; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm9 {%k2}
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm3
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm2, %xmm23
-; AVX512BW-FCP-NEXT: vporq %xmm3, %xmm23, %xmm3
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm1
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512BW-FCP-NEXT: vpshufb %xmm2, %xmm7, %xmm3
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512BW-FCP-NEXT: vpshufb %xmm0, %xmm8, %xmm23
-; AVX512BW-FCP-NEXT: vporq %xmm3, %xmm23, %xmm3
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm7[8],xmm8[8],xmm7[9],xmm8[9],xmm7[10],xmm8[10],xmm7[11],xmm8[11],xmm7[12],xmm8[12],xmm7[13],xmm8[13],xmm7[14],xmm8[14],xmm7[15],xmm8[15]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm3, %zmm3
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm23 = zmm3[0,1,0,1,4,5,4,5]
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm23 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512BW-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm3
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm4, %xmm8
-; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm8, %xmm3
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm4, %zmm3, %zmm3
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm4 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
-; AVX512BW-FCP-NEXT: vpermi2w %zmm10, %zmm31, %zmm4
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm20 {%k3}
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm5
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %xmm4
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512BW-FCP-NEXT: vpshufb %xmm13, %xmm4, %xmm1
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm5, %xmm2
+; AVX512BW-FCP-NEXT: vpor %xmm1, %xmm2, %xmm1
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
+; AVX512BW-FCP-NEXT: vpshufb %xmm29, %xmm2, %xmm2
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm1, %zmm3
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm1
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm29
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm2, %xmm30
+; AVX512BW-FCP-NEXT: vporq %xmm29, %xmm30, %xmm29
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm30 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm30, %xmm30
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm30, %zmm29, %zmm29
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,1,0,1,4,5,4,5]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm29 = zmm29[0,1,0,1,4,5,4,5]
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm29 {%k2}
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%r9), %xmm30
+; AVX512BW-FCP-NEXT: vmovdqa 32(%r8), %xmm3
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm30, %xmm0
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm3, %xmm18
+; AVX512BW-FCP-NEXT: vporq %xmm0, %xmm18, %xmm0
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm18 = xmm3[8],xmm30[8],xmm3[9],xmm30[9],xmm3[10],xmm30[10],xmm3[11],xmm30[11],xmm3[12],xmm30[12],xmm3[13],xmm30[13],xmm3[14],xmm30[14],xmm3[15],xmm30[15]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm18 = xmm18[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm18, %zmm0, %zmm0
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm18 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
+; AVX512BW-FCP-NEXT: vpermi2w %zmm31, %zmm6, %zmm18
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
; AVX512BW-FCP-NEXT: movabsq $290499906672591364, %rax # imm = 0x408102040810204
-; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm3 {%k2}
+; AVX512BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm0 {%k3}
; AVX512BW-FCP-NEXT: movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
-; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm23 {%k2}
-; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm15, %xmm3
-; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm14, %xmm4
-; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3],xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
-; AVX512BW-FCP-NEXT: vpshufb %xmm18, %xmm4, %xmm4
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm3, %zmm4, %zmm3
-; AVX512BW-FCP-NEXT: vpshufb %xmm2, %xmm17, %xmm2
-; AVX512BW-FCP-NEXT: vpshufb %xmm0, %xmm16, %xmm0
-; AVX512BW-FCP-NEXT: vpor %xmm2, %xmm0, %xmm0
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm16[0],xmm17[0],xmm16[1],xmm17[1],xmm16[2],xmm17[2],xmm16[3],xmm17[3],xmm16[4],xmm17[4],xmm16[5],xmm17[5],xmm16[6],xmm17[6],xmm16[7],xmm17[7]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm3[0,1,0,1,4,5,4,5]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm29 {%k3}
+; AVX512BW-FCP-NEXT: vpshufb %xmm13, %xmm19, %xmm0
+; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm17, %xmm6
+; AVX512BW-FCP-NEXT: vpor %xmm0, %xmm6, %xmm0
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm17[0],xmm19[0],xmm17[1],xmm19[1],xmm17[2],xmm19[2],xmm17[3],xmm19[3],xmm17[4],xmm19[4],xmm17[5],xmm19[5],xmm17[6],xmm19[6],xmm17[7],xmm19[7]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm6, %xmm6
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm6, %zmm0
+; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm22, %xmm6
+; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm21, %xmm8
+; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm8, %xmm6
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm21[0],xmm22[0],xmm21[1],xmm22[1],xmm21[2],xmm22[2],xmm21[3],xmm22[3],xmm21[4],xmm22[4],xmm21[5],xmm22[5],xmm21[6],xmm22[6],xmm21[7],xmm22[7]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm8, %xmm8
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm8, %zmm6
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm6[0,1,0,1,4,5,4,5]
; AVX512BW-FCP-NEXT: movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
-; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm0 {%k2}
-; AVX512BW-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX512BW-FCP-NEXT: vpshufb %xmm1, %xmm3, %xmm1
-; AVX512BW-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm4, %xmm2
-; AVX512BW-FCP-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm2, %xmm2
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm1
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, (%rsp), %zmm10, %zmm2 # 32-byte Folded Reload
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
-; AVX512BW-FCP-NEXT: vpermw %zmm2, %zmm3, %zmm3
+; AVX512BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm8, %zmm0 {%k3}
+; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm16, %xmm6
+; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm15, %xmm7
+; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm15[0],xmm16[0],xmm15[1],xmm16[1],xmm15[2],xmm16[2],xmm15[3],xmm16[3],xmm15[4],xmm16[4],xmm15[5],xmm16[5],xmm15[6],xmm16[6],xmm15[7],xmm16[7]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm7, %xmm7
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm7, %zmm6
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm6[0,1,0,1,4,5,4,5]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, (%rsp), %zmm31, %zmm8 # 32-byte Folded Reload
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm7 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
+; AVX512BW-FCP-NEXT: vpermw %zmm8, %zmm7, %zmm7
; AVX512BW-FCP-NEXT: movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
-; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm1 {%k2}
+; AVX512BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm6 {%k3}
; AVX512BW-FCP-NEXT: movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
-; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm24, %zmm28, %zmm1
+; AVX512BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm0 {%k3}
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm1
+; AVX512BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm13[28],zero,ymm13[30,31,30,31],zero,ymm13[29],zero,ymm13[31,28,29]
+; AVX512BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero,ymm15[29],zero,zero,zero
+; AVX512BW-FCP-NEXT: vpor %ymm2, %ymm6, %ymm2
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm2[2,3,2,3],zmm1[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
+; AVX512BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29],zero
+; AVX512BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm7[30],zero,ymm7[28],zero,zero,zero,zero,ymm7[31],zero,ymm7[29],zero,zero
+; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm4[2,3,2,3],zmm2[0,1,0,1]
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm30[0],xmm3[1],xmm30[1],xmm3[2],xmm30[2],xmm3[3],xmm30[3],xmm3[4],xmm30[4],xmm3[5],xmm30[5],xmm3[6],xmm30[6],xmm3[7],xmm30[7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm1, %xmm1
+; AVX512BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero
+; AVX512BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29]
+; AVX512BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[2,3,2,3],zmm1[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
+; AVX512BW-FCP-NEXT: vpermw %zmm31, %zmm3, %zmm3
+; AVX512BW-FCP-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
+; AVX512BW-FCP-NEXT: kmovq %rax, %k1
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm1 {%k1}
+; AVX512BW-FCP-NEXT: movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
+; AVX512BW-FCP-NEXT: kmovq %rax, %k1
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm26, %zmm1
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[18,19,20,21],zero,zmm1[19],zero,zmm1[21,20,21,22],zero,zmm1[20],zero,zmm1[22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm1[55],zero,zero,zero,zero,zmm1[58],zero,zmm1[56],zero,zero,zero,zero,zmm1[59],zero
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 32-byte Folded Reload
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm27, %zmm3
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm3[21],zero,zmm3[19],zero,zero,zero,zero,zmm3[22],zero,zmm3[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm3[55],zero,zero,zero,zero,zmm3[58],zero,zmm3[56],zero,zero,zero,zero,zmm3[59],zero,zmm3[57]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: vporq %zmm1, %zmm3, %zmm1
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm3 # 32-byte Folded Reload
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm24, %zmm3
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm3[18],zero,zmm3[18,19,20,21],zero,zmm3[19],zero,zmm3[25,26,27,22],zero,zmm3[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm3[56,57],zero,zmm3[55],zero,zmm3[53,54,55,58],zero,zmm3[56],zero,zmm3[60,61,58,59]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm4 # 32-byte Folded Reload
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm23, %zmm4
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm4[18],zero,zero,zero,zero,zmm4[21],zero,zmm4[19],zero,zero,zero,zero,zmm4[22],zero,zmm4[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm4[57],zero,zmm4[55],zero,zero,zero,zero,zmm4[58],zero,zmm4[56],zero,zero,zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm4[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: vporq %zmm3, %zmm4, %zmm3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm3 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm20, %zmm30, %zmm1
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm3 {%k2}
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm28, %zmm1
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[20],zero,zmm1[18],zero,zmm1[20,21,20,21],zero,zmm1[19],zero,zmm1[19,20,21,22],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[56,57,56,57],zero,zmm1[55],zero,zmm1[55,56,57,58],zero,zmm1[56],zero,zmm1[62,63]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm21, %zmm27, %zmm4
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm25, %zmm4
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm4[20],zero,zmm4[18],zero,zero,zero,zero,zmm4[21],zero,zmm4[19],zero,zero,zero,zero,zmm4[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm4[57],zero,zmm4[55],zero,zero,zero,zero,zmm4[58],zero,zmm4[56],zero,zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm4[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: vporq %zmm1, %zmm4, %zmm1
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm4 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
-; AVX512BW-FCP-NEXT: vpermw %zmm2, %zmm4, %zmm2
+; AVX512BW-FCP-NEXT: vpermw %zmm8, %zmm4, %zmm4
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
; AVX512BW-FCP-NEXT: movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm1 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm1 {%k1}
; AVX512BW-FCP-NEXT: movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm3 {%k1}
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, 128(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm12, 320(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, 320(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 192(%rax)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm23, 256(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, 192(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, 64(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm22, 384(%rax)
-; AVX512BW-FCP-NEXT: addq $40, %rsp
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm29, 256(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, 64(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm20, 384(%rax)
+; AVX512BW-FCP-NEXT: addq $104, %rsp
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -11069,8 +10826,8 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-NEXT: vmovdqa (%rax), %ymm13
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm27 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm13, %ymm0
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm26 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512DQ-BW-NEXT: vpshufb %ymm26, %ymm13, %ymm0
; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
; AVX512DQ-BW-NEXT: # ymm1 = mem[0,1,0,1]
; AVX512DQ-BW-NEXT: vpermw %ymm13, %ymm1, %ymm1
@@ -11079,12 +10836,13 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm17 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm9, %ymm1
; AVX512DQ-BW-NEXT: vmovdqa (%r8), %ymm10
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm22 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512DQ-BW-NEXT: vpshufb %ymm22, %ymm10, %ymm2
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm21 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512DQ-BW-NEXT: vpshufb %ymm21, %ymm10, %ymm2
; AVX512DQ-BW-NEXT: vpor %ymm1, %ymm2, %ymm2
-; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm12
-; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm3
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm12[8],xmm3[9],xmm12[9],xmm3[10],xmm12[10],xmm3[11],xmm12[11],xmm3[12],xmm12[12],xmm3[13],xmm12[13],xmm3[14],xmm12[14],xmm3[15],xmm12[15]
+; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm1
+; AVX512DQ-BW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm12
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm12[8],xmm1[8],xmm12[9],xmm1[9],xmm12[10],xmm1[10],xmm12[11],xmm1[11],xmm12[12],xmm1[12],xmm12[13],xmm1[13],xmm12[14],xmm1[14],xmm12[15],xmm1[15]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm8
@@ -11094,270 +10852,257 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %ymm14
; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm0 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
; AVX512DQ-BW-NEXT: vpshufb %ymm0, %ymm14, %ymm2
-; AVX512DQ-BW-NEXT: vmovdqa64 (%rcx), %ymm16
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm23 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512DQ-BW-NEXT: vpshufb %ymm23, %ymm16, %ymm4
+; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %ymm15
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm20 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512DQ-BW-NEXT: vpshufb %ymm20, %ymm15, %ymm4
; AVX512DQ-BW-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm4
; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %xmm5
; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm6, %zmm20
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm6, %zmm22
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %ymm18
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm24 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
; AVX512DQ-BW-NEXT: vpshufb %ymm24, %ymm18, %ymm2
; AVX512DQ-BW-NEXT: vmovdqa64 (%rsi), %ymm19
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm26 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512DQ-BW-NEXT: vpshufb %ymm26, %ymm19, %ymm6
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm25 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm19, %ymm6
; AVX512DQ-BW-NEXT: vpor %ymm2, %ymm6, %ymm2
; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm6
; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %xmm7
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm21 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm21 = xmm21[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm21 = ymm21[0,1,0,1]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm21, %zmm2
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm23 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm23 = xmm23[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm23 = ymm23[0,1,0,1]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm23, %zmm3
; AVX512DQ-BW-NEXT: movabsq $435749860008887046, %r10 # imm = 0x60C183060C18306
; AVX512DQ-BW-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm20, %zmm2 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm22, %zmm3 {%k1}
; AVX512DQ-BW-NEXT: movabsq $4066998693416279096, %r10 # imm = 0x3870E1C3870E1C38
; AVX512DQ-BW-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm8, %zmm2 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm20 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm8, %zmm3 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdx), %ymm29
+; AVX512DQ-BW-NEXT: vpshufb %ymm0, %ymm29, %ymm0
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%rcx), %ymm30
+; AVX512DQ-BW-NEXT: vpshufb %ymm20, %ymm30, %ymm8
+; AVX512DQ-BW-NEXT: vpor %ymm0, %ymm8, %ymm0
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm20 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
; AVX512DQ-BW-NEXT: vpshufb %ymm20, %ymm29, %ymm8
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm22 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512DQ-BW-NEXT: vpshufb %ymm22, %ymm30, %ymm23
+; AVX512DQ-BW-NEXT: vporq %ymm8, %ymm23, %ymm8
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm21 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%rcx), %ymm30
-; AVX512DQ-BW-NEXT: vpshufb %ymm21, %ymm30, %ymm25
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512DQ-BW-NEXT: vporq %ymm8, %ymm25, %ymm8
-; AVX512DQ-BW-NEXT: vpshufb %ymm0, %ymm29, %ymm0
-; AVX512DQ-BW-NEXT: vpshufb %ymm23, %ymm30, %ymm23
-; AVX512DQ-BW-NEXT: vporq %ymm0, %ymm23, %ymm0
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
; AVX512DQ-BW-NEXT: vmovdqa64 32(%rsi), %ymm28
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm15
-; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} ymm8 = ymm15[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdi), %ymm16
+; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} ymm8 = ymm16[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[0,0,1,1,4,4,5,5]
-; AVX512DQ-BW-NEXT: vpbroadcastd {{.*#+}} ymm25 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
+; AVX512DQ-BW-NEXT: vpbroadcastd {{.*#+}} ymm23 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
; AVX512DQ-BW-NEXT: movl $676341840, %r10d # imm = 0x28502850
; AVX512DQ-BW-NEXT: kmovd %r10d, %k1
-; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm28, %ymm8 {%k1}
+; AVX512DQ-BW-NEXT: vpshufb %ymm23, %ymm28, %ymm8 {%k1}
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512DQ-BW-NEXT: vpshufb %ymm24, %ymm15, %ymm23
-; AVX512DQ-BW-NEXT: vpshufb %ymm26, %ymm28, %ymm24
-; AVX512DQ-BW-NEXT: vporq %ymm23, %ymm24, %ymm23
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm23, %zmm8
+; AVX512DQ-BW-NEXT: vpshufb %ymm24, %ymm16, %ymm24
+; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm28, %ymm25
+; AVX512DQ-BW-NEXT: vporq %ymm24, %ymm25, %ymm24
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm24, %zmm8
; AVX512DQ-BW-NEXT: movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
; AVX512DQ-BW-NEXT: kmovq %r10, %k2
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm8 {%k2}
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm23 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%r8), %ymm31
-; AVX512DQ-BW-NEXT: vpshufb %ymm23, %ymm31, %ymm0
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm26 = ymm0[2,3,2,3]
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm24 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512DQ-BW-NEXT: vmovdqa 32(%r9), %ymm0
-; AVX512DQ-BW-NEXT: vpshufb %ymm24, %ymm0, %ymm11
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512DQ-BW-NEXT: vporq %ymm26, %ymm11, %ymm11
-; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm0, %ymm17
-; AVX512DQ-BW-NEXT: vpshufb %ymm22, %ymm31, %ymm22
-; AVX512DQ-BW-NEXT: vporq %ymm17, %ymm22, %ymm17
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm17, %zmm11
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%r9), %ymm31
+; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm31, %ymm17
+; AVX512DQ-BW-NEXT: vmovdqa 32(%r8), %ymm1
+; AVX512DQ-BW-NEXT: vpshufb %ymm21, %ymm1, %ymm21
+; AVX512DQ-BW-NEXT: vporq %ymm17, %ymm21, %ymm17
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm24 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512DQ-BW-NEXT: vpshufb %ymm24, %ymm1, %ymm21
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm25 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm31, %ymm27
+; AVX512DQ-BW-NEXT: vporq %ymm21, %ymm27, %ymm21
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm21 = ymm21[2,3,2,3]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm21, %zmm17, %zmm21
; AVX512DQ-BW-NEXT: vmovdqa64 32(%rax), %ymm17
-; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm17, %ymm22
+; AVX512DQ-BW-NEXT: vpshufb %ymm26, %ymm17, %ymm27
; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm26 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
-; AVX512DQ-BW-NEXT: vpermw %ymm17, %ymm26, %ymm27
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm27, %zmm22, %zmm22
+; AVX512DQ-BW-NEXT: vpermw %ymm17, %ymm26, %ymm11
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm27, %zmm11
; AVX512DQ-BW-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
; AVX512DQ-BW-NEXT: kmovq %r10, %k3
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm22, %zmm11 {%k3}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm11, %zmm21 {%k3}
; AVX512DQ-BW-NEXT: movabsq $-4357498600088870461, %r10 # imm = 0xC3870E1C3870E1C3
; AVX512DQ-BW-NEXT: kmovq %r10, %k3
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm11, %zmm8 {%k3}
-; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm15[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm21, %zmm8 {%k3}
+; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm16[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[2,2,3,3,6,6,7,7]
; AVX512DQ-BW-NEXT: movl $338170920, %r10d # imm = 0x14281428
; AVX512DQ-BW-NEXT: kmovd %r10d, %k4
; AVX512DQ-BW-NEXT: vpbroadcastd {{.*#+}} ymm27 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm28, %ymm11 {%k4}
-; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512DQ-BW-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpshufb %ymm1, %ymm28, %ymm22
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm22 = ymm22[2,3,2,3]
+; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512DQ-BW-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpshufb %ymm2, %ymm28, %ymm21
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm28 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512DQ-BW-NEXT: vpshufb %ymm28, %ymm15, %ymm15
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX512DQ-BW-NEXT: vporq %ymm22, %ymm15, %ymm15
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm15, %zmm15
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm29[24,25],zero,ymm29[23],zero,ymm29[21,22,23,26],zero,ymm29[24],zero,ymm29[28,29,26,27]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm22 = ymm30[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm30[25],zero,ymm30[23],zero,zero,zero,zero,ymm30[26],zero,ymm30[24],zero,zero,zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm22 = ymm22[2,3,2,3]
-; AVX512DQ-BW-NEXT: vporq %ymm11, %ymm22, %ymm22
+; AVX512DQ-BW-NEXT: vpshufb %ymm28, %ymm16, %ymm16
+; AVX512DQ-BW-NEXT: vporq %ymm21, %ymm16, %ymm16
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm16, %zmm16
+; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm29[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm21 = ymm11[0,2,3,3,4,6,7,7]
; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm11 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm30, %ymm30
-; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm29 = ymm29[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm29 = ymm29[0,2,3,3,4,6,7,7]
-; AVX512DQ-BW-NEXT: vmovdqu8 %ymm29, %ymm30 {%k1}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm29 = ymm30[2,3,2,3]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm29, %zmm22, %zmm22
+; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm30, %ymm0
+; AVX512DQ-BW-NEXT: vmovdqu8 %ymm21, %ymm0 {%k1}
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm21 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm29[24,25],zero,ymm29[23],zero,ymm29[21,22,23,26],zero,ymm29[24],zero,ymm29[28,29,26,27]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm29 = ymm30[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm30[25],zero,ymm30[23],zero,zero,zero,zero,ymm30[26],zero,ymm30[24],zero,zero,zero,zero
+; AVX512DQ-BW-NEXT: vporq %ymm21, %ymm29, %ymm21
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm21, %zmm0
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: movabsq $1742999440035548184, %r10 # imm = 0x183060C183060C18
; AVX512DQ-BW-NEXT: kmovq %r10, %k3
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm22, %zmm15 {%k3}
-; AVX512DQ-BW-NEXT: vmovdqa64 (%r9), %zmm29
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm31[0,1,2,3],zmm29[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm22 = zmm22[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm22[23],zero,zmm22[23,24,25,26],zero,zmm22[24],zero,zmm22[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm22[59],zero,zero,zero,zero,zmm22[62],zero,zmm22[60],zero,zero,zero,zero,zmm22[63],zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-NEXT: vmovdqa64 (%r8), %zmm30
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm30[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm0[25],zero,zmm0[23],zero,zero,zero,zero,zmm0[26],zero,zmm0[24],zero,zero,zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm0[59],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero,zmm0[61]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-NEXT: vporq %zmm22, %zmm0, %zmm0
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm16 {%k3}
+; AVX512DQ-BW-NEXT: vmovdqa64 (%r9), %zmm0
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm0[4,5,6,7]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm1[23],zero,zmm1[23,24,25,26],zero,zmm1[24],zero,zmm1[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm1[59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero
+; AVX512DQ-BW-NEXT: vmovdqa64 (%r8), %zmm29
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm31[0,1,2,3],zmm29[4,5,6,7]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm21 = zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm21[25],zero,zmm21[23],zero,zero,zero,zero,zmm21[26],zero,zmm21[24],zero,zero,zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm21[59],zero,zero,zero,zero,zmm21[62],zero,zmm21[60],zero,zero,zero,zero,zmm21[63],zero,zmm21[61]
+; AVX512DQ-BW-NEXT: vporq %zmm1, %zmm21, %zmm1
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: movabsq $6971997760142192736, %r10 # imm = 0x60C183060C183060
; AVX512DQ-BW-NEXT: kmovq %r10, %k3
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm15 {%k3}
-; AVX512DQ-BW-NEXT: vmovdqa64 (%rax), %zmm22
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm0 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
-; AVX512DQ-BW-NEXT: vpermi2w %zmm22, %zmm17, %zmm0
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm16 {%k3}
+; AVX512DQ-BW-NEXT: vmovdqa64 (%rax), %zmm21
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm1 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
+; AVX512DQ-BW-NEXT: vpermi2w %zmm21, %zmm17, %zmm1
; AVX512DQ-BW-NEXT: movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
; AVX512DQ-BW-NEXT: kmovq %rax, %k5
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm15 {%k5}
-; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} ymm0 = ymm18[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
-; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm19, %ymm0 {%k1}
-; AVX512DQ-BW-NEXT: vpshufb %ymm1, %ymm19, %ymm1
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-BW-NEXT: vpshufb %ymm28, %ymm18, %ymm25
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512DQ-BW-NEXT: vporq %ymm1, %ymm25, %ymm1
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512DQ-BW-NEXT: vmovdqa64 (%rdx), %zmm1
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm14, %zmm1, %zmm1
-; AVX512DQ-BW-NEXT: vpshufb %zmm20, %zmm1, %zmm1
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm16 {%k5}
+; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} ymm1 = ymm18[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,1,1,4,4,5,5]
+; AVX512DQ-BW-NEXT: vpshufb %ymm23, %ymm19, %ymm1 {%k1}
+; AVX512DQ-BW-NEXT: vpshufb %ymm2, %ymm19, %ymm2
+; AVX512DQ-BW-NEXT: vpshufb %ymm28, %ymm18, %ymm23
+; AVX512DQ-BW-NEXT: vporq %ymm2, %ymm23, %ymm2
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512DQ-BW-NEXT: vmovdqa64 (%rdx), %zmm2
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm14, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT: vpshufb %zmm20, %zmm2, %zmm2
; AVX512DQ-BW-NEXT: vmovdqa64 (%rcx), %zmm20
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm16, %zmm20, %zmm20
-; AVX512DQ-BW-NEXT: vpshufb %zmm21, %zmm20, %zmm20
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm15, %zmm20, %zmm20
+; AVX512DQ-BW-NEXT: vpshufb %zmm22, %zmm20, %zmm20
+; AVX512DQ-BW-NEXT: vporq %zmm2, %zmm20, %zmm2
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm20 = zmm20[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-NEXT: vporq %zmm1, %zmm20, %zmm20
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm20 {%k3}
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%r9), %xmm21
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm10, %zmm30, %zmm0
-; AVX512DQ-BW-NEXT: vpshufb %zmm23, %zmm0, %zmm0
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%r8), %xmm23
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm9, %zmm29, %zmm1
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm20 = zmm2[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm20 {%k3}
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%r9), %xmm22
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm10, %zmm29, %zmm1
; AVX512DQ-BW-NEXT: vpshufb %zmm24, %zmm1, %zmm1
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdx), %xmm24
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%r8), %xmm23
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512DQ-BW-NEXT: vpshufb %zmm25, %zmm0, %zmm2
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rdx), %xmm0
+; AVX512DQ-BW-NEXT: vporq %zmm1, %zmm2, %zmm1
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rcx), %xmm2
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-NEXT: vporq %zmm0, %zmm1, %zmm1
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%rcx), %xmm25
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm13, %zmm22, %zmm13
-; AVX512DQ-BW-NEXT: vpermw %zmm13, %zmm26, %zmm0
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm13, %zmm21, %zmm13
+; AVX512DQ-BW-NEXT: vpermw %zmm13, %zmm26, %zmm24
; AVX512DQ-BW-NEXT: movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
; AVX512DQ-BW-NEXT: kmovq %rax, %k5
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k5}
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %xmm0
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm24, %zmm1 {%k5}
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdi), %xmm24
; AVX512DQ-BW-NEXT: movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
; AVX512DQ-BW-NEXT: kmovq %rax, %k5
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm20 {%k5}
; AVX512DQ-BW-NEXT: vmovdqa 32(%rsi), %xmm1
; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm18 = ymm18[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm26 = ymm18[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm19, %ymm26 {%k4}
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm18[2,2,3,3,6,6,7,7]
+; AVX512DQ-BW-NEXT: vpshufb %ymm27, %ymm19, %ymm25 {%k4}
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm24[0],xmm1[0],xmm24[1],xmm1[1],xmm24[2],xmm1[2],xmm24[3],xmm1[3],xmm24[4],xmm1[4],xmm24[5],xmm1[5],xmm24[6],xmm1[6],xmm24[7],xmm1[7]
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm18 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
; AVX512DQ-BW-NEXT: vpshufb %xmm18, %xmm19, %xmm19
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm19 = zmm26[2,3,2,3],zmm19[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm26 = xmm24[0],xmm25[0],xmm24[1],xmm25[1],xmm24[2],xmm25[2],xmm24[3],xmm25[3],xmm24[4],xmm25[4],xmm24[5],xmm25[5],xmm24[6],xmm25[6],xmm24[7],xmm25[7]
-; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm16, %ymm11
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm16 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512DQ-BW-NEXT: vpshufb %xmm16, %xmm26, %xmm26
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm25 = zmm25[2,3,2,3],zmm19[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm26 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm15, %ymm11
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm19 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512DQ-BW-NEXT: vpshufb %xmm19, %xmm26, %xmm15
; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm14 = ymm14[0,2,3,3,4,6,7,7]
; AVX512DQ-BW-NEXT: vmovdqu8 %ymm14, %ymm11 {%k1}
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm14 = zmm11[2,3,2,3],zmm26[0,1,0,1]
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm19, %zmm14 {%k2}
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm14 = zmm11[2,3,2,3],zmm15[0,1,0,1]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm25, %zmm14 {%k2}
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm23[0],xmm22[0],xmm23[1],xmm22[1],xmm23[2],xmm22[2],xmm23[3],xmm22[3],xmm23[4],xmm22[4],xmm23[5],xmm22[5],xmm23[6],xmm22[6],xmm23[7],xmm22[7]
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm15 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512DQ-BW-NEXT: vpshufb %xmm15, %xmm11, %xmm11
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm9[27],zero,zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm10[27],zero,zero,zero,zero,ymm10[30],zero,ymm10[28],zero,zero,zero,zero,ymm10[31],zero,ymm10[29]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512DQ-BW-NEXT: vpor %ymm9, %ymm10, %ymm10
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm23[0],xmm21[0],xmm23[1],xmm21[1],xmm23[2],xmm21[2],xmm23[3],xmm21[3],xmm23[4],xmm21[4],xmm23[5],xmm21[5],xmm23[6],xmm21[6],xmm23[7],xmm21[7]
-; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm9 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm11, %xmm11
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm11 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512DQ-BW-NEXT: vpermw %zmm22, %zmm11, %zmm11
+; AVX512DQ-BW-NEXT: vpor %ymm9, %ymm10, %ymm9
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[2,3,2,3],zmm11[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm10 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
+; AVX512DQ-BW-NEXT: vpermw %zmm21, %zmm10, %zmm10
; AVX512DQ-BW-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm11, %zmm10 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm10, %zmm9 {%k1}
; AVX512DQ-BW-NEXT: movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm10, %zmm14 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512DQ-BW-NEXT: vpshufb %xmm11, %xmm25, %xmm10
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm19 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512DQ-BW-NEXT: vpshufb %xmm19, %xmm24, %xmm26
-; AVX512DQ-BW-NEXT: vporq %xmm10, %xmm26, %xmm10
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm24 = xmm25[8],xmm24[8],xmm25[9],xmm24[9],xmm25[10],xmm24[10],xmm25[11],xmm24[11],xmm25[12],xmm24[12],xmm25[13],xmm24[13],xmm25[14],xmm24[14],xmm25[15],xmm24[15]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm24 = xmm24[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm24, %zmm10, %zmm10
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm24 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512DQ-BW-NEXT: vpshufb %xmm24, %xmm1, %xmm25
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm26 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512DQ-BW-NEXT: vpshufb %xmm26, %xmm0, %xmm27
-; AVX512DQ-BW-NEXT: vporq %xmm25, %xmm27, %xmm25
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm10[0,1,0,1,4,5,4,5]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm0, %zmm25, %zmm0
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm10 = zmm0[0,1,0,1,4,5,4,5]
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm10 {%k3}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm9, %zmm14 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm10 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512DQ-BW-NEXT: vpshufb %xmm10, %xmm2, %xmm9
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512DQ-BW-NEXT: vpshufb %xmm11, %xmm0, %xmm25
+; AVX512DQ-BW-NEXT: vporq %xmm9, %xmm25, %xmm9
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm0, %zmm9, %zmm0
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm2 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512DQ-BW-NEXT: vpshufb %xmm2, %xmm1, %xmm9
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm25 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512DQ-BW-NEXT: vpshufb %xmm25, %xmm24, %xmm26
+; AVX512DQ-BW-NEXT: vporq %xmm9, %xmm26, %xmm9
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm24[8],xmm1[9],xmm24[9],xmm1[10],xmm24[10],xmm1[11],xmm24[11],xmm1[12],xmm24[12],xmm1[13],xmm24[13],xmm1[14],xmm24[14],xmm1[15],xmm24[15]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm1, %zmm9, %zmm1
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm9 = zmm1[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm9 {%k3}
; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm0 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512DQ-BW-NEXT: vpshufb %xmm0, %xmm21, %xmm1
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm25 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
-; AVX512DQ-BW-NEXT: vpermi2w %zmm22, %zmm17, %zmm25
+; AVX512DQ-BW-NEXT: vpshufb %xmm0, %xmm22, %xmm1
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm24 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
+; AVX512DQ-BW-NEXT: vpermi2w %zmm21, %zmm17, %zmm24
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm23, %xmm22
-; AVX512DQ-BW-NEXT: vporq %xmm1, %xmm22, %xmm1
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm21 = xmm23[8],xmm21[8],xmm23[9],xmm21[9],xmm23[10],xmm21[10],xmm23[11],xmm21[11],xmm23[12],xmm21[12],xmm23[13],xmm21[13],xmm23[14],xmm21[14],xmm23[15],xmm21[15]
+; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm23, %xmm21
+; AVX512DQ-BW-NEXT: vporq %xmm1, %xmm21, %xmm1
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm21 = xmm23[8],xmm22[8],xmm23[9],xmm22[9],xmm23[10],xmm22[10],xmm23[11],xmm22[11],xmm23[12],xmm22[12],xmm23[13],xmm22[13],xmm23[14],xmm22[14],xmm23[15],xmm22[15]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm21 = xmm21[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm21, %zmm1, %zmm1
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
; AVX512DQ-BW-NEXT: movabsq $290499906672591364, %rax # imm = 0x408102040810204
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm25, %zmm1 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm24, %zmm1 {%k1}
; AVX512DQ-BW-NEXT: movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm10 {%k1}
-; AVX512DQ-BW-NEXT: vpshufb %xmm11, %xmm5, %xmm1
-; AVX512DQ-BW-NEXT: vpshufb %xmm19, %xmm4, %xmm11
-; AVX512DQ-BW-NEXT: vpor %xmm1, %xmm11, %xmm1
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm9 {%k1}
+; AVX512DQ-BW-NEXT: vpshufb %xmm10, %xmm5, %xmm1
+; AVX512DQ-BW-NEXT: vpshufb %xmm11, %xmm4, %xmm10
+; AVX512DQ-BW-NEXT: vpor %xmm1, %xmm10, %xmm1
; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX512DQ-BW-NEXT: vpshufb %xmm16, %xmm4, %xmm4
+; AVX512DQ-BW-NEXT: vpshufb %xmm19, %xmm4, %xmm4
; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm1, %zmm4, %zmm1
-; AVX512DQ-BW-NEXT: vpshufb %xmm24, %xmm7, %xmm4
-; AVX512DQ-BW-NEXT: vpshufb %xmm26, %xmm6, %xmm5
-; AVX512DQ-BW-NEXT: vpor %xmm4, %xmm5, %xmm4
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
-; AVX512DQ-BW-NEXT: vpshufb %xmm18, %xmm5, %xmm5
-; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm4, %zmm5, %zmm4
+; AVX512DQ-BW-NEXT: vpshufb %xmm2, %xmm7, %xmm2
+; AVX512DQ-BW-NEXT: vpshufb %xmm25, %xmm6, %xmm4
+; AVX512DQ-BW-NEXT: vpor %xmm2, %xmm4, %xmm2
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512DQ-BW-NEXT: vpshufb %xmm18, %xmm4, %xmm4
+; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm2, %zmm4, %zmm2
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm4 = zmm4[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm2 = zmm2[0,1,0,1,4,5,4,5]
; AVX512DQ-BW-NEXT: movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm4 {%k1}
-; AVX512DQ-BW-NEXT: vpshufb %xmm0, %xmm12, %xmm0
-; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm3, %xmm1
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512DQ-BW-NEXT: vpshufb %xmm0, %xmm4, %xmm0
+; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm12, %xmm1
; AVX512DQ-BW-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm12[0],xmm3[1],xmm12[1],xmm3[2],xmm12[2],xmm3[3],xmm12[3],xmm3[4],xmm12[4],xmm3[5],xmm12[5],xmm3[6],xmm12[6],xmm3[7],xmm12[7]
-; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm1, %xmm1
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3],xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; AVX512DQ-BW-NEXT: vpshufb %xmm15, %xmm1, %xmm1
; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm1 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
; AVX512DQ-BW-NEXT: vpermw %zmm13, %zmm1, %zmm1
@@ -11367,334 +11112,319 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
; AVX512DQ-BW-NEXT: movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm4 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm2 {%k1}
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, (%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, (%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm8, 320(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, 256(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, 256(%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, 192(%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm20, 128(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, 64(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm15, 384(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, 64(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm16, 384(%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: store_i8_stride7_vf64:
; AVX512DQ-BW-FCP: # %bb.0:
-; AVX512DQ-BW-FCP-NEXT: subq $40, %rsp
+; AVX512DQ-BW-FCP-NEXT: subq $104, %rsp
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rax), %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm28 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm28, %ymm0, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512DQ-BW-FCP-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm0, %ymm2, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %ymm0, %ymm19
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %ymm20
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm22 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm22, %ymm20, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rax), %ymm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm20 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm20, %ymm2, %ymm0
+; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512DQ-BW-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm2, %ymm1, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm1, %ymm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r8), %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm4, %ymm4
; AVX512DQ-BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %xmm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r8), %xmm4
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r9), %xmm16
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r8), %xmm15
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm15[8],xmm16[8],xmm15[9],xmm16[9],xmm15[10],xmm16[10],xmm15[11],xmm16[11],xmm15[12],xmm16[12],xmm15[13],xmm16[13],xmm15[14],xmm16[14],xmm15[15],xmm16[15]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm4, %zmm3
; AVX512DQ-BW-FCP-NEXT: movabsq $2323999253380730912, %r10 # imm = 0x2040810204081020
; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm0, %ymm9
-; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %ymm0, %ymm18
-; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm2, %ymm5, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm14
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm15
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm15[8],xmm14[8],xmm15[9],xmm14[9],xmm15[10],xmm14[10],xmm15[11],xmm14[11],xmm15[12],xmm14[12],xmm15[13],xmm14[13],xmm15[14],xmm14[14],xmm15[15],xmm14[15]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm5, %zmm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm0, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %ymm24
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm21 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm21, %ymm24, %ymm16
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm5, %ymm16, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %xmm16
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm17
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm25 = xmm17[8],xmm16[8],xmm17[9],xmm16[9],xmm17[10],xmm16[10],xmm17[11],xmm16[11],xmm17[12],xmm16[12],xmm17[13],xmm16[13],xmm17[14],xmm16[14],xmm17[15],xmm16[15]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm25 = xmm25[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm25[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm25, %zmm13
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm2, %ymm4, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm6
+; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm5, %ymm6, %ymm6
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm6, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %xmm17
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %xmm19
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm19[8],xmm17[8],xmm19[9],xmm17[9],xmm19[10],xmm17[10],xmm19[11],xmm17[11],xmm19[12],xmm17[12],xmm19[13],xmm17[13],xmm19[14],xmm17[14],xmm19[15],xmm17[15]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm29 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm29, %xmm6, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm6, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm6
+; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm9, %ymm6, %ymm6
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm7
+; AVX512DQ-BW-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm23 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm23, %ymm7, %ymm21
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm6, %ymm21, %ymm6
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %xmm21
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm22
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm24 = xmm22[8],xmm21[8],xmm22[9],xmm21[9],xmm22[10],xmm21[10],xmm22[11],xmm21[11],xmm22[12],xmm21[12],xmm22[13],xmm21[13],xmm22[14],xmm21[14],xmm22[15],xmm21[15]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm24, %xmm24
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm24 = ymm24[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm24, %zmm14
; AVX512DQ-BW-FCP-NEXT: movabsq $435749860008887046, %r10 # imm = 0x60C183060C18306
; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm13 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm14 {%k1}
; AVX512DQ-BW-FCP-NEXT: movabsq $4066998693416279096, %r10 # imm = 0x3870E1C3870E1C38
; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm13 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm3[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm3, %ymm26
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm11
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm11, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm25, %zmm4, %zmm4
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm11
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm11, %ymm25
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm27
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm27, %ymm26
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm12
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm21, %ymm27, %ymm21
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm12, %ymm21, %ymm12
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm25, %zmm12, %zmm12
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm14 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm3
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm5
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm5, %ymm3, %ymm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm24
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm5, %ymm24, %ymm5
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm25
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm23, %ymm25, %ymm23
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm9, %ymm23, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm23
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm25, %ymm24
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm23, %ymm24, %ymm23
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm23 = ymm23[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm23, %zmm9, %zmm9
; AVX512DQ-BW-FCP-NEXT: movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
-; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm12 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%r8), %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm21
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm21[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%r9), %ymm0
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm26
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm25, %ymm26, %ymm25
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm22, %ymm4, %ymm22
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm1, %ymm22, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm25, %zmm1, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rax), %ymm31
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm22 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm31, %ymm22, %ymm22
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm28, %ymm31, %ymm25
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm25, %zmm22
-; AVX512DQ-BW-FCP-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm1 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm9 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%r9), %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm28
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm28, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm28, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm4, %ymm23
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm1, %ymm23, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rax), %ymm6
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm23 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm6, %ymm23, %ymm23
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm20, %ymm6, %ymm20
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm23, %zmm20, %zmm20
+; AVX512DQ-BW-FCP-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
+; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm20, %zmm1 {%k2}
; AVX512DQ-BW-FCP-NEXT: movabsq $-4357498600088870461, %r10 # imm = 0xC3870E1C3870E1C3
-; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm12 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm25
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm2[0,1,2,3],zmm25[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm9 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm23
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[0,1,2,3],zmm23[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,zmm1[23],zero,zmm1[21,22,23,26],zero,zmm1[24],zero,zmm1[28,29,26,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero,zmm1[61],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm26
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm3[0,1,2,3],zmm26[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm24
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm24[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm2[25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zero,zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[62],zero,zmm2[60],zero,zero,zero,zero,zmm2[63],zero,zmm2[61],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm2[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: vporq %zmm1, %zmm2, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm28
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm27[0,1,2,3],zmm28[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm26
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm25[0,1,2,3],zmm26[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zero,zero,zmm2[27],zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,60,61,62],zero,zmm2[60],zero,zmm2[62,63,62,63],zero,zmm2[61],zero,zmm2[63,60,61]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm2[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm29
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm11[0,1,2,3],zmm29[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm27
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm5[0,1,2,3],zmm27[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm3[23],zero,zero,zero,zero,zmm3[26],zero,zmm3[24],zero,zero,zero,zero,zmm3[27],zero,zmm3[25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zmm3[62],zero,zmm3[60],zero,zero,zero,zero,zmm3[63],zero,zmm3[61],zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vporq %zmm2, %zmm3, %zmm22
+; AVX512DQ-BW-FCP-NEXT: vporq %zmm2, %zmm3, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm20 = zmm2[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: movabsq $1742999440035548184, %r10 # imm = 0x183060C183060C18
-; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm22 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r9), %zmm27
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm4[0,1,2,3],zmm27[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm20 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r9), %zmm25
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm28[0,1,2,3],zmm25[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm1[23],zero,zmm1[23,24,25,26],zero,zmm1[24],zero,zmm1[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm1[59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %zmm28
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm4[0,1,2,3],zmm28[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm2[59],zero,zero,zero,zero,zmm2[62],zero,zmm2[60],zero,zero,zero,zero,zmm2[63],zero,zmm2[61]
+; AVX512DQ-BW-FCP-NEXT: vporq %zmm1, %zmm2, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %zmm30
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm30[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm0[25],zero,zmm0[23],zero,zero,zero,zero,zmm0[26],zero,zmm0[24],zero,zero,zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm0[59],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero,zmm0[61]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512DQ-BW-FCP-NEXT: movabsq $6971997760142192736, %r10 # imm = 0x60C183060C183060
-; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm22 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rax), %zmm10
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm0 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm10, %zmm31, %zmm0
+; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k2
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm20 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rax), %zmm31
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm1 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
+; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm31, %zmm6, %zmm1
; AVX512DQ-BW-FCP-NEXT: movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm22 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm6[28],zero,ymm6[30,31,30,31],zero,ymm6[29],zero,ymm6[31,28,29]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm24[30],zero,ymm24[28],zero,zero,zero,zero,ymm24[31],zero,ymm24[29],zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm0, %ymm1, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm8
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm7
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm6
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm18[30],zero,ymm18[28],zero,zero,zero,zero,ymm18[31],zero,ymm18[29],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero,ymm9[29],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm1, %ymm4, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %xmm1
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm18 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm18, %xmm0, %xmm0
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm9
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm19[27],zero,zero,zero,zero,ymm19[30],zero,ymm19[28],zero,zero,zero,zero,ymm19[31],zero
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %ymm19, %ymm21
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm20[27],zero,zero,zero,zero,ymm20[30],zero,ymm20[28],zero,zero,zero,zero,ymm20[31],zero,ymm20[29]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm0, %ymm6, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%r9), %xmm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%r8), %xmm4
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm19 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm0, %xmm0
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm10, %zmm3, %zmm3
-; AVX512DQ-BW-FCP-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm0 {%k2}
-; AVX512DQ-BW-FCP-NEXT: movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm9 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm2, %xmm23
-; AVX512DQ-BW-FCP-NEXT: vporq %xmm3, %xmm23, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm2, %xmm7, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm0, %xmm8, %xmm23
-; AVX512DQ-BW-FCP-NEXT: vporq %xmm3, %xmm23, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm7[8],xmm8[8],xmm7[9],xmm8[9],xmm7[10],xmm8[10],xmm7[11],xmm8[11],xmm7[12],xmm8[12],xmm7[13],xmm8[13],xmm7[14],xmm8[14],xmm7[15],xmm8[15]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm3, %zmm3
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm23 = zmm3[0,1,0,1,4,5,4,5]
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm23 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm4, %xmm8
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm8, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm4, %zmm3, %zmm3
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm4 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm10, %zmm31, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm20 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %xmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm13, %xmm4, %xmm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm5, %xmm2
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm1, %xmm2, %xmm1
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm29, %xmm2, %xmm2
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm1, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm29
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm2, %xmm30
+; AVX512DQ-BW-FCP-NEXT: vporq %xmm29, %xmm30, %xmm29
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm30 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm30, %xmm30
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm30, %zmm29, %zmm29
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm29 = zmm29[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm29 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r9), %xmm30
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%r8), %xmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm30, %xmm0
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm3, %xmm18
+; AVX512DQ-BW-FCP-NEXT: vporq %xmm0, %xmm18, %xmm0
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm18 = xmm3[8],xmm30[8],xmm3[9],xmm30[9],xmm3[10],xmm30[10],xmm3[11],xmm30[11],xmm3[12],xmm30[12],xmm3[13],xmm30[13],xmm3[14],xmm30[14],xmm3[15],xmm30[15]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm18 = xmm18[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm18, %zmm0, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm18 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
+; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm31, %zmm6, %zmm18
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
; AVX512DQ-BW-FCP-NEXT: movabsq $290499906672591364, %rax # imm = 0x408102040810204
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm3 {%k2}
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm0 {%k3}
; AVX512DQ-BW-FCP-NEXT: movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm23 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm15, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm14, %xmm4
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3],xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm18, %xmm4, %xmm4
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm3, %zmm4, %zmm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm2, %xmm17, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm0, %xmm16, %xmm0
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm2, %xmm0, %xmm0
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm16[0],xmm17[0],xmm16[1],xmm17[1],xmm16[2],xmm17[2],xmm16[3],xmm17[3],xmm16[4],xmm17[4],xmm16[5],xmm17[5],xmm16[6],xmm17[6],xmm16[7],xmm17[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm3[0,1,0,1,4,5,4,5]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm29 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm13, %xmm19, %xmm0
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm17, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm0, %xmm6, %xmm0
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm17[0],xmm19[0],xmm17[1],xmm19[1],xmm17[2],xmm19[2],xmm17[3],xmm19[3],xmm17[4],xmm19[4],xmm17[5],xmm19[5],xmm17[6],xmm19[6],xmm17[7],xmm19[7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm6, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm6, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm22, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm21, %xmm8
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm8, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm21[0],xmm22[0],xmm21[1],xmm22[1],xmm21[2],xmm22[2],xmm21[3],xmm22[3],xmm21[4],xmm22[4],xmm21[5],xmm22[5],xmm21[6],xmm22[6],xmm21[7],xmm22[7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm8, %xmm8
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm8, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm6[0,1,0,1,4,5,4,5]
; AVX512DQ-BW-FCP-NEXT: movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm0 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm1, %xmm3, %xmm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm4, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm2, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, (%rsp), %zmm10, %zmm2 # 32-byte Folded Reload
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm2, %zmm3, %zmm3
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm8, %zmm0 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm16, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm15, %xmm7
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm15[0],xmm16[0],xmm15[1],xmm16[1],xmm15[2],xmm16[2],xmm15[3],xmm16[3],xmm15[4],xmm16[4],xmm15[5],xmm16[5],xmm15[6],xmm16[6],xmm15[7],xmm16[7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm7, %xmm7
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm7, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm6[0,1,0,1,4,5,4,5]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, (%rsp), %zmm31, %zmm8 # 32-byte Folded Reload
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm7 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm8, %zmm7, %zmm7
; AVX512DQ-BW-FCP-NEXT: movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm1 {%k2}
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm6 {%k3}
; AVX512DQ-BW-FCP-NEXT: movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm24, %zmm28, %zmm1
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm0 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm13[28],zero,ymm13[30,31,30,31],zero,ymm13[29],zero,ymm13[31,28,29]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero,ymm15[29],zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm2, %ymm6, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm2[2,3,2,3],zmm1[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29],zero
+; AVX512DQ-BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm7[30],zero,ymm7[28],zero,zero,zero,zero,ymm7[31],zero,ymm7[29],zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm4[2,3,2,3],zmm2[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm30[0],xmm3[1],xmm30[1],xmm3[2],xmm30[2],xmm3[3],xmm30[3],xmm3[4],xmm30[4],xmm3[5],xmm30[5],xmm3[6],xmm30[6],xmm3[7],xmm30[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm1, %xmm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero
+; AVX512DQ-BW-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29]
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[2,3,2,3],zmm1[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm31, %zmm3, %zmm3
+; AVX512DQ-BW-FCP-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm1 {%k1}
+; AVX512DQ-BW-FCP-NEXT: movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm26, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[18,19,20,21],zero,zmm1[19],zero,zmm1[21,20,21,22],zero,zmm1[20],zero,zmm1[22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm1[55],zero,zero,zero,zero,zmm1[58],zero,zmm1[56],zero,zero,zero,zero,zmm1[59],zero
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 32-byte Folded Reload
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm27, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm3[21],zero,zmm3[19],zero,zero,zero,zero,zmm3[22],zero,zmm3[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm3[55],zero,zero,zero,zero,zmm3[58],zero,zmm3[56],zero,zero,zero,zero,zmm3[59],zero,zmm3[57]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: vporq %zmm1, %zmm3, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm3 # 32-byte Folded Reload
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm24, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm3[18],zero,zmm3[18,19,20,21],zero,zmm3[19],zero,zmm3[25,26,27,22],zero,zmm3[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm3[56,57],zero,zmm3[55],zero,zmm3[53,54,55,58],zero,zmm3[56],zero,zmm3[60,61,58,59]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm4 # 32-byte Folded Reload
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm23, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm4[18],zero,zero,zero,zero,zmm4[21],zero,zmm4[19],zero,zero,zero,zero,zmm4[22],zero,zmm4[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm4[57],zero,zmm4[55],zero,zero,zero,zero,zmm4[58],zero,zmm4[56],zero,zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm4[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: vporq %zmm3, %zmm4, %zmm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm3 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm20, %zmm30, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm3 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm28, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[20],zero,zmm1[18],zero,zmm1[20,21,20,21],zero,zmm1[19],zero,zmm1[19,20,21,22],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[56,57,56,57],zero,zmm1[55],zero,zmm1[55,56,57,58],zero,zmm1[56],zero,zmm1[62,63]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm21, %zmm27, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm25, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm4[20],zero,zmm4[18],zero,zero,zero,zero,zmm4[21],zero,zmm4[19],zero,zero,zero,zero,zmm4[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm4[57],zero,zmm4[55],zero,zero,zero,zero,zmm4[58],zero,zmm4[56],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm4[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: vporq %zmm1, %zmm4, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm4 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm2, %zmm4, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm8, %zmm4, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-FCP-NEXT: movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm1 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm1 {%k1}
; AVX512DQ-BW-FCP-NEXT: movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm3 {%k1}
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, 128(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm12, 320(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, 320(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 192(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm23, 256(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, 192(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, 64(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm22, 384(%rax)
-; AVX512DQ-BW-FCP-NEXT: addq $40, %rsp
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm29, 256(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, 64(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm20, 384(%rax)
+; AVX512DQ-BW-FCP-NEXT: addq $104, %rsp
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <64 x i8>, ptr %in.vecptr0, align 64
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
index dfa7f2d..c981d97 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
@@ -177,6 +177,36 @@ define <16 x float> @shuffle_v16f32_02_03_16_17_06_07_20_21_10_11_24_25_14_15_28
ret <16 x float> %shuffle
}
+; PR86076
+define <16 x float> @shuffle_f32_v16f32_00_08_01_09_02_10_03_11_04_12_05_13_06_14_07_15(float %a0, float %a1) {
+; ALL-LABEL: shuffle_f32_v16f32_00_08_01_09_02_10_03_11_04_12_05_13_06_14_07_15:
+; ALL: # %bb.0:
+; ALL-NEXT: vbroadcastss %xmm0, %ymm0
+; ALL-NEXT: vbroadcastss %xmm1, %ymm1
+; ALL-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; ALL-NEXT: retq
+ %v0 = insertelement <8 x float> poison, float %a0, i64 0
+ %v1 = insertelement <8 x float> poison, float %a1, i64 0
+ %b0 = shufflevector <8 x float> %v0, <8 x float> poison, <8 x i32> zeroinitializer
+ %b1 = shufflevector <8 x float> %v1, <8 x float> poison, <8 x i32> zeroinitializer
+ %r = shufflevector <8 x float> %b0, <8 x float> %b1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <16 x float> %r
+}
+
+; PR86076
+define <16 x float> @shuffle_f32_v16f32_00_08_00_08_00_08_00_08_00_08_00_08_00_08_00_08(float %a0, float %a1) {
+; ALL-LABEL: shuffle_f32_v16f32_00_08_00_08_00_08_00_08_00_08_00_08_00_08_00_08:
+; ALL: # %bb.0:
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
+; ALL-NEXT: vbroadcastsd %xmm0, %zmm0
+; ALL-NEXT: retq
+ %v0 = insertelement <8 x float> poison, float %a0, i64 0
+ %v1 = insertelement <8 x float> poison, float %a1, i64 0
+ %sv = shufflevector <8 x float> %v0, <8 x float> %v1, <16 x i32> <i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8>
+ ret <16 x float> %sv
+}
+
define <16 x i32> @shuffle_v16i32_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; ALL: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/vpdpwssd.ll b/llvm/test/CodeGen/X86/vpdpwssd.ll
new file mode 100644
index 0000000..e6a07b4
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vpdpwssd.ll
@@ -0,0 +1,12 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver4 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+fast-dpwssd | FileCheck %s
+
+define <16 x i32> @vpdpwssd_test(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2) {
+; CHECK-LABEL: vpdpwssd_test:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpdpwssd %zmm2, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %4 = tail call <16 x i32> @llvm.x86.avx512.vpdpwssd.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2)
+ ret <16 x i32> %4
+}
diff --git a/llvm/test/CodeGen/X86/widen_fadd.ll b/llvm/test/CodeGen/X86/widen_fadd.ll
index 68f2ed4..be249dd 100644
--- a/llvm/test/CodeGen/X86/widen_fadd.ll
+++ b/llvm/test/CodeGen/X86/widen_fadd.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VL
define void @widen_fadd_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
; SSE-LABEL: widen_fadd_v2f32_v4f32:
@@ -364,3 +364,86 @@ define void @widen_fadd_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
store <2 x float> %vc14, ptr %c14, align 4
ret void
}
+
+define <8 x float> @widen_fadd_v4f32_v8f32_const(<4 x float> %x, <4 x float> %y) {
+; SSE-LABEL: widen_fadd_v4f32_v8f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT: addps %xmm2, %xmm0
+; SSE-NEXT: addps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: widen_fadd_v4f32_v8f32_const:
+; AVX1: # %bb.0:
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: widen_fadd_v4f32_v8f32_const:
+; AVX2: # %bb.0:
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fadd_v4f32_v8f32_const:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512F-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: vbroadcastss {{.*#+}} ymm1 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; AVX512F-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fadd_v4f32_v8f32_const:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+ %x2 = fadd <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %y2 = fadd <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %r = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %r
+}
+
+define <16 x float> @widen_fadd_v4f32_v16f32_const(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x float> %w) {
+; SSE-LABEL: widen_fadd_v4f32_v16f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm4 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT: addps %xmm4, %xmm0
+; SSE-NEXT: addps %xmm4, %xmm1
+; SSE-NEXT: addps %xmm4, %xmm2
+; SSE-NEXT: addps %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fadd_v4f32_v16f32_const:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX1OR2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vbroadcastss {{.*#+}} ymm1 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; AVX1OR2-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1OR2-NEXT: vaddps %ymm1, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: widen_fadd_v4f32_v16f32_const:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x2 = fadd <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %y2 = fadd <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %z2 = fadd <4 x float> %z, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %w2 = fadd <4 x float> %w, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %r0 = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r1 = shufflevector <4 x float> %z2, <4 x float> %w2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r = shufflevector <8 x float> %r0, <8 x float> %r1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %r
+}
diff --git a/llvm/test/CodeGen/X86/widen_fmul.ll b/llvm/test/CodeGen/X86/widen_fmul.ll
index ac208da..9aa9d63 100644
--- a/llvm/test/CodeGen/X86/widen_fmul.ll
+++ b/llvm/test/CodeGen/X86/widen_fmul.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VL
define void @widen_fmul_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
; SSE-LABEL: widen_fmul_v2f32_v4f32:
@@ -364,3 +364,86 @@ define void @widen_fmul_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
store <2 x float> %vc14, ptr %c14, align 4
ret void
}
+
+define <8 x float> @widen_fmul_v4f32_v8f32_const(<4 x float> %x, <4 x float> %y) {
+; SSE-LABEL: widen_fmul_v4f32_v8f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; SSE-NEXT: mulps %xmm2, %xmm0
+; SSE-NEXT: mulps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: widen_fmul_v4f32_v8f32_const:
+; AVX1: # %bb.0:
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: widen_fmul_v4f32_v8f32_const:
+; AVX2: # %bb.0:
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; AVX2-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fmul_v4f32_v8f32_const:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512F-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: vbroadcastss {{.*#+}} ymm1 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; AVX512F-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fmul_v4f32_v8f32_const:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+ %x2 = fmul <4 x float> %x, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %y2 = fmul <4 x float> %y, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %r = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %r
+}
+
+define <16 x float> @widen_fmul_v4f32_v16f32_const(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x float> %w) {
+; SSE-LABEL: widen_fmul_v4f32_v16f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm4 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; SSE-NEXT: mulps %xmm4, %xmm0
+; SSE-NEXT: mulps %xmm4, %xmm1
+; SSE-NEXT: mulps %xmm4, %xmm2
+; SSE-NEXT: mulps %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fmul_v4f32_v16f32_const:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX1OR2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vbroadcastss {{.*#+}} ymm1 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; AVX1OR2-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1OR2-NEXT: vmulps %ymm1, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: widen_fmul_v4f32_v16f32_const:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x2 = fmul <4 x float> %x, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %y2 = fmul <4 x float> %y, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %z2 = fmul <4 x float> %z, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %w2 = fmul <4 x float> %w, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %r0 = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r1 = shufflevector <4 x float> %z2, <4 x float> %w2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r = shufflevector <8 x float> %r0, <8 x float> %r1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %r
+}
diff --git a/llvm/test/CodeGen/X86/widen_fsub.ll b/llvm/test/CodeGen/X86/widen_fsub.ll
index 90cf455..60e54ab 100644
--- a/llvm/test/CodeGen/X86/widen_fsub.ll
+++ b/llvm/test/CodeGen/X86/widen_fsub.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VL
define void @widen_fsub_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
; SSE-LABEL: widen_fsub_v2f32_v4f32:
@@ -364,3 +364,86 @@ define void @widen_fsub_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
store <2 x float> %vc14, ptr %c14, align 4
ret void
}
+
+define <8 x float> @widen_fsub_v4f32_v8f32_const(<4 x float> %x, <4 x float> %y) {
+; SSE-LABEL: widen_fsub_v4f32_v8f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT: subps %xmm2, %xmm0
+; SSE-NEXT: subps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: widen_fsub_v4f32_v8f32_const:
+; AVX1: # %bb.0:
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: widen_fsub_v4f32_v8f32_const:
+; AVX2: # %bb.0:
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0]
+; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fsub_v4f32_v8f32_const:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512F-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: vbroadcastss {{.*#+}} ymm1 = [-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0]
+; AVX512F-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fsub_v4f32_v8f32_const:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+ %x2 = fsub <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %y2 = fsub <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %r = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %r
+}
+
+define <16 x float> @widen_fsub_v4f32_v16f32_const(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x float> %w) {
+; SSE-LABEL: widen_fsub_v4f32_v16f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm4 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT: subps %xmm4, %xmm0
+; SSE-NEXT: subps %xmm4, %xmm1
+; SSE-NEXT: subps %xmm4, %xmm2
+; SSE-NEXT: subps %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fsub_v4f32_v16f32_const:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX1OR2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vbroadcastss {{.*#+}} ymm1 = [-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0]
+; AVX1OR2-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1OR2-NEXT: vaddps %ymm1, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: widen_fsub_v4f32_v16f32_const:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x2 = fsub <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %y2 = fsub <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %z2 = fsub <4 x float> %z, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %w2 = fsub <4 x float> %w, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %r0 = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r1 = shufflevector <4 x float> %z2, <4 x float> %w2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r = shufflevector <8 x float> %r0, <8 x float> %r1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %r
+}
diff --git a/llvm/test/DebugInfo/AArch64/ptrauth.ll b/llvm/test/DebugInfo/AArch64/ptrauth.ll
new file mode 100644
index 0000000..5d9099f
--- /dev/null
+++ b/llvm/test/DebugInfo/AArch64/ptrauth.ll
@@ -0,0 +1,70 @@
+; RUN: llc %s -filetype=obj -mtriple arm64e-apple-darwin -o - \
+; RUN: | llvm-dwarfdump - | FileCheck %s
+
+; CHECK: DW_AT_type (0x{{0+}}[[TY:.*]] "void *__ptrauth(4, 0, 0x04d2)")
+; CHECK: 0x{{0+}}[[TY]]: DW_TAG_LLVM_ptrauth_type
+; CHECK-NEXT: DW_AT_type {{.*}}"void *"
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_key (0x04)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_extra_discriminator (0x04d2)
+
+; CHECK: DW_AT_type (0x{{0+}}[[TY:.*]] "void *__ptrauth(4, 1, 0x04d3)")
+; CHECK: 0x{{0+}}[[TY]]: DW_TAG_LLVM_ptrauth_type
+; CHECK-NEXT: DW_AT_type {{.*}}"void *"
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_key (0x04)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_address_discriminated (true)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_extra_discriminator (0x04d3)
+
+; CHECK: DW_AT_type (0x{{0+}}[[TY:.*]] "void *__ptrauth(4, 1, 0x04d4, "isa-pointer")")
+; CHECK: 0x{{0+}}[[TY]]: DW_TAG_LLVM_ptrauth_type
+; CHECK-NEXT: DW_AT_type {{.*}}"void *"
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_key (0x04)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_address_discriminated (true)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_extra_discriminator (0x04d4)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_isa_pointer (true)
+
+; CHECK: DW_AT_type (0x{{0+}}[[TY:.*]] "void *__ptrauth(4, 1, 0x04d5, "authenticates-null-values")")
+; CHECK: 0x{{0+}}[[TY]]: DW_TAG_LLVM_ptrauth_type
+; CHECK-NEXT: DW_AT_type {{.*}}"void *"
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_key (0x04)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_address_discriminated (true)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_extra_discriminator (0x04d5)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_authenticates_null_values (true)
+
+; CHECK: DW_AT_type (0x{{0+}}[[TY:.*]] "void *__ptrauth(4, 1, 0x04d6, "isa-pointer,authenticates-null-values")")
+; CHECK: 0x{{0+}}[[TY]]: DW_TAG_LLVM_ptrauth_type
+; CHECK-NEXT: DW_AT_type {{.*}}"void *"
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_key (0x04)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_address_discriminated (true)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_extra_discriminator (0x04d6)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_isa_pointer (true)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_authenticates_null_values (true)
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+@p = global ptr null, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!10}
+!llvm.module.flags = !{!19, !20}
+
+!0 = !DIGlobalVariableExpression(var: !5, expr: !DIExpression())
+!1 = !DIGlobalVariableExpression(var: !6, expr: !DIExpression())
+!2 = !DIGlobalVariableExpression(var: !7, expr: !DIExpression())
+!3 = !DIGlobalVariableExpression(var: !8, expr: !DIExpression())
+!4 = !DIGlobalVariableExpression(var: !9, expr: !DIExpression())
+!5 = distinct !DIGlobalVariable(name: "p1", scope: !10, file: !11, line: 1, type: !14, isLocal: false, isDefinition: true)
+!6 = distinct !DIGlobalVariable(name: "p2", scope: !10, file: !11, line: 1, type: !15, isLocal: false, isDefinition: true)
+!7 = distinct !DIGlobalVariable(name: "p3", scope: !10, file: !11, line: 1, type: !16, isLocal: false, isDefinition: true)
+!8 = distinct !DIGlobalVariable(name: "p4", scope: !10, file: !11, line: 1, type: !17, isLocal: false, isDefinition: true)
+!9 = distinct !DIGlobalVariable(name: "p5", scope: !10, file: !11, line: 1, type: !18, isLocal: false, isDefinition: true)
+!10 = distinct !DICompileUnit(language: DW_LANG_C99, file: !11, emissionKind: FullDebug, globals: !13)
+!11 = !DIFile(filename: "/tmp/p.c", directory: "/")
+!12 = !{}
+!13 = !{!0,!1,!2,!3,!4}
+!14 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !21, ptrAuthKey: 4, ptrAuthIsAddressDiscriminated: false, ptrAuthExtraDiscriminator: 1234)
+!15 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !21, ptrAuthKey: 4, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1235)
+!16 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !21, ptrAuthKey: 4, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1236, ptrAuthIsaPointer: true)
+!17 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !21, ptrAuthKey: 4, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1237, ptrAuthAuthenticatesNullValues: true)
+!18 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !21, ptrAuthKey: 4, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1238, ptrAuthIsaPointer: true, ptrAuthAuthenticatesNullValues: true)
+!19 = !{i32 2, !"Dwarf Version", i32 4}
+!20 = !{i32 2, !"Debug Info Version", i32 3}
+!21 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: null)
diff --git a/llvm/test/DebugInfo/ARM/hardware-loop-phi-insertion.ll b/llvm/test/DebugInfo/ARM/hardware-loop-phi-insertion.ll
new file mode 100644
index 0000000..9240bf2
--- /dev/null
+++ b/llvm/test/DebugInfo/ARM/hardware-loop-phi-insertion.ll
@@ -0,0 +1,84 @@
+; RUN: llc --stop-after=hardware-loops < %s | FileCheck %s
+
+;; Tests that Hardware Loop Insertion does not insert new phi nodes after debug
+;; records when they appear immediately after the last existing phi node.
+
+; CHECK-LABEL: for.body:
+; CHECK-NEXT: = phi i32
+; CHECK-NEXT: = phi i32
+; CHECK-NEXT: call void @llvm.dbg.value
+
+source_filename = "repro.c"
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "thumbv8.1m.main-arm-none-eabi"
+
+@z = dso_local local_unnamed_addr global i32 42, align 4, !dbg !0
+@arr = dso_local local_unnamed_addr global [10 x i32] zeroinitializer, align 4, !dbg !5
+
+define dso_local void @func1() local_unnamed_addr #0 !dbg !18 {
+entry:
+ %0 = load i32, ptr @z, align 4, !tbaa !26
+ br label %for.body, !dbg !30
+
+for.body: ; preds = %entry, %for.body
+ %p1.04 = phi ptr [ @arr, %entry ], [ %incdec.ptr, %for.body ]
+ %i.03 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ tail call void @llvm.dbg.value(metadata ptr %p1.04, metadata !23, metadata !DIExpression()), !dbg !25
+ store i32 %0, ptr %p1.04, align 4, !dbg !32, !tbaa !26
+ %inc = add nuw nsw i32 %i.03, 1, !dbg !34
+ %incdec.ptr = getelementptr inbounds i8, ptr %p1.04, i32 4, !dbg !35
+ %exitcond.not = icmp eq i32 %inc, 10, !dbg !36
+ br i1 %exitcond.not, label %for.end, label %for.body, !dbg !30, !llvm.loop !37
+
+for.end: ; preds = %for.body
+ ret void, !dbg !41
+}
+
+declare void @llvm.dbg.value(metadata, metadata, metadata)
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!11, !12, !13, !14, !15, !16}
+!llvm.ident = !{!17}
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "z", scope: !2, file: !3, line: 2, type: !8, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C11, file: !3, producer: "clang version 19.0.0git", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, globals: !4, splitDebugInlining: false, nameTableKind: None)
+!3 = !DIFile(filename: "repro.c", directory: "/home/gbtozers/dev/upstream-llvm")
+!4 = !{!0, !5}
+!5 = !DIGlobalVariableExpression(var: !6, expr: !DIExpression())
+!6 = distinct !DIGlobalVariable(name: "arr", scope: !2, file: !3, line: 1, type: !7, isLocal: false, isDefinition: true)
+!7 = !DICompositeType(tag: DW_TAG_array_type, baseType: !8, size: 320, elements: !9)
+!8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!9 = !{!10}
+!10 = !DISubrange(count: 10)
+!11 = !{i32 7, !"Dwarf Version", i32 5}
+!12 = !{i32 2, !"Debug Info Version", i32 3}
+!13 = !{i32 1, !"wchar_size", i32 4}
+!14 = !{i32 1, !"min_enum_size", i32 4}
+!15 = !{i32 7, !"frame-pointer", i32 2}
+!16 = !{i32 7, !"debug-info-assignment-tracking", i1 true}
+!17 = !{!"clang version 19.0.0git"}
+!18 = distinct !DISubprogram(name: "func1", scope: !3, file: !3, line: 4, type: !19, scopeLine: 5, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !2, retainedNodes: !21)
+!19 = !DISubroutineType(types: !20)
+!20 = !{null}
+!21 = !{!23}
+!22 = !DILocalVariable(name: "i", scope: !18, file: !3, line: 6, type: !8)
+!23 = !DILocalVariable(name: "p1", scope: !18, file: !3, line: 7, type: !24)
+!24 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !8, size: 32)
+!25 = !DILocation(line: 0, scope: !18)
+!26 = !{!27, !27, i64 0}
+!27 = !{!"int", !28, i64 0}
+!28 = !{!"omnipotent char", !29, i64 0}
+!29 = !{!"Simple C/C++ TBAA"}
+!30 = !DILocation(line: 8, column: 3, scope: !31)
+!31 = distinct !DILexicalBlock(scope: !18, file: !3, line: 8, column: 3)
+!32 = !DILocation(line: 9, column: 10, scope: !33)
+!33 = distinct !DILexicalBlock(scope: !31, file: !3, line: 8, column: 3)
+!34 = !DILocation(line: 8, column: 27, scope: !33)
+!35 = !DILocation(line: 8, column: 32, scope: !33)
+!36 = !DILocation(line: 8, column: 21, scope: !33)
+!37 = distinct !{!37, !30, !38, !39, !40}
+!38 = !DILocation(line: 9, column: 12, scope: !31)
+!39 = !{!"llvm.loop.mustprogress"}
+!40 = !{!"llvm.loop.unroll.disable"}
+!41 = !DILocation(line: 10, column: 1, scope: !18)
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-folding-tieddef.mir b/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-folding-tieddef.mir
index cece656..5ebd1a8 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-folding-tieddef.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-folding-tieddef.mir
@@ -100,6 +100,7 @@ registers:
- { id: 38, class: gr8 }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-load-folding.mir b/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-load-folding.mir
index f0af938..b0bff30 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-load-folding.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-load-folding.mir
@@ -87,6 +87,7 @@ liveins:
- { reg: '$edi', virtual-reg: '%0' }
- { reg: '$xmm0', virtual-reg: '%1' }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.if.then:
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/phi-coalesce-subreg.mir b/llvm/test/DebugInfo/MIR/InstrRef/phi-coalesce-subreg.mir
index 51d3f7e..d73d8a3 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/phi-coalesce-subreg.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/phi-coalesce-subreg.mir
@@ -97,6 +97,7 @@ liveins:
- { reg: '$esi', virtual-reg: '%4' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/phi-coalescing.mir b/llvm/test/DebugInfo/MIR/InstrRef/phi-coalescing.mir
index bc1c7eb..6460263 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/phi-coalescing.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/phi-coalescing.mir
@@ -106,6 +106,7 @@ liveins:
- { reg: '$rsi', virtual-reg: '%5' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced.mir b/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced.mir
index d59333e..68c9bf6 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced.mir
@@ -70,6 +70,7 @@ liveins:
- { reg: '$esi', virtual-reg: '%2' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced2.mir b/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced2.mir
index ab2647d..cf17af4b 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced2.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced2.mir
@@ -71,6 +71,7 @@ liveins:
- { reg: '$esi', virtual-reg: '%2' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/phi-regallocd-to-stack.mir b/llvm/test/DebugInfo/MIR/InstrRef/phi-regallocd-to-stack.mir
index 0fe8098..cb35bd8 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/phi-regallocd-to-stack.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/phi-regallocd-to-stack.mir
@@ -65,6 +65,7 @@ liveins:
- { reg: '$esi', virtual-reg: '%2' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/phi-through-regalloc.mir b/llvm/test/DebugInfo/MIR/InstrRef/phi-through-regalloc.mir
index 2a031b2..61dcec4 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/phi-through-regalloc.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/phi-through-regalloc.mir
@@ -94,6 +94,7 @@ liveins:
- { reg: '$esi', virtual-reg: '%2' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/stack-coloring-dbg-phi.mir b/llvm/test/DebugInfo/MIR/InstrRef/stack-coloring-dbg-phi.mir
index 47a7b46..e80ed2e 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/stack-coloring-dbg-phi.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/stack-coloring-dbg-phi.mir
@@ -106,6 +106,7 @@ liveins:
- { reg: '$rdi', virtual-reg: '%15' }
frameInfo:
maxAlignment: 8
+ adjustsStack: true
hasCalls: true
stack:
- { id: 0, size: 8, alignment: 8 }
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/survives-livedebugvars.mir b/llvm/test/DebugInfo/MIR/InstrRef/survives-livedebugvars.mir
index 3e806e4..6dbd2cd 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/survives-livedebugvars.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/survives-livedebugvars.mir
@@ -113,6 +113,7 @@ liveins:
- { reg: '$rdi', virtual-reg: '%2' }
- { reg: '$esi', virtual-reg: '%4' }
frameInfo:
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/Mips/livedebugvars-stop-trimming-loc.mir b/llvm/test/DebugInfo/MIR/Mips/livedebugvars-stop-trimming-loc.mir
index 35ab906..5df7009 100644
--- a/llvm/test/DebugInfo/MIR/Mips/livedebugvars-stop-trimming-loc.mir
+++ b/llvm/test/DebugInfo/MIR/Mips/livedebugvars-stop-trimming-loc.mir
@@ -72,6 +72,8 @@
name: fn2
alignment: 4
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
registers:
- { id: 0, class: gpr32, preferred-register: '' }
- { id: 1, class: gpr32, preferred-register: '' }
diff --git a/llvm/test/DebugInfo/MIR/X86/debug-loc-0.mir b/llvm/test/DebugInfo/MIR/X86/debug-loc-0.mir
index 56a4d835..0b00745 100644
--- a/llvm/test/DebugInfo/MIR/X86/debug-loc-0.mir
+++ b/llvm/test/DebugInfo/MIR/X86/debug-loc-0.mir
@@ -75,7 +75,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 8
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir b/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
index 3cb9da8..4d48774 100644
--- a/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
+++ b/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
@@ -116,7 +116,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 0
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg.mir b/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg.mir
index 35d12b5..e618f48 100644
--- a/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg.mir
+++ b/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg.mir
@@ -114,7 +114,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 0
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/DebugInfo/MIR/X86/livedebugvars-crossbb-interval.mir b/llvm/test/DebugInfo/MIR/X86/livedebugvars-crossbb-interval.mir
index 037306a..42ee73d 100644
--- a/llvm/test/DebugInfo/MIR/X86/livedebugvars-crossbb-interval.mir
+++ b/llvm/test/DebugInfo/MIR/X86/livedebugvars-crossbb-interval.mir
@@ -100,6 +100,7 @@ liveins:
- { reg: '$rdi', virtual-reg: '%2' }
- { reg: '$esi', virtual-reg: '%4' }
frameInfo:
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/X86/prolog-epilog-indirection.mir b/llvm/test/DebugInfo/MIR/X86/prolog-epilog-indirection.mir
index 6941467..4df967c 100644
--- a/llvm/test/DebugInfo/MIR/X86/prolog-epilog-indirection.mir
+++ b/llvm/test/DebugInfo/MIR/X86/prolog-epilog-indirection.mir
@@ -104,6 +104,7 @@ alignment: 16
tracksRegLiveness: true
frameInfo:
maxAlignment: 4
+ adjustsStack: true
hasCalls: true
stack:
- { id: 0, name: l_1081, type: default, offset: 0, size: 4, alignment: 4,
diff --git a/llvm/test/DebugInfo/NVPTX/no-extra-loc.ll b/llvm/test/DebugInfo/NVPTX/no-extra-loc.ll
new file mode 100644
index 0000000..6d3a69b
--- /dev/null
+++ b/llvm/test/DebugInfo/NVPTX/no-extra-loc.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda | FileCheck %s
+; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64-nvidia-cuda | %ptxas-verify %}
+
+
+define i32 @foo(i32 %a, i32 %b) !dbg !3 {
+
+; CHECK: .loc [[FILE:[0-9]+]] 26 0 // extra-lineinfo.cu:26:0
+; CHECK-NOT: .loc [[FILE]] 26 0 // extra-lineinfo.cu:26:0
+; CHECK: .file [[FILE]] "/test/directory/extra-lineinfo.cu"
+
+ %add = add i32 %b, %a, !dbg !6
+ ret i32 %add, !dbg !6
+}
+
+!llvm.dbg.cu = !{!0}
+!nvvm.annotations = !{}
+!llvm.module.flags = !{!2}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang", isOptimized: true, runtimeVersion: 0, emissionKind: DebugDirectivesOnly)
+!1 = !DIFile(filename: "extra-lineinfo.cu", directory: "/test/directory/")
+!2 = !{i32 1, !"Debug Info Version", i32 3}
+!3 = distinct !DISubprogram(name: "kernel", linkageName: "foo", scope: !1, file: !1, line: 123, type: !4, scopeLine: 26, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!4 = !DISubroutineType(types: !5)
+!5 = !{}
+!6 = !DILocation(line: 40, column: 22, scope: !31)
+!31 = distinct !DILexicalBlock(scope: !3, file: !1, line: 3, column: 17)
diff --git a/llvm/test/DebugInfo/X86/dbg-value-funcarg-duplicates.ll b/llvm/test/DebugInfo/X86/dbg-value-funcarg-duplicates.ll
new file mode 100644
index 0000000..e61b725
--- /dev/null
+++ b/llvm/test/DebugInfo/X86/dbg-value-funcarg-duplicates.ll
@@ -0,0 +1,65 @@
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -start-after=codegenprepare -stop-before=finalize-isel -o - %s -experimental-debug-variable-locations=false | FileCheck %s
+
+; Input to this test was created by reducing a Swift file using bugpoint
+
+; CHECK-DAG: ![[LHS:.*]] = !DILocalVariable(name: "lhs"
+
+define hidden i64 @"_wideDivide42"(ptr %0, ptr %1, ptr %2, i64 %3, i64 %4, i64 %5, i64 %6, i64 %7, i64 %8) local_unnamed_addr !dbg !16 {
+; CHECK-LABEL: name: _wideDivide42
+; CHECK-NOT: DBG_VALUE
+; CHECK: DBG_VALUE $rcx, $noreg, ![[LHS]], !DIExpression(DW_OP_LLVM_fragment, 0, 64)
+; CHECK-NEXT: DBG_VALUE $r8, $noreg, ![[LHS]], !DIExpression(DW_OP_LLVM_fragment, 64, 64)
+; CHECK-NEXT: DBG_VALUE $r9, $noreg, ![[LHS]], !DIExpression(DW_OP_LLVM_fragment, 128, 64)
+; CHECK-NEXT: DBG_VALUE %fixed-stack.{{.+}}, ![[LHS]], !DIExpression(DW_OP_LLVM_fragment, 192, 64)
+; The duplicates should be removed:
+; CHECK-NOT: DBG_VALUE
+
+entry:
+ %9 = alloca i64, align 8
+ call void @llvm.dbg.value(metadata i64 %3, metadata !24, metadata !DIExpression(DW_OP_LLVM_fragment, 0, 64)), !dbg !67
+ call void @llvm.dbg.value(metadata i64 %4, metadata !24, metadata !DIExpression(DW_OP_LLVM_fragment, 64, 64)), !dbg !67
+ call void @llvm.dbg.value(metadata i64 %3, metadata !24, metadata !DIExpression(DW_OP_LLVM_fragment, 0, 64)), !dbg !67
+ call void @llvm.dbg.value(metadata i64 %4, metadata !24, metadata !DIExpression(DW_OP_LLVM_fragment, 64, 64)), !dbg !67
+ call void @llvm.dbg.value(metadata i64 %5, metadata !24, metadata !DIExpression(DW_OP_LLVM_fragment, 128, 64)), !dbg !67
+ call void @llvm.dbg.value(metadata i64 %6, metadata !24, metadata !DIExpression(DW_OP_LLVM_fragment, 192, 64)), !dbg !67
+ br i1 poison, label %11, label %10, !dbg !68
+
+10: ; preds = %entry
+ tail call void asm sideeffect "", "n"(i32 7) #7
+ unreachable
+
+11: ; preds = %entry
+ tail call void @abort()
+ unreachable
+}
+
+declare void @abort()
+
+declare void @llvm.dbg.value(metadata, metadata, metadata)
+
+attributes #7 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!13}
+!llvm.linker.options = !{!14, !15}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_Swift, file: !1, producer: "Swift", isOptimized: true, runtimeVersion: 6, emissionKind: FullDebug)
+!1 = !DIFile(filename: "Int128.swift", directory: "")
+!13 = !{i32 2, !"Debug Info Version", i32 3}
+!14 = !{!"-lswiftCore"}
+!15 = !{!"-lobjc"}
+!16 = distinct !DISubprogram(name: "_wideDivide42", scope: !0, file: !1, line: 222, type: !17, scopeLine: 222, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !23)
+!17 = !DISubroutineType(types: !18)
+!18 = !{!19, !20, !20, !20, !20, !20, !20}
+!19 = !DICompositeType(tag: DW_TAG_structure_type, name: "4 x UInt64", flags: DIFlagFwdDecl, runtimeLang: DW_LANG_Swift)
+!20 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "UInt64", scope: !1, file: !1, size: 64, elements: !22, runtimeLang: DW_LANG_Swift)
+!22 = !{}
+!23 = !{!24, !27}
+!24 = !DILocalVariable(name: "lhs", arg: 1, scope: !16, file: !1, line: 223, type: !25, flags: DIFlagArtificial)
+!25 = !DIDerivedType(tag: DW_TAG_const_type, baseType: !26)
+!26 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "2 x 2 x UInt64", file: !1, size: 256, elements: !22, runtimeLang: DW_LANG_Swift)
+!27 = !DILocalVariable(name: "rhs", arg: 2, scope: !16, file: !1, line: 223, type: !28, flags: DIFlagArtificial)
+!28 = !DIDerivedType(tag: DW_TAG_const_type, baseType: !29)
+!29 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "2 x UInt64", file: !1, size: 128, elements: !22, runtimeLang: DW_LANG_Swift)
+!67 = !DILocation(line: 0, scope: !16)
+!68 = !DILocation(line: 225, column: 9, scope: !16)
diff --git a/llvm/test/DebugInfo/X86/live-debug-vars-dse.mir b/llvm/test/DebugInfo/X86/live-debug-vars-dse.mir
index 9443ed5..9088890 100644
--- a/llvm/test/DebugInfo/X86/live-debug-vars-dse.mir
+++ b/llvm/test/DebugInfo/X86/live-debug-vars-dse.mir
@@ -107,7 +107,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 8
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir b/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir
index c5b6d73..3beaf89 100644
--- a/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir
+++ b/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir
@@ -99,6 +99,8 @@
---
name: f1
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, name: x.addr, type: default, offset: 0, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '', callee-saved-restored: true,
@@ -127,6 +129,8 @@ body: |
---
name: f2
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, name: x.addr, type: default, offset: 0, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '', callee-saved-restored: true,
diff --git a/llvm/test/DebugInfo/X86/prolog-params.mir b/llvm/test/DebugInfo/X86/prolog-params.mir
index af21bc8..6629dca 100644
--- a/llvm/test/DebugInfo/X86/prolog-params.mir
+++ b/llvm/test/DebugInfo/X86/prolog-params.mir
@@ -98,6 +98,8 @@ fixedStack:
isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true }
- { id: 2, type: default, offset: 0, size: 4, alignment: 16, stack-id: default,
isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true }
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, name: arr, type: default, offset: 0, size: 8, alignment: 4,
stack-id: default, callee-saved-register: '', callee-saved-restored: true }
diff --git a/llvm/test/DebugInfo/X86/tu-to-non-tu.ll b/llvm/test/DebugInfo/X86/tu-to-non-tu.ll
index 3ad97ad..f80bd8b 100644
--- a/llvm/test/DebugInfo/X86/tu-to-non-tu.ll
+++ b/llvm/test/DebugInfo/X86/tu-to-non-tu.ll
@@ -156,14 +156,14 @@
%struct.templ_non_tu.1 = type { ptr }
@_ZTV6non_tu = dso_local unnamed_addr constant { [3 x ptr] } { [3 x ptr] [ptr null, ptr @_ZTI6non_tu, ptr @_ZN6non_tu2f1Ev] }, align 8
-@v1 = dso_local global { { ptr } } { { ptr } { ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV6non_tu, i32 0, inrange i32 0, i32 2) } }, align 8, !dbg !0
+@v1 = dso_local global { { ptr } } { { ptr } { ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV6non_tu, i32 0, i32 0, i32 2) } }, align 8, !dbg !0
@v5 = dso_local global %struct.ref_internal zeroinitializer, align 1, !dbg !5
@_ZTV12templ_non_tuIiE = dso_local unnamed_addr constant { [3 x ptr] } { [3 x ptr] [ptr null, ptr @_ZTI12templ_non_tuIiE, ptr @_ZN12templ_non_tuIiE2f1Ev] }, align 8
-@v2 = dso_local global { { ptr } } { { ptr } { ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV12templ_non_tuIiE, i32 0, inrange i32 0, i32 2) } }, align 8, !dbg !13
+@v2 = dso_local global { { ptr } } { { ptr } { ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV12templ_non_tuIiE, i32 0, i32 0, i32 2) } }, align 8, !dbg !13
@_ZTV12templ_non_tuIlE = dso_local unnamed_addr constant { [3 x ptr] } { [3 x ptr] [ptr null, ptr @_ZTI12templ_non_tuIlE, ptr @_ZN12templ_non_tuIlE2f1Ev] }, align 8
-@v3 = dso_local global { { ptr } } { { ptr } { ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV12templ_non_tuIlE, i32 0, inrange i32 0, i32 2) } }, align 8, !dbg !32
+@v3 = dso_local global { { ptr } } { { ptr } { ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV12templ_non_tuIlE, i32 0, i32 0, i32 2) } }, align 8, !dbg !32
@_ZTV12templ_non_tuIbE = dso_local unnamed_addr constant { [3 x ptr] } { [3 x ptr] [ptr null, ptr @_ZTI12templ_non_tuIbE, ptr @_ZN12templ_non_tuIbE2f1Ev] }, align 8
-@v4 = dso_local global { { ptr } } { { ptr } { ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV12templ_non_tuIbE, i32 0, inrange i32 0, i32 2) } }, align 8, !dbg !46
+@v4 = dso_local global { { ptr } } { { ptr } { ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV12templ_non_tuIbE, i32 0, i32 0, i32 2) } }, align 8, !dbg !46
@v6 = dso_local global %class.ref_internal_template zeroinitializer, align 1, !dbg !60
@v7 = dso_local global %class.ref_from_ref_internal_template zeroinitializer, align 1, !dbg !69
@_ZTVN10__cxxabiv117__class_type_infoE = external dso_local global ptr
diff --git a/llvm/test/DebugInfo/dpvalue-print-nocrash.ll b/llvm/test/DebugInfo/dpvalue-print-nocrash.ll
index 0a618c6..d8cb542 100755
--- a/llvm/test/DebugInfo/dpvalue-print-nocrash.ll
+++ b/llvm/test/DebugInfo/dpvalue-print-nocrash.ll
@@ -1,4 +1,4 @@
-;; Tests that we can debug-print DPValues that have no markers attached.
+;; Tests that we can debug-print DbgVariableRecords that have no markers attached.
; RUN: opt -passes="instcombine" -debug %s -o /dev/null 2>&1 | FileCheck %s
; REQUIRES: asserts
diff --git a/llvm/test/DebugInfo/print-non-instruction-debug-info.ll b/llvm/test/DebugInfo/print-non-instruction-debug-info.ll
index 2e76561..490f24f 100644
--- a/llvm/test/DebugInfo/print-non-instruction-debug-info.ll
+++ b/llvm/test/DebugInfo/print-non-instruction-debug-info.ll
@@ -26,6 +26,8 @@
; CHECK-NEXT: {{^}} store i32 %[[VAL_ADD]]{{.+}}, !DIAssignID ![[ASSIGNID:[0-9]+]]
; OLDDBG-NEXT: call void @llvm.dbg.assign(metadata i32 %[[VAL_ADD]], metadata ![[VAR_B]], metadata !DIExpression(), metadata ![[ASSIGNID]], metadata ptr %[[VAL_B]], metadata !DIExpression()), !dbg ![[LOC_4:[0-9]+]]
; NEWDBG-NEXT: {{^}} #dbg_assign(i32 %[[VAL_ADD]], ![[VAR_B]], !DIExpression(), ![[ASSIGNID]], ptr %[[VAL_B]], !DIExpression(), ![[LOC_4:[0-9]+]])
+; OLDDBG-NEXT: call void @llvm.dbg.assign(metadata ![[EMPTY:[0-9]+]], metadata ![[VAR_B]], metadata !DIExpression(), metadata ![[ASSIGNID]], metadata ![[EMPTY]], metadata !DIExpression()), !dbg ![[LOC_4]]
+; NEWDBG-NEXT: {{^}} #dbg_assign(![[EMPTY:[0-9]+]], ![[VAR_B]], !DIExpression(), ![[ASSIGNID]], ![[EMPTY]], !DIExpression(), ![[LOC_4]])
; CHECK-NEXT: {{^}} ret i32
; OLDDBG-DAG: declare void @llvm.dbg.value
@@ -40,6 +42,7 @@
; CHECK-DAG: ![[LOC_3]] = !DILocation(line: 3, column: 25
; CHECK-DAG: ![[LOC_4]] = !DILocation(line: 3, column: 30
; CHECK-DAG: ![[LABEL_ID]] = !DILabel(
+; CHECK-DAG: ![[EMPTY]] = !{}
define dso_local i32 @f(i32 %a) !dbg !7 {
entry:
@@ -51,6 +54,7 @@ entry:
call void @llvm.dbg.label(metadata !50), !dbg !32
store i32 %add, ptr %b, !dbg !32, !DIAssignID !40
call void @llvm.dbg.assign(metadata i32 %add, metadata !21, metadata !DIExpression(), metadata !40, metadata ptr %b, metadata !DIExpression()), !dbg !33
+ call void @llvm.dbg.assign(metadata !2, metadata !21, metadata !DIExpression(), metadata !40, metadata !2, metadata !DIExpression()), !dbg !33
ret i32 %add, !dbg !33
}
diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/ELF_section_start_and_stop_symbols.s b/llvm/test/ExecutionEngine/JITLink/AArch64/ELF_section_start_and_stop_symbols.s
new file mode 100644
index 0000000..f8e7ba9
--- /dev/null
+++ b/llvm/test/ExecutionEngine/JITLink/AArch64/ELF_section_start_and_stop_symbols.s
@@ -0,0 +1,43 @@
+# RUN: llvm-mc -triple=aarch64-unknown-linux-gnu -position-independent \
+# RUN: -filetype=obj -o %t.o %s
+# RUN: llvm-jitlink -noexec -check %s %t.o
+
+ .text
+ .file "elf_section_start_stop.c"
+ .globl main
+ .p2align 2
+ .type main,@function
+main:
+ adrp x8, z
+ adrp x9, y
+ ldr w8, [x8, :lo12:z]
+ ldr w9, [x9, :lo12:y]
+ sub w0, w8, w9
+ ret
+.Lfunc_end0:
+ .size main, .Lfunc_end0-main
+
+ .type x,@object
+ .section custom_section,"aw",@progbits
+ .globl x
+ .p2align 2
+x:
+ .word 42
+ .size x, 4
+
+# jitlink-check: *{8}z = (*{8}y) + 4
+
+ .type y,@object
+ .data
+ .globl y
+ .p2align 3, 0x0
+y:
+ .xword __start_custom_section
+ .size y, 8
+
+ .type z,@object
+ .globl z
+ .p2align 3, 0x0
+z:
+ .xword __stop_custom_section
+ .size z, 8
diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_section_start_and_stop_symbols.s b/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_section_start_and_stop_symbols.s
new file mode 100644
index 0000000..8862dd8
--- /dev/null
+++ b/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_section_start_and_stop_symbols.s
@@ -0,0 +1,30 @@
+# RUN: llvm-mc -triple=arm64-apple-darwin24 -filetype=obj -o %t.o %s
+# RUN: llvm-jitlink -noexec -check %s %t.o
+
+# jitlink-check: *{8}_z = (*{8}_y) + 4
+
+ .section __TEXT,__text,regular,pure_instructions
+ .globl _main
+ .p2align 2
+_main:
+ mov w0, #0
+ ret
+
+ .section __DATA,__custom_section
+ .globl _x
+ .p2align 2, 0x0
+_x:
+ .long 42
+
+ .section __DATA,__data
+ .globl _y
+ .p2align 3, 0x0
+_y:
+ .quad section$start$__DATA$__custom_section
+
+ .globl _z
+ .p2align 3, 0x0
+_z:
+ .quad section$end$__DATA$__custom_section
+
+.subsections_via_symbols
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/globals-access.ll b/llvm/test/Instrumentation/HWAddressSanitizer/globals-access.ll
new file mode 100644
index 0000000..f9040af
--- /dev/null
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/globals-access.ll
@@ -0,0 +1,46 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --global-value-regex "x" --version 4
+; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64-linux-gnu -hwasan-globals=0 | FileCheck %s --check-prefixes=NOGLOB
+; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64-linux-gnu -hwasan-globals=1 | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+
+@x = dso_local global i32 0, align 4
+
+;.
+; NOGLOB: @x = dso_local global i32 0, align 4
+;.
+; CHECK: @x = alias i32, inttoptr (i64 add (i64 ptrtoint (ptr @x.hwasan to i64), i64 5260204364768739328) to ptr)
+;.
+define dso_local noundef i32 @_Z3tmpv() sanitize_hwaddress {
+; NOGLOB-LABEL: define dso_local noundef i32 @_Z3tmpv(
+; NOGLOB-SAME: ) #[[ATTR0:[0-9]+]] {
+; NOGLOB-NEXT: entry:
+; NOGLOB-NEXT: [[TMP0:%.*]] = load i32, ptr @x, align 4
+; NOGLOB-NEXT: ret i32 [[TMP0]]
+;
+; CHECK-LABEL: define dso_local noundef i32 @_Z3tmpv(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr @__hwasan_tls, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = or i64 [[TMP12]], 4294967295
+; CHECK-NEXT: [[HWASAN_SHADOW:%.*]] = add i64 [[TMP1]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[HWASAN_SHADOW]] to ptr
+; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 ptrtoint (ptr @x to i64), 56
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i8
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 ptrtoint (ptr @x to i64), 72057594037927935
+; CHECK-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP5]], 4
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
+; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP4]], [[TMP8]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF2:![0-9]+]]
+; CHECK: 10:
+; CHECK-NEXT: call void @llvm.hwasan.check.memaccess.shortgranules(ptr [[TMP2]], ptr @x, i32 2)
+; CHECK-NEXT: br label [[TMP11]]
+; CHECK: 11:
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @x, align 4
+; CHECK-NEXT: ret i32 [[TMP0]]
+;
+entry:
+ %0 = load i32, ptr @x, align 4
+ ret i32 %0
+}
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll
index 4bb846b..62fd7a1 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll
@@ -48,13 +48,12 @@ define dso_local noundef i1 @_Z6targetv() sanitize_hwaddress {
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP26]], i8 [[TMP22]], i64 256, i1 false)
; CHECK-NEXT: [[CALL:%.*]] = call i32 @setjmp(ptr noundef @jbuf)
; CHECK-NEXT: switch i32 [[CALL]], label [[WHILE_BODY:%.*]] [
-; CHECK-NEXT: i32 1, label [[RETURN:%.*]]
-; CHECK-NEXT: i32 2, label [[SW_BB1:%.*]]
+; CHECK-NEXT: i32 1, label [[RETURN:%.*]]
+; CHECK-NEXT: i32 2, label [[SW_BB1:%.*]]
; CHECK-NEXT: ]
; CHECK: sw.bb1:
; CHECK-NEXT: br label [[RETURN]]
; CHECK: while.body:
-; CHECK-NEXT: call void @llvm.hwasan.check.memaccess(ptr [[TMP16]], ptr @stackbuf, i32 19)
; CHECK-NEXT: store ptr [[BUF_HWASAN]], ptr @stackbuf, align 8
; CHECK-NEXT: call void @may_jump()
; CHECK-NEXT: br label [[RETURN]]
diff --git a/llvm/test/Instrumentation/InstrProfiling/Coro/coro-split-musttail6.ll b/llvm/test/Instrumentation/InstrProfiling/Coro/coro-split-musttail6.ll
index 5d068872..4359d53 100644
--- a/llvm/test/Instrumentation/InstrProfiling/Coro/coro-split-musttail6.ll
+++ b/llvm/test/Instrumentation/InstrProfiling/Coro/coro-split-musttail6.ll
@@ -85,7 +85,6 @@ exit:
ret void
}
-; FIXME: The fakeresume1 here should be marked as musttail.
; Verify that in the resume part resume call is marked with musttail.
; CHECK-LABEL: @f.resume(
; CHECK: musttail call fastcc void @fakeresume1(ptr align 8 null)
diff --git a/llvm/test/Instrumentation/InstrProfiling/Coro/coro-split-musttail7.ll b/llvm/test/Instrumentation/InstrProfiling/Coro/coro-split-musttail7.ll
index 6ea81c6..2a14be0 100644
--- a/llvm/test/Instrumentation/InstrProfiling/Coro/coro-split-musttail7.ll
+++ b/llvm/test/Instrumentation/InstrProfiling/Coro/coro-split-musttail7.ll
@@ -88,7 +88,6 @@ exit:
ret void
}
-; FIXME: The fakeresume1 here should be marked as musttail.
; Verify that in the resume part resume call is marked with musttail.
; CHECK-LABEL: @f.resume(
; CHECK: musttail call fastcc void @fakeresume1(ptr align 8 null)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
index 96ac4b6..9133b32 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
@@ -758,7 +758,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef %t, i32 noundef
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -808,7 +808,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef %t, i32 noundef
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -851,7 +851,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -901,7 +901,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -936,7 +936,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -986,7 +986,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1021,7 +1021,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1071,7 +1071,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1106,7 +1106,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(fp128 noundef %t, i32 nound
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1156,7 +1156,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(fp128 noundef %t, i32 nound
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1191,7 +1191,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1241,7 +1241,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1276,7 +1276,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz([2 x i64] %t.coe
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1326,7 +1326,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz([2 x i64] %t.coe
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1361,7 +1361,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz([2 x double] a
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1411,7 +1411,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz([2 x double] a
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1446,7 +1446,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz([4 x double] alignst
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1496,7 +1496,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz([4 x double] alignst
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1531,7 +1531,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz([2 x i64] %t.co
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1581,7 +1581,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz([2 x i64] %t.co
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1616,7 +1616,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz([2 x fp128] ali
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1666,7 +1666,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz([2 x fp128] ali
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1701,7 +1701,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz([4 x fp128] ali
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1751,7 +1751,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz([4 x fp128] ali
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
index 1535fcc..e0b5907 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
@@ -39,7 +39,7 @@ define i64 @foo(i64 %guard, ...) #1 {
; Only 56 bytes of the register save area is copied, because of
; "use-soft-float".
-; CHECK: call void @llvm.va_start(ptr %vl)
+; CHECK: call void @llvm.va_start.p0(ptr %vl)
; CHECK: [[VlAddr:%.*]] = ptrtoint ptr %vl to i64
; CHECK: [[RegSaveAreaAddrAddr:%.*]] = add i64 [[VlAddr]], 24
; CHECK: [[RegSaveAreaAddr:%.*]] = inttoptr i64 [[RegSaveAreaAddrAddr]] to ptr
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
index aff4d2c..2051015 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
@@ -560,7 +560,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -580,7 +580,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -623,7 +623,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -643,7 +643,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -678,7 +678,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -698,7 +698,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -733,7 +733,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -753,7 +753,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -788,7 +788,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -808,7 +808,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -843,7 +843,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -863,7 +863,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -898,7 +898,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -918,7 +918,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -953,7 +953,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -973,7 +973,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1008,7 +1008,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1028,7 +1028,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1063,7 +1063,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1083,7 +1083,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1118,7 +1118,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1138,7 +1138,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1173,7 +1173,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1193,7 +1193,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
index 21f3311..f07f3ad 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
@@ -542,7 +542,7 @@ define void @VAStart(i32 %x, ...) sanitize_memory {
; CHECK-NEXT: [[TMP29:%.*]] = add i64 [[TMP27]], 17592186044416, !dbg [[DBG11]]
; CHECK-NEXT: [[TMP30:%.*]] = inttoptr i64 [[TMP29]] to ptr, !dbg [[DBG11]]
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 24, i1 false), !dbg [[DBG11]]
-; CHECK-NEXT: call void @llvm.va_start(ptr [[VA]]), !dbg [[DBG11]]
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]]), !dbg [[DBG11]]
; CHECK-NEXT: [[TMP31:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG11]]
; CHECK-NEXT: [[TMP32:%.*]] = add i64 [[TMP31]], 16, !dbg [[DBG11]]
; CHECK-NEXT: [[TMP33:%.*]] = inttoptr i64 [[TMP32]] to ptr, !dbg [[DBG11]]
diff --git a/llvm/test/Instrumentation/ThreadSanitizer/atomic.ll b/llvm/test/Instrumentation/ThreadSanitizer/atomic.ll
index 76afc4b..8b387cd 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/atomic.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/atomic.ll
@@ -78,6 +78,26 @@ entry:
; CHECK-LABEL: atomic8_xchg_monotonic
; CHECK: call i8 @__tsan_atomic8_exchange(ptr %a, i8 0, i32 0), !dbg
+define void @atomic8_xchg_monotonic_ptr(ptr %a, ptr %b) nounwind uwtable {
+entry:
+ atomicrmw xchg ptr %a, ptr %b monotonic, !dbg !7
+ ret void, !dbg !7
+}
+; CHECK-LABEL: atomic8_xchg_monotonic_ptr
+; CHECK: [[ARG:%.*]] = ptrtoint ptr %b to i64, !dbg
+; CHECK: [[RES:%.*]] = call i64 @__tsan_atomic64_exchange(ptr %a, i64 [[ARG]], i32 0), !dbg
+; CHECK: [[CAST:%.*]] = inttoptr i64 [[RES]] to ptr, !dbg
+
+define void @atomic8_xchg_monotonic_float(ptr %a, float %b) nounwind uwtable {
+entry:
+ atomicrmw xchg ptr %a, float %b monotonic, !dbg !7
+ ret void, !dbg !7
+}
+; CHECK-LABEL: atomic8_xchg_monotonic_float
+; CHECK: [[ARG:%.*]] = bitcast float %b to i32, !dbg
+; CHECK: [[RES:%.*]] = call i32 @__tsan_atomic32_exchange(ptr %a, i32 [[ARG]], i32 0), !dbg
+; CHECK: [[CAST:%.*]] = bitcast i32 [[RES]] to float, !dbg
+
define void @atomic8_add_monotonic(ptr %a) nounwind uwtable {
entry:
atomicrmw add ptr %a, i8 0 monotonic, !dbg !7
diff --git a/llvm/test/LTO/AArch64/link-branch-target-enforcement.ll b/llvm/test/LTO/AArch64/link-branch-target-enforcement.ll
index 74d9c86..ccf8cf6 100644
--- a/llvm/test/LTO/AArch64/link-branch-target-enforcement.ll
+++ b/llvm/test/LTO/AArch64/link-branch-target-enforcement.ll
@@ -32,7 +32,6 @@ entry:
; CHECK-DUMP: <main>:
; CHECK-DUMP: bl 0x8 <main+0x8>
; CHECK-DUMP: <foo>:
-; CHECK-DUMP: paciasp
; `main` doesn't support BTI while `foo` does, so in the binary
; we should see only PAC which is supported by both.
diff --git a/llvm/test/LTO/AArch64/link-sign-return-address.ll b/llvm/test/LTO/AArch64/link-sign-return-address.ll
deleted file mode 100644
index c25857c..0000000
--- a/llvm/test/LTO/AArch64/link-sign-return-address.ll
+++ /dev/null
@@ -1,43 +0,0 @@
-; Testcase to check that module with different branch-target-enforcement can
-; be mixed.
-;
-; RUN: llvm-as %s -o %t1.bc
-; RUN: llvm-as %p/Inputs/foo.ll -o %t2.bc
-; RUN: llvm-lto -exported-symbol main \
-; RUN: -exported-symbol foo \
-; RUN: -filetype=obj \
-; RUN: %t2.bc %t1.bc \
-; RUN: -o %t1.exe 2>&1
-; RUN: llvm-objdump -d %t1.exe | FileCheck --check-prefix=CHECK-DUMP %s
-; RUN: llvm-readelf -n %t1.exe | FileCheck --allow-empty --check-prefix=CHECK-PROP %s
-
-target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64-unknown-linux-gnu"
-
-declare i32 @foo();
-
-define i32 @main() {
-entry:
- %add = call i32 @foo()
- ret i32 %add
-}
-
-!llvm.module.flags = !{!0, !1, !2, !3 }
-!0 = !{i32 8, !"branch-target-enforcement", i32 0}
-!1 = !{i32 8, !"sign-return-address", i32 0}
-!2 = !{i32 8, !"sign-return-address-all", i32 0}
-!3 = !{i32 8, !"sign-return-address-with-bkey", i32 0}
-
-; CHECK-DUMP: <foo>:
-; CHECK-DUMP: paciasp
-; CHECK-DUMP: mov w0, #0x2a
-; CHECK-DUMP: autiasp
-; CHECK-DUMP: ret
-; CHECK-DUMP: <main>:
-; CHECK-DUMP-NOT: paciasp
-; CHECK-DUMP: str x30,
-; CHECK-DUMP: bl 0x14 <main+0x4>
-
-; `main` doesn't support PAC sign-return-address while `foo` does, so in the binary
-; we should not see anything.
-; CHECK-PROP-NOT: Properties: aarch64 feature: PAC
\ No newline at end of file
diff --git a/llvm/test/Linker/link-arm-and-thumb.ll b/llvm/test/Linker/link-arm-and-thumb.ll
index 37bd8c3..a90f212 100644
--- a/llvm/test/Linker/link-arm-and-thumb.ll
+++ b/llvm/test/Linker/link-arm-and-thumb.ll
@@ -13,12 +13,11 @@ entry:
ret i32 %add
}
-; CHECK: define i32 @main() [[MAIN_ATTRS:#[0-9]+]]
+; CHECK: define i32 @main() {
; CHECK: define i32 @foo(i32 %a, i32 %b) [[ARM_ATTRS:#[0-9]+]]
; CHECK: define i32 @bar(i32 %a, i32 %b) [[THUMB_ATTRS:#[0-9]+]]
-; CHECK: attributes [[MAIN_ATTRS]] = { {{.*}} }
-; CHECK: attributes [[ARM_ATTRS]] = { {{.*}} "target-features"="-thumb-mode" }
-; CHECK: attributes [[THUMB_ATTRS]] = { {{.*}} "target-features"="+thumb-mode" }
+; CHECK: attributes [[ARM_ATTRS]] = { "target-features"="-thumb-mode" }
+; CHECK: attributes [[THUMB_ATTRS]] = { "target-features"="+thumb-mode" }
; STDERR-NOT: warning: Linking two modules of different target triples:
diff --git a/llvm/test/MC/AArch64/coff-relocations.s b/llvm/test/MC/AArch64/coff-relocations.s
index fb67a21..2370fd9 100644
--- a/llvm/test/MC/AArch64/coff-relocations.s
+++ b/llvm/test/MC/AArch64/coff-relocations.s
@@ -1,7 +1,11 @@
// RUN: llvm-mc -triple aarch64-windows -filetype obj -o %t.obj %s
-// RUN: llvm-readobj -r %t.obj | FileCheck %s
+// RUN: llvm-mc -triple arm64ec-windows -filetype obj -o %t-ec.obj %s
+// RUN: llvm-readobj -r %t.obj | FileCheck %s --check-prefixes=CHECK,CHECK-ARM64
+// RUN: llvm-readobj -r %t-ec.obj | FileCheck %s --check-prefixes=CHECK,CHECK-ARM64EC
// RUN: llvm-objdump --no-print-imm-hex -d %t.obj | FileCheck %s --check-prefix=DISASM
+// RUN: llvm-objdump --no-print-imm-hex -d %t-ec.obj | FileCheck %s --check-prefix=DISASM
// RUN: llvm-objdump -s %t.obj | FileCheck %s --check-prefix=DATA
+// RUN: llvm-objdump -s %t-ec.obj | FileCheck %s --check-prefix=DATA
// IMAGE_REL_ARM64_ADDR32
.Linfo_foo:
@@ -71,8 +75,10 @@ tbz x0, #0, target
// IMAGE_REL_ARM64_REL32 because IMAGE_REL_ARM64_REL64 does not exist.
.xword .Linfo_foo - .Ltable
-// CHECK: Format: COFF-ARM64
-// CHECK: Arch: aarch64
+// CHECK-ARM64: Format: COFF-ARM64
+// CHECK-ARM64EC: Format: COFF-ARM64EC
+// CHECK-ARM64: Arch: aarch64
+// CHECK-ARM64EC: Arch: aarch64
// CHECK: AddressSize: 64bit
// CHECK: Relocations [
// CHECK: Section (1) .text {
diff --git a/llvm/test/MC/AArch64/constant-pool-sizes.s b/llvm/test/MC/AArch64/constant-pool-sizes.s
new file mode 100644
index 0000000..279402a
--- /dev/null
+++ b/llvm/test/MC/AArch64/constant-pool-sizes.s
@@ -0,0 +1,25 @@
+// RUN: llvm-mc -triple aarch64-none-linux-gnu %s | FileCheck %s
+
+ ldr w0, =symbol
+ ldr x1, =symbol
+
+ ldr w2, =1234567890
+ ldr x3, =1234567890
+
+// CHECK: ldr w0, .Ltmp0
+// CHECK: ldr x1, .Ltmp1
+// CHECK: ldr w2, .Ltmp2
+// CHECK: ldr x3, .Ltmp3
+
+// CHECK: .p2align 2, 0x0
+// CHECK-NEXT:.Ltmp0:
+// CHECK-NEXT: .word symbol
+// CHECK: .p2align 3, 0x0
+// CHECK-NEXT:.Ltmp1:
+// CHECK-NEXT: .xword symbol
+// CHECK: .p2align 2, 0x0
+// CHECK-NEXT:.Ltmp2:
+// CHECK-NEXT: .word 1234567890
+// CHECK: .p2align 3, 0x0
+// CHECK-NEXT:.Ltmp3:
+// CHECK-NEXT: .xword 1234567890
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_sop1.s b/llvm/test/MC/AMDGPU/gfx11_asm_sop1.s
index 8a7f643..c4029b06 100644
--- a/llvm/test/MC/AMDGPU/gfx11_asm_sop1.s
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_sop1.s
@@ -2964,6 +2964,9 @@ s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_SAVE_WAVE)
s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_TBA)
// GFX11: encoding: [0x85,0x4c,0x80,0xbe]
+s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_TBA_TO_PC)
+// GFX11: encoding: [0x86,0x4c,0x80,0xbe]
+
s_ctz_i32_b32 s5, s1
// GFX11: encoding: [0x01,0x08,0x85,0xbe]
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vinterp.s b/llvm/test/MC/AMDGPU/gfx11_asm_vinterp.s
deleted file mode 100644
index fdfbf65..0000000
--- a/llvm/test/MC/AMDGPU/gfx11_asm_vinterp.s
+++ /dev/null
@@ -1,278 +0,0 @@
-// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -show-encoding %s | FileCheck -check-prefix=GCN %s
-// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -show-encoding %s | FileCheck -check-prefix=GCN %s
-
-v_interp_p10_f32 v0, v1, v2, v3
-// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f32 v1, v10, v20, v30
-// GCN: v_interp_p10_f32 v1, v10, v20, v30 wait_exp:0 ; encoding: [0x01,0x00,0x00,0xcd,0x0a,0x29,0x7a,0x04]
-
-v_interp_p10_f32 v2, v11, v21, v31
-// GCN: v_interp_p10_f32 v2, v11, v21, v31 wait_exp:0 ; encoding: [0x02,0x00,0x00,0xcd,0x0b,0x2b,0x7e,0x04]
-
-v_interp_p10_f32 v3, v12, v22, v32
-// GCN: v_interp_p10_f32 v3, v12, v22, v32 wait_exp:0 ; encoding: [0x03,0x00,0x00,0xcd,0x0c,0x2d,0x82,0x04]
-
-v_interp_p10_f32 v0, v1, v2, v3 clamp
-// GCN: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x00,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f32 v0, -v1, v2, v3
-// GCN: v_interp_p10_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x24]
-
-v_interp_p10_f32 v0, v1, -v2, v3
-// GCN: v_interp_p10_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x44]
-
-v_interp_p10_f32 v0, v1, v2, -v3
-// GCN: v_interp_p10_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x84]
-
-v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0
-// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1
-// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x00,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f32 v0, v1, v2, v3 wait_exp:7
-// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x00,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7
-// GCN: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7 ; encoding: [0x00,0x87,0x00,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f32 v0, v1, v2, v3
-// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f32 v1, v10, v20, v30
-// GCN: v_interp_p2_f32 v1, v10, v20, v30 wait_exp:0 ; encoding: [0x01,0x00,0x01,0xcd,0x0a,0x29,0x7a,0x04]
-
-v_interp_p2_f32 v2, v11, v21, v31
-// GCN: v_interp_p2_f32 v2, v11, v21, v31 wait_exp:0 ; encoding: [0x02,0x00,0x01,0xcd,0x0b,0x2b,0x7e,0x04]
-
-v_interp_p2_f32 v3, v12, v22, v32
-// GCN: v_interp_p2_f32 v3, v12, v22, v32 wait_exp:0 ; encoding: [0x03,0x00,0x01,0xcd,0x0c,0x2d,0x82,0x04]
-
-v_interp_p2_f32 v0, v1, v2, v3 clamp
-// GCN: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x01,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f32 v0, -v1, v2, v3
-// GCN: v_interp_p2_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x24]
-
-v_interp_p2_f32 v0, v1, -v2, v3
-// GCN: v_interp_p2_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x44]
-
-v_interp_p2_f32 v0, v1, v2, -v3
-// GCN: v_interp_p2_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x84]
-
-v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0
-// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1
-// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x01,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f32 v0, v1, v2, v3 wait_exp:7
-// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x01,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7
-// GCN: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7 ; encoding: [0x00,0x87,0x01,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, -v1, v2, v3
-// GCN: v_interp_p10_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x24]
-
-v_interp_p10_f16_f32 v0, v1, -v2, v3
-// GCN: v_interp_p10_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x44]
-
-v_interp_p10_f16_f32 v0, v1, v2, -v3
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x84]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 clamp
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,0]
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0]
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0 ; encoding: [0x00,0x08,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0]
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0 ; encoding: [0x00,0x10,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0]
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0 ; encoding: [0x00,0x20,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1]
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0 ; encoding: [0x00,0x40,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1]
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0 ; encoding: [0x00,0x78,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0x4d,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p10_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0xe4]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, -v1, v2, v3
-// GCN: v_interp_p2_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x24]
-
-v_interp_p2_f16_f32 v0, v1, -v2, v3
-// GCN: v_interp_p2_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x44]
-
-v_interp_p2_f16_f32 v0, v1, v2, -v3
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x84]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 clamp
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,0]
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0]
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0 ; encoding: [0x00,0x08,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0]
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0 ; encoding: [0x00,0x10,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0]
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0 ; encoding: [0x00,0x20,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1]
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0 ; encoding: [0x00,0x40,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1]
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0 ; encoding: [0x00,0x78,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0x4d,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p2_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0xe4]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3
-// GCN: v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x24]
-
-v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x44]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x84]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,0]
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0]
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0 ; encoding: [0x00,0x08,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0]
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0 ; encoding: [0x00,0x10,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0]
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0 ; encoding: [0x00,0x20,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1]
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0 ; encoding: [0x00,0x40,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1]
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0 ; encoding: [0x00,0x78,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0x4d,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p10_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0xe4]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3
-// GCN: v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x24]
-
-v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x44]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x84]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,0]
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0]
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0 ; encoding: [0x00,0x08,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0]
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0 ; encoding: [0x00,0x10,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0]
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0 ; encoding: [0x00,0x20,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1]
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0 ; encoding: [0x00,0x40,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1]
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0 ; encoding: [0x00,0x78,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0x4d,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p2_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0xe4]
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_sop1.s b/llvm/test/MC/AMDGPU/gfx12_asm_sop1.s
index 4fd355f..939320e 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_sop1.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_sop1.s
@@ -3708,9 +3708,12 @@ s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_SAVE_WAVE)
s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_TBA)
// GFX12: encoding: [0x85,0x4c,0x80,0xbe]
-s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_SE_AID_ID)
+s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_TBA_TO_PC)
// GFX12: encoding: [0x86,0x4c,0x80,0xbe]
+s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_SE_AID_ID)
+// GFX12: encoding: [0x87,0x4c,0x80,0xbe]
+
s_ctz_i32_b32 s5, s1
// GFX12: encoding: [0x01,0x08,0x85,0xbe]
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vbuffer_mubuf.s b/llvm/test/MC/AMDGPU/gfx12_asm_vbuffer_mubuf.s
index 08ec5b3..efeaf83 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_vbuffer_mubuf.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vbuffer_mubuf.s
@@ -3970,70 +3970,70 @@ buffer_atomic_max_u64 v[5:6], off, s[8:11], s3 offset:8388607 dlc
buffer_atomic_max_u64 v[5:6], off, s[8:11], s3 offset:8388607 glc slc dlc
// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
-buffer_atomic_min_f32 v5, off, s[8:11], s3 offset:8388607
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607
// GFX12: encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
-buffer_atomic_min_f32 v255, off, s[8:11], s3 offset:8388607
+buffer_atomic_min_num_f32 v255, off, s[8:11], s3 offset:8388607
// GFX12: encoding: [0x03,0x40,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
-buffer_atomic_min_f32 v5, off, s[12:15], s3 offset:8388607
+buffer_atomic_min_num_f32 v5, off, s[12:15], s3 offset:8388607
// GFX12: encoding: [0x03,0x40,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
-buffer_atomic_min_f32 v5, off, s[96:99], s3 offset:8388607
+buffer_atomic_min_num_f32 v5, off, s[96:99], s3 offset:8388607
// GFX12: encoding: [0x03,0x40,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
-buffer_atomic_min_f32 v5, off, s[8:11], s101 offset:8388607
+buffer_atomic_min_num_f32 v5, off, s[8:11], s101 offset:8388607
// GFX12: encoding: [0x65,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
-buffer_atomic_min_f32 v5, off, s[8:11], m0 offset:8388607
+buffer_atomic_min_num_f32 v5, off, s[8:11], m0 offset:8388607
// GFX12: encoding: [0x7d,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
-buffer_atomic_min_f32 v5, off, s[8:11], 0 offset:8388607
+buffer_atomic_min_num_f32 v5, off, s[8:11], 0 offset:8388607
// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
-buffer_atomic_min_f32 v5, off, s[8:11], -1 offset:8388607
+buffer_atomic_min_num_f32 v5, off, s[8:11], -1 offset:8388607
// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
-buffer_atomic_min_f32 v5, off, s[8:11], 0.5 offset:8388607
+buffer_atomic_min_num_f32 v5, off, s[8:11], 0.5 offset:8388607
// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
-buffer_atomic_min_f32 v5, off, s[8:11], -4.0 offset:8388607
+buffer_atomic_min_num_f32 v5, off, s[8:11], -4.0 offset:8388607
// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
-buffer_atomic_min_f32 v5, v0, s[8:11], s3 idxen offset:8388607
+buffer_atomic_min_num_f32 v5, v0, s[8:11], s3 idxen offset:8388607
// GFX12: encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
-buffer_atomic_min_f32 v5, v0, s[8:11], s3 offen offset:8388607
+buffer_atomic_min_num_f32 v5, v0, s[8:11], s3 offen offset:8388607
// GFX12: encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
-buffer_atomic_min_f32 v5, off, s[8:11], s3
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3
// GFX12: encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
-buffer_atomic_min_f32 v5, off, s[8:11], s3 offset:0
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:0
// GFX12: encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
-buffer_atomic_min_f32 v5, off, s[8:11], s3 offset:7
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:7
// GFX12: encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
-buffer_atomic_min_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
// GFX12: encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
-buffer_atomic_min_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
// GFX12: encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
-buffer_atomic_min_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
// GFX12: encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
-buffer_atomic_min_f32 v5, off, s[8:11], s3 offset:8388607 glc
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 glc
// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
-buffer_atomic_min_f32 v5, off, s[8:11], s3 offset:8388607 slc
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 slc
// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
-buffer_atomic_min_f32 v5, off, s[8:11], s3 offset:8388607 dlc
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 dlc
// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
-buffer_atomic_min_f32 v5, off, s[8:11], s3 offset:8388607 glc slc dlc
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 glc slc dlc
// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607
diff --git a/llvm/test/MC/AMDGPU/hsa-amdgpu-exprs.s b/llvm/test/MC/AMDGPU/hsa-amdgpu-exprs.s
new file mode 100644
index 0000000..4623500
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-amdgpu-exprs.s
@@ -0,0 +1,27 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx90a < %s | FileCheck --check-prefix=ASM %s
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx90a -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// OBJDUMP: 0000 00000000 0f000000 00000000 00000000
+
+.text
+
+.p2align 8
+.type caller,@function
+caller:
+ s_endpgm
+
+.rodata
+
+.p2align 6
+.amdhsa_kernel caller
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_private_segment_fixed_size max(7, callee1.private_seg_size, callee2.private_seg_size)
+.end_amdhsa_kernel
+
+.set callee1.private_seg_size, 4
+.set callee2.private_seg_size, 15
+
+// ASM: .amdhsa_private_segment_fixed_size max(7, callee1.private_seg_size, callee2.private_seg_size)
diff --git a/llvm/test/MC/AMDGPU/hsa-gfx12-v4.s b/llvm/test/MC/AMDGPU/hsa-gfx12-v4.s
index 8b90e20..7b59190 100644
--- a/llvm/test/MC/AMDGPU/hsa-gfx12-v4.s
+++ b/llvm/test/MC/AMDGPU/hsa-gfx12-v4.s
@@ -29,7 +29,7 @@
// OBJDUMP-NEXT: 0000 00000000 00000000 00000000 00000000
// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000
// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000000
-// OBJDUMP-NEXT: 0030 00000c60 80000000 00000000 00000000
+// OBJDUMP-NEXT: 0030 00000c60 80000000 00040000 00000000
// complete
// OBJDUMP-NEXT: 0040 01000000 01000000 08000000 00000000
// OBJDUMP-NEXT: 0050 00000000 00000000 00000000 00000000
@@ -39,12 +39,12 @@
// OBJDUMP-NEXT: 0080 00000000 00000000 00000000 00000000
// OBJDUMP-NEXT: 0090 00000000 00000000 00000000 00000000
// OBJDUMP-NEXT: 00a0 00000000 00000000 00000000 00000000
-// OBJDUMP-NEXT: 00b0 00000060 80000000 00000000 00000000
+// OBJDUMP-NEXT: 00b0 00000060 80000000 00040000 00000000
// disabled_user_sgpr
// OBJDUMP-NEXT: 00c0 00000000 00000000 00000000 00000000
// OBJDUMP-NEXT: 00d0 00000000 00000000 00000000 00000000
// OBJDUMP-NEXT: 00e0 00000000 00000000 00000000 00000000
-// OBJDUMP-NEXT: 00f0 00000c60 80000000 00000000 00000000
+// OBJDUMP-NEXT: 00f0 00000c60 80000000 00040000 00000000
.text
// ASM: .text
diff --git a/llvm/test/MC/AMDGPU/hsa-sym-expr-failure.s b/llvm/test/MC/AMDGPU/hsa-sym-expr-failure.s
new file mode 100644
index 0000000..fab3e89
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-sym-expr-failure.s
@@ -0,0 +1,281 @@
+// RUN: not llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx90a %s 2>&1 | FileCheck --check-prefix=ASM %s
+
+// Some expressions currently require (immediately) solvable expressions, i.e.,
+// they don't depend on yet-unknown symbolic values.
+
+.text
+// ASM: .text
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type user_sgpr_count,@function
+user_sgpr_count:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel user_sgpr_count
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_count defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_count
+
+.p2align 8
+.type user_sgpr_private_segment_buffer,@function
+user_sgpr_private_segment_buffer:
+ s_endpgm
+
+.amdhsa_kernel user_sgpr_private_segment_buffer
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_private_segment_buffer defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer
+
+.p2align 8
+.type user_sgpr_kernarg_preload_length,@function
+user_sgpr_kernarg_preload_length:
+ s_endpgm
+
+.amdhsa_kernel user_sgpr_kernarg_preload_length
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_kernarg_preload_length defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_length defined_boolean
+
+.p2align 8
+.type user_sgpr_kernarg_preload_offset,@function
+user_sgpr_kernarg_preload_offset:
+ s_endpgm
+
+.amdhsa_kernel user_sgpr_kernarg_preload_offset
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_kernarg_preload_offset defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_offset defined_boolean
+
+.p2align 8
+.type user_sgpr_dispatch_ptr,@function
+user_sgpr_dispatch_ptr:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel user_sgpr_dispatch_ptr
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_dispatch_ptr defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr
+
+.p2align 8
+.type user_sgpr_queue_ptr,@function
+user_sgpr_queue_ptr:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel user_sgpr_queue_ptr
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_queue_ptr defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr
+
+.p2align 8
+.type user_sgpr_kernarg_segment_ptr,@function
+user_sgpr_kernarg_segment_ptr:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel user_sgpr_kernarg_segment_ptr
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_kernarg_segment_ptr defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr
+
+.p2align 8
+.type user_sgpr_dispatch_id,@function
+user_sgpr_dispatch_id:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel user_sgpr_dispatch_id
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_dispatch_id defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id
+
+.p2align 8
+.type user_sgpr_flat_scratch_init,@function
+user_sgpr_flat_scratch_init:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel user_sgpr_flat_scratch_init
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_flat_scratch_init defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init
+
+.p2align 8
+.type user_sgpr_private_segment_size,@function
+user_sgpr_private_segment_size:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel user_sgpr_private_segment_size
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_private_segment_size defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size
+
+.p2align 8
+.type wavefront_size32,@function
+wavefront_size32:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel wavefront_size32
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_wavefront_size32 defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_wavefront_size32
+
+.p2align 8
+.type next_free_vgpr,@function
+next_free_vgpr:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel next_free_vgpr
+ .amdhsa_next_free_vgpr defined_boolean
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_next_free_vgpr
+
+.p2align 8
+.type next_free_sgpr,@function
+next_free_sgpr:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel next_free_sgpr
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr defined_boolean
+ .amdhsa_accum_offset 4
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_next_free_sgpr
+
+.p2align 8
+.type accum_offset,@function
+accum_offset:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel accum_offset
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_accum_offset
+
+.p2align 8
+.type reserve_vcc,@function
+reserve_vcc:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel reserve_vcc
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_reserve_vcc defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_reserve_vcc
+
+.p2align 8
+.type reserve_flat_scratch,@function
+reserve_flat_scratch:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel reserve_flat_scratch
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_reserve_flat_scratch defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_reserve_flat_scratch
+
+.p2align 8
+.type shared_vgpr_count,@function
+shared_vgpr_count:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel shared_vgpr_count
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_shared_vgpr_count defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_shared_vgpr_count
+
+.set defined_boolean, 1
+
+// ASM: .set defined_boolean, 1
+// ASM-NEXT: .no_dead_strip defined_boolean
diff --git a/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx10.s b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx10.s
new file mode 100644
index 0000000..95af59c
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx10.s
@@ -0,0 +1,190 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx1010 < %s | FileCheck --check-prefix=ASM %s
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx1010 -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// When going from asm -> asm, the expressions should remain the same (i.e., symbolic).
+// When going from asm -> obj, the expressions should get resolved (through fixups),
+
+// OBJDUMP: Contents of section .rodata
+// expr_defined_later
+// OBJDUMP-NEXT: 0000 2b000000 2c000000 00000000 00000000
+// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0030 00f0afe4 801f007f 000c0000 00000000
+// expr_defined
+// OBJDUMP-NEXT: 0040 2a000000 2b000000 00000000 00000000
+// OBJDUMP-NEXT: 0050 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0060 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0070 00f0afe4 801f007f 000c0000 00000000
+
+.text
+// ASM: .text
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type expr_defined_later,@function
+expr_defined_later:
+ s_endpgm
+
+.p2align 8
+.type expr_defined,@function
+expr_defined:
+ s_endpgm
+
+.rodata
+// ASM: .rodata
+
+.p2align 6
+.amdhsa_kernel expr_defined_later
+ .amdhsa_group_segment_fixed_size defined_value+2
+ .amdhsa_private_segment_fixed_size defined_value+3
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_workgroup_processor_mode defined_boolean
+ .amdhsa_memory_ordered defined_boolean
+ .amdhsa_forward_progress defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+.set defined_value, 41
+.set defined_2_bits, 3
+.set defined_boolean, 1
+
+.p2align 6
+.amdhsa_kernel expr_defined
+ .amdhsa_group_segment_fixed_size defined_value+1
+ .amdhsa_private_segment_fixed_size defined_value+2
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_workgroup_processor_mode defined_boolean
+ .amdhsa_memory_ordered defined_boolean
+ .amdhsa_forward_progress defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+// ASM: .amdhsa_kernel expr_defined_later
+// ASM-NEXT: .amdhsa_group_segment_fixed_size defined_value+2
+// ASM-NEXT: .amdhsa_private_segment_fixed_size defined_value+3
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&62)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&1)>>0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&2)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&4)>>2
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&8)>>3
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&16)>>4
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&32)>>5
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&64)>>6
+// ASM-NEXT: .amdhsa_wavefront_size32 (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&1024)>>10
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1)>>0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&128)>>7
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&256)>>8
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&512)>>9
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1024)>>10
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&6144)>>11
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_reserve_xnack_mask 1
+// ASM-NEXT: .amdhsa_float_round_mode_32 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&12288)>>12
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&49152)>>14
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&196608)>>16
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&786432)>>18
+// ASM-NEXT: .amdhsa_dx10_clamp (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&2097152)>>21
+// ASM-NEXT: .amdhsa_ieee_mode (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&8388608)>>23
+// ASM-NEXT: .amdhsa_fp16_overflow (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&67108864)>>26
+// ASM-NEXT: .amdhsa_workgroup_processor_mode (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&536870912)>>29
+// ASM-NEXT: .amdhsa_memory_ordered (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&1073741824)>>30
+// ASM-NEXT: .amdhsa_forward_progress (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&2147483648)>>31
+// ASM-NEXT: .amdhsa_shared_vgpr_count 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&16777216)>>24
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&33554432)>>25
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&67108864)>>26
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&134217728)>>27
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&268435456)>>28
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&536870912)>>29
+// ASM-NEXT: .amdhsa_exception_int_div_zero (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1073741824)>>30
+// ASM-NEXT: .end_amdhsa_kernel
+
+// ASM: .set defined_value, 41
+// ASM-NEXT: .no_dead_strip defined_value
+// ASM-NEXT: .set defined_2_bits, 3
+// ASM-NEXT: .no_dead_strip defined_2_bits
+// ASM-NEXT: .set defined_boolean, 1
+// ASM-NEXT: .no_dead_strip defined_boolean
+
+// ASM: .amdhsa_kernel expr_defined
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 42
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 43
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_wavefront_size32 1
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info 1
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id 3
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_reserve_xnack_mask 1
+// ASM-NEXT: .amdhsa_float_round_mode_32 3
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 3
+// ASM-NEXT: .amdhsa_dx10_clamp 1
+// ASM-NEXT: .amdhsa_ieee_mode 1
+// ASM-NEXT: .amdhsa_fp16_overflow 1
+// ASM-NEXT: .amdhsa_workgroup_processor_mode 1
+// ASM-NEXT: .amdhsa_memory_ordered 1
+// ASM-NEXT: .amdhsa_forward_progress 1
+// ASM-NEXT: .amdhsa_shared_vgpr_count 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op 1
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact 1
+// ASM-NEXT: .amdhsa_exception_int_div_zero 1
+// ASM-NEXT: .end_amdhsa_kernel
diff --git a/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx11.s b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx11.s
new file mode 100644
index 0000000..e1107fb
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx11.s
@@ -0,0 +1,186 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck --check-prefix=ASM %s
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx1100 -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// When going from asm -> asm, the expressions should remain the same (i.e., symbolic).
+// When going from asm -> obj, the expressions should get resolved (through fixups).
+
+// OBJDUMP: Contents of section .rodata
+// expr_defined_later
+// OBJDUMP-NEXT: 0000 2b000000 2c000000 00000000 00000000
+// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0030 00f0afe4 811f007f 000c0000 00000000
+// expr_defined
+// OBJDUMP-NEXT: 0040 2a000000 2b000000 00000000 00000000
+// OBJDUMP-NEXT: 0050 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0060 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0070 00f0afe4 811f007f 000c0000 00000000
+
+.text
+// ASM: .text
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type expr_defined_later,@function
+expr_defined_later:
+ s_endpgm
+
+.p2align 8
+.type expr_defined,@function
+expr_defined:
+ s_endpgm
+
+.rodata
+// ASM: .rodata
+
+.p2align 6
+.amdhsa_kernel expr_defined_later
+ .amdhsa_group_segment_fixed_size defined_value+2
+ .amdhsa_private_segment_fixed_size defined_value+3
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_workgroup_processor_mode defined_boolean
+ .amdhsa_memory_ordered defined_boolean
+ .amdhsa_forward_progress defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_enable_private_segment defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+.set defined_value, 41
+.set defined_2_bits, 3
+.set defined_boolean, 1
+
+.p2align 6
+.amdhsa_kernel expr_defined
+ .amdhsa_group_segment_fixed_size defined_value+1
+ .amdhsa_private_segment_fixed_size defined_value+2
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_workgroup_processor_mode defined_boolean
+ .amdhsa_memory_ordered defined_boolean
+ .amdhsa_forward_progress defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_enable_private_segment defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+// ASM: .amdhsa_kernel expr_defined_later
+// ASM-NEXT: .amdhsa_group_segment_fixed_size defined_value+2
+// ASM-NEXT: .amdhsa_private_segment_fixed_size defined_value+3
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&62)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&2)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&4)>>2
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&8)>>3
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&16)>>4
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&64)>>6
+// ASM-NEXT: .amdhsa_wavefront_size32 (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&1024)>>10
+// ASM-NEXT: .amdhsa_enable_private_segment (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1)>>0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&128)>>7
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&256)>>8
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&512)>>9
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1024)>>10
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&6144)>>11
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_float_round_mode_32 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&12288)>>12
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&49152)>>14
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&196608)>>16
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&786432)>>18
+// ASM-NEXT: .amdhsa_dx10_clamp (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&2097152)>>21
+// ASM-NEXT: .amdhsa_ieee_mode (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&8388608)>>23
+// ASM-NEXT: .amdhsa_fp16_overflow (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&67108864)>>26
+// ASM-NEXT: .amdhsa_workgroup_processor_mode (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&536870912)>>29
+// ASM-NEXT: .amdhsa_memory_ordered (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&1073741824)>>30
+// ASM-NEXT: .amdhsa_forward_progress (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&2147483648)>>31
+// ASM-NEXT: .amdhsa_shared_vgpr_count 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&16777216)>>24
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&33554432)>>25
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&67108864)>>26
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&134217728)>>27
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&268435456)>>28
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&536870912)>>29
+// ASM-NEXT: .amdhsa_exception_int_div_zero (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1073741824)>>30
+// ASM-NEXT: .end_amdhsa_kernel
+
+// ASM: .set defined_value, 41
+// ASM-NEXT: .no_dead_strip defined_value
+// ASM-NEXT: .set defined_2_bits, 3
+// ASM-NEXT: .no_dead_strip defined_2_bits
+// ASM-NEXT: .set defined_boolean, 1
+// ASM-NEXT: .no_dead_strip defined_boolean
+
+// ASM: .amdhsa_kernel expr_defined
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 42
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 43
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_wavefront_size32 1
+// ASM-NEXT: .amdhsa_enable_private_segment 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info 1
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id 3
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_float_round_mode_32 3
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 3
+// ASM-NEXT: .amdhsa_dx10_clamp 1
+// ASM-NEXT: .amdhsa_ieee_mode 1
+// ASM-NEXT: .amdhsa_fp16_overflow 1
+// ASM-NEXT: .amdhsa_workgroup_processor_mode 1
+// ASM-NEXT: .amdhsa_memory_ordered 1
+// ASM-NEXT: .amdhsa_forward_progress 1
+// ASM-NEXT: .amdhsa_shared_vgpr_count 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op 1
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact 1
+// ASM-NEXT: .amdhsa_exception_int_div_zero 1
+// ASM-NEXT: .end_amdhsa_kernel
diff --git a/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx12.s b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx12.s
new file mode 100644
index 0000000..449616d
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx12.s
@@ -0,0 +1,184 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx1200 < %s | FileCheck --check-prefix=ASM %s
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx1200 -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// When going from asm -> asm, the expressions should remain the same (i.e., symbolic).
+// When going from asm -> obj, the expressions should get resolved (through fixups).
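+// E.g. defined_value is set to 41 further down, so defined_value+2 and defined_value+3
+// resolve to 0x2b and 0x2c, matching the first two words of the descriptor below.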
+
+// OBJDUMP: Contents of section .rodata
+// expr_defined_later
+// OBJDUMP-NEXT: 0000 2b000000 2c000000 00000000 00000000
+// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0030 00f02fe4 811f007f 000c0000 00000000
+// expr_defined
+// OBJDUMP-NEXT: 0040 2a000000 2b000000 00000000 00000000
+// OBJDUMP-NEXT: 0050 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0060 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0070 00f02fe4 811f007f 000c0000 00000000
+
+.text
+// ASM: .text
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type expr_defined_later,@function
+expr_defined_later:
+ s_endpgm
+
+.p2align 8
+.type expr_defined,@function
+expr_defined:
+ s_endpgm
+
+.rodata
+// ASM: .rodata
+
+.p2align 6
+.amdhsa_kernel expr_defined_later
+ .amdhsa_group_segment_fixed_size defined_value+2
+ .amdhsa_private_segment_fixed_size defined_value+3
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_workgroup_processor_mode defined_boolean
+ .amdhsa_memory_ordered defined_boolean
+ .amdhsa_forward_progress defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_round_robin_scheduling defined_boolean
+ .amdhsa_enable_private_segment defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+.set defined_value, 41
+.set defined_2_bits, 3
+.set defined_boolean, 1
+
+.p2align 6
+.amdhsa_kernel expr_defined
+ .amdhsa_group_segment_fixed_size defined_value+1
+ .amdhsa_private_segment_fixed_size defined_value+2
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_workgroup_processor_mode defined_boolean
+ .amdhsa_memory_ordered defined_boolean
+ .amdhsa_forward_progress defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_round_robin_scheduling defined_boolean
+ .amdhsa_enable_private_segment defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+// ASM: .amdhsa_kernel expr_defined_later
+// ASM-NEXT: .amdhsa_group_segment_fixed_size defined_value+2
+// ASM-NEXT: .amdhsa_private_segment_fixed_size defined_value+3
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&62)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&2)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&4)>>2
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&8)>>3
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&16)>>4
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&64)>>6
+// ASM-NEXT: .amdhsa_wavefront_size32 (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&1024)>>10
+// ASM-NEXT: .amdhsa_enable_private_segment (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1)>>0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&128)>>7
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&256)>>8
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&512)>>9
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1024)>>10
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&6144)>>11
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_float_round_mode_32 (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&12288)>>12
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&49152)>>14
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&196608)>>16
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&786432)>>18
+// ASM-NEXT: .amdhsa_fp16_overflow (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&67108864)>>26
+// ASM-NEXT: .amdhsa_workgroup_processor_mode (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&536870912)>>29
+// ASM-NEXT: .amdhsa_memory_ordered (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&1073741824)>>30
+// ASM-NEXT: .amdhsa_forward_progress (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&2147483648)>>31
+// ASM-NEXT: .amdhsa_round_robin_scheduling (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&2097152)>>21
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&16777216)>>24
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&33554432)>>25
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&67108864)>>26
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&134217728)>>27
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&268435456)>>28
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&536870912)>>29
+// ASM-NEXT: .amdhsa_exception_int_div_zero (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1073741824)>>30
+// ASM-NEXT: .end_amdhsa_kernel
+
+// ASM: .set defined_value, 41
+// ASM-NEXT: .no_dead_strip defined_value
+// ASM-NEXT: .set defined_2_bits, 3
+// ASM-NEXT: .no_dead_strip defined_2_bits
+// ASM-NEXT: .set defined_boolean, 1
+// ASM-NEXT: .no_dead_strip defined_boolean
+
+// ASM: .amdhsa_kernel expr_defined
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 42
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 43
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_wavefront_size32 1
+// ASM-NEXT: .amdhsa_enable_private_segment 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info 1
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id 3
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_float_round_mode_32 3
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 3
+// ASM-NEXT: .amdhsa_fp16_overflow 1
+// ASM-NEXT: .amdhsa_workgroup_processor_mode 1
+// ASM-NEXT: .amdhsa_memory_ordered 1
+// ASM-NEXT: .amdhsa_forward_progress 1
+// ASM-NEXT: .amdhsa_round_robin_scheduling 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op 1
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact 1
+// ASM-NEXT: .amdhsa_exception_int_div_zero 1
+// ASM-NEXT: .end_amdhsa_kernel
diff --git a/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx7.s b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx7.s
new file mode 100644
index 0000000..c7e0544
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx7.s
@@ -0,0 +1,168 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx700 < %s | FileCheck --check-prefix=ASM %s
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx700 -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// When going from asm -> asm, the expressions should remain the same (i.e., symbolic).
+// When going from asm -> obj, the expressions should get resolved (through fixups).
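+// For instance, defined_value+2 (= 43 = 0x2b) and defined_value+3 (= 44 = 0x2c) are
+// emitted as the leading words of the expr_defined_later descriptor below.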
+
+// OBJDUMP: Contents of section .rodata
+// expr_defined_later
+// OBJDUMP-NEXT: 0000 2b000000 2c000000 00000000 00000000
+// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0030 00f0af00 801f007f 00080000 00000000
+// expr_defined
+// OBJDUMP-NEXT: 0040 2a000000 2b000000 00000000 00000000
+// OBJDUMP-NEXT: 0050 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0060 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0070 00f0af00 801f007f 00080000 00000000
+
+.text
+// ASM: .text
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type expr_defined_later,@function
+expr_defined_later:
+ s_endpgm
+
+.p2align 8
+.type expr_defined,@function
+expr_defined:
+ s_endpgm
+
+.rodata
+// ASM: .rodata
+
+.p2align 6
+.amdhsa_kernel expr_defined_later
+ .amdhsa_group_segment_fixed_size defined_value+2
+ .amdhsa_private_segment_fixed_size defined_value+3
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+.set defined_value, 41
+.set defined_2_bits, 3
+.set defined_boolean, 1
+
+.p2align 6
+.amdhsa_kernel expr_defined
+ .amdhsa_group_segment_fixed_size defined_value+1
+ .amdhsa_private_segment_fixed_size defined_value+2
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+// ASM: .amdhsa_kernel expr_defined_later
+// ASM-NEXT: .amdhsa_group_segment_fixed_size defined_value+2
+// ASM-NEXT: .amdhsa_private_segment_fixed_size defined_value+3
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&62)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer (((0&(~2048))|(defined_boolean<<11))&1)>>0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr (((0&(~2048))|(defined_boolean<<11))&2)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr (((0&(~2048))|(defined_boolean<<11))&4)>>2
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr (((0&(~2048))|(defined_boolean<<11))&8)>>3
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id (((0&(~2048))|(defined_boolean<<11))&16)>>4
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init (((0&(~2048))|(defined_boolean<<11))&32)>>5
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size (((0&(~2048))|(defined_boolean<<11))&64)>>6
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1)>>0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&128)>>7
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&256)>>8
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&512)>>9
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1024)>>10
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&6144)>>11
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_float_round_mode_32 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&12288)>>12
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&49152)>>14
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&196608)>>16
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&786432)>>18
+// ASM-NEXT: .amdhsa_dx10_clamp (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&2097152)>>21
+// ASM-NEXT: .amdhsa_ieee_mode (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&8388608)>>23
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&16777216)>>24
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&33554432)>>25
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&67108864)>>26
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&134217728)>>27
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&268435456)>>28
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&536870912)>>29
+// ASM-NEXT: .amdhsa_exception_int_div_zero (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1073741824)>>30
+// ASM-NEXT: .end_amdhsa_kernel
+
+// ASM: .set defined_value, 41
+// ASM-NEXT: .no_dead_strip defined_value
+// ASM-NEXT: .set defined_2_bits, 3
+// ASM-NEXT: .no_dead_strip defined_2_bits
+// ASM-NEXT: .set defined_boolean, 1
+// ASM-NEXT: .no_dead_strip defined_boolean
+
+// ASM: .amdhsa_kernel expr_defined
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 42
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 43
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info 1
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id 3
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_float_round_mode_32 3
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 3
+// ASM-NEXT: .amdhsa_dx10_clamp 1
+// ASM-NEXT: .amdhsa_ieee_mode 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op 1
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact 1
+// ASM-NEXT: .amdhsa_exception_int_div_zero 1
+// ASM-NEXT: .end_amdhsa_kernel
diff --git a/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx8.s b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx8.s
new file mode 100644
index 0000000..49a5015
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx8.s
@@ -0,0 +1,171 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx801 < %s | FileCheck --check-prefix=ASM %s
+
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx801 -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// When going from asm -> asm, the expressions should remain the same (i.e., symbolic).
+// When going from asm -> obj, the expressions should get resolved (through fixups).
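+// As below, defined_value resolves to 41, so the group and private segment sizes
+// (defined_value+2 and defined_value+3) appear as 0x2b and 0x2c in the .rodata dump.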
+
+// OBJDUMP: Contents of section .rodata
+// expr_defined_later
+// OBJDUMP-NEXT: 0000 2b000000 2c000000 00000000 00000000
+// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0030 00f0af00 801f007f 00080000 00000000
+// expr_defined
+// OBJDUMP-NEXT: 0040 2a000000 2b000000 00000000 00000000
+// OBJDUMP-NEXT: 0050 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0060 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0070 00f0af00 801f007f 00080000 00000000
+
+.text
+// ASM: .text
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type expr_defined_later,@function
+expr_defined_later:
+ s_endpgm
+
+.p2align 8
+.type expr_defined,@function
+expr_defined:
+ s_endpgm
+
+.rodata
+// ASM: .rodata
+
+.p2align 6
+.amdhsa_kernel expr_defined_later
+ .amdhsa_group_segment_fixed_size defined_value+2
+ .amdhsa_private_segment_fixed_size defined_value+3
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+.set defined_value, 41
+.set defined_2_bits, 3
+.set defined_boolean, 1
+
+.p2align 6
+.amdhsa_kernel expr_defined
+ .amdhsa_group_segment_fixed_size defined_value+1
+ .amdhsa_private_segment_fixed_size defined_value+2
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+// ASM: .amdhsa_kernel expr_defined_later
+// ASM-NEXT: .amdhsa_group_segment_fixed_size defined_value+2
+// ASM-NEXT: .amdhsa_private_segment_fixed_size defined_value+3
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&62)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer (((0&(~2048))|(defined_boolean<<11))&1)>>0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr (((0&(~2048))|(defined_boolean<<11))&2)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr (((0&(~2048))|(defined_boolean<<11))&4)>>2
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr (((0&(~2048))|(defined_boolean<<11))&8)>>3
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id (((0&(~2048))|(defined_boolean<<11))&16)>>4
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init (((0&(~2048))|(defined_boolean<<11))&32)>>5
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size (((0&(~2048))|(defined_boolean<<11))&64)>>6
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1)>>0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&128)>>7
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&256)>>8
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&512)>>9
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1024)>>10
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&6144)>>11
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_reserve_xnack_mask 1
+// ASM-NEXT: .amdhsa_float_round_mode_32 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&12288)>>12
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&49152)>>14
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&196608)>>16
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&786432)>>18
+// ASM-NEXT: .amdhsa_dx10_clamp (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&2097152)>>21
+// ASM-NEXT: .amdhsa_ieee_mode (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&8388608)>>23
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&16777216)>>24
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&33554432)>>25
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&67108864)>>26
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&134217728)>>27
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&268435456)>>28
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&536870912)>>29
+// ASM-NEXT: .amdhsa_exception_int_div_zero (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1073741824)>>30
+// ASM-NEXT: .end_amdhsa_kernel
+
+// ASM: .set defined_value, 41
+// ASM-NEXT: .no_dead_strip defined_value
+// ASM-NEXT: .set defined_2_bits, 3
+// ASM-NEXT: .no_dead_strip defined_2_bits
+// ASM-NEXT: .set defined_boolean, 1
+// ASM-NEXT: .no_dead_strip defined_boolean
+
+// ASM: .amdhsa_kernel expr_defined
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 42
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 43
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info 1
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id 3
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_reserve_xnack_mask 1
+// ASM-NEXT: .amdhsa_float_round_mode_32 3
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 3
+// ASM-NEXT: .amdhsa_dx10_clamp 1
+// ASM-NEXT: .amdhsa_ieee_mode 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op 1
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact 1
+// ASM-NEXT: .amdhsa_exception_int_div_zero 1
+// ASM-NEXT: .end_amdhsa_kernel
diff --git a/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx90a.s b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx90a.s
new file mode 100644
index 0000000..b7f8923
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx90a.s
@@ -0,0 +1,148 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx90a < %s | FileCheck --check-prefix=ASM %s
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx90a -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// When going from asm -> asm, the expressions should remain the same (i.e., symbolic).
+// When going from asm -> obj, the expressions should get resolved (through fixups).
+
+// OBJDUMP: Contents of section .rodata
+// expr_defined_later
+// OBJDUMP-NEXT: 0000 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000100
+// OBJDUMP-NEXT: 0030 0000ac04 81000000 00000000 00000000
+// expr_defined
+// OBJDUMP-NEXT: 0040 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0050 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0060 00000000 00000000 00000000 00000100
+// OBJDUMP-NEXT: 0070 0000ac04 81000000 00000000 00000000
+
+.text
+// ASM: .text
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type expr_defined_later,@function
+expr_defined_later:
+ s_endpgm
+
+.p2align 8
+.type expr_defined,@function
+expr_defined:
+ s_endpgm
+
+.rodata
+// ASM: .rodata
+
+.p2align 6
+.amdhsa_kernel expr_defined_later
+ .amdhsa_system_sgpr_private_segment_wavefront_offset defined_boolean
+ .amdhsa_dx10_clamp defined_boolean
+ .amdhsa_ieee_mode defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_tg_split defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+.end_amdhsa_kernel
+
+.set defined_boolean, 1
+
+.p2align 6
+.amdhsa_kernel expr_defined
+ .amdhsa_system_sgpr_private_segment_wavefront_offset defined_boolean
+ .amdhsa_dx10_clamp defined_boolean
+ .amdhsa_ieee_mode defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_tg_split defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+.end_amdhsa_kernel
+
+// ASM: .amdhsa_kernel expr_defined_later
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 0
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 0
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&62)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_length 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_offset 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1)>>0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&128)>>7
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&256)>>8
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&512)>>9
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1024)>>10
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&6144)>>11
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_accum_offset (((((((0&(~65536))|(defined_boolean<<16))&(~63))|(0<<0))&63)>>0)+1)*4
+// ASM-NEXT: .amdhsa_reserve_xnack_mask 1
+// ASM-NEXT: .amdhsa_float_round_mode_32 (((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~2097152))|(defined_boolean<<21))&(~8388608))|(defined_boolean<<23))&(~67108864))|(defined_boolean<<26))&(~63))|(0<<0))&(~960))|(0<<6))&12288)>>12
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 (((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~2097152))|(defined_boolean<<21))&(~8388608))|(defined_boolean<<23))&(~67108864))|(defined_boolean<<26))&(~63))|(0<<0))&(~960))|(0<<6))&49152)>>14
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 (((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~2097152))|(defined_boolean<<21))&(~8388608))|(defined_boolean<<23))&(~67108864))|(defined_boolean<<26))&(~63))|(0<<0))&(~960))|(0<<6))&196608)>>16
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 (((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~2097152))|(defined_boolean<<21))&(~8388608))|(defined_boolean<<23))&(~67108864))|(defined_boolean<<26))&(~63))|(0<<0))&(~960))|(0<<6))&786432)>>18
+// ASM-NEXT: .amdhsa_dx10_clamp (((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~2097152))|(defined_boolean<<21))&(~8388608))|(defined_boolean<<23))&(~67108864))|(defined_boolean<<26))&(~63))|(0<<0))&(~960))|(0<<6))&2097152)>>21
+// ASM-NEXT: .amdhsa_ieee_mode (((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~2097152))|(defined_boolean<<21))&(~8388608))|(defined_boolean<<23))&(~67108864))|(defined_boolean<<26))&(~63))|(0<<0))&(~960))|(0<<6))&8388608)>>23
+// ASM-NEXT: .amdhsa_fp16_overflow (((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~2097152))|(defined_boolean<<21))&(~8388608))|(defined_boolean<<23))&(~67108864))|(defined_boolean<<26))&(~63))|(0<<0))&(~960))|(0<<6))&67108864)>>26
+// ASM-NEXT: .amdhsa_tg_split (((((0&(~65536))|(defined_boolean<<16))&(~63))|(0<<0))&65536)>>16
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&16777216)>>24
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&33554432)>>25
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&67108864)>>26
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&134217728)>>27
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&268435456)>>28
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&536870912)>>29
+// ASM-NEXT: .amdhsa_exception_int_div_zero (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1073741824)>>30
+// ASM-NEXT: .end_amdhsa_kernel
+
+// ASM: .set defined_boolean, 1
+// ASM-NEXT: .no_dead_strip defined_boolean
+
+// ASM: .amdhsa_kernel expr_defined
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 0
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 0
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_length 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_offset 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info 0
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id 0
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_accum_offset 4
+// ASM-NEXT: .amdhsa_reserve_xnack_mask 1
+// ASM-NEXT: .amdhsa_float_round_mode_32 0
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 0
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 0
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 3
+// ASM-NEXT: .amdhsa_dx10_clamp 1
+// ASM-NEXT: .amdhsa_ieee_mode 1
+// ASM-NEXT: .amdhsa_fp16_overflow 1
+// ASM-NEXT: .amdhsa_tg_split 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op 0
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact 0
+// ASM-NEXT: .amdhsa_exception_int_div_zero 0
+// ASM-NEXT: .end_amdhsa_kernel
diff --git a/llvm/test/MC/AMDGPU/hsa-tg-split.s b/llvm/test/MC/AMDGPU/hsa-tg-split.s
new file mode 100644
index 0000000..5a4d3e2
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-tg-split.s
@@ -0,0 +1,74 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx90a -mattr=+xnack,+tgsplit < %s | FileCheck --check-prefix=ASM %s
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx90a -mattr=+xnack,+tgsplit -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// OBJDUMP: Contents of section .rodata
+// OBJDUMP-NEXT: 0000 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000100
+// OBJDUMP-NEXT: 0030 0000ac00 80000000 00000000 00000000
+
+.text
+// ASM: .text
+
+.amdgcn_target "amdgcn-amd-amdhsa--gfx90a:xnack+"
+// ASM: .amdgcn_target "amdgcn-amd-amdhsa--gfx90a:xnack+"
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type minimal,@function
+minimal:
+ s_endpgm
+
+.rodata
+// ASM: .rodata
+
+.p2align 6
+.amdhsa_kernel minimal
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+.end_amdhsa_kernel
+
+// ASM: .amdhsa_kernel minimal
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 0
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 0
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_length 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_offset 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info 0
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id 0
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_accum_offset 4
+// ASM-NEXT: .amdhsa_reserve_xnack_mask 1
+// ASM-NEXT: .amdhsa_float_round_mode_32 0
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 0
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 0
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 3
+// ASM-NEXT: .amdhsa_dx10_clamp 1
+// ASM-NEXT: .amdhsa_ieee_mode 1
+// ASM-NEXT: .amdhsa_fp16_overflow 0
+// ASM-NEXT: .amdhsa_tg_split 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op 0
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact 0
+// ASM-NEXT: .amdhsa_exception_int_div_zero 0
+// ASM-NEXT: .end_amdhsa_kernel
diff --git a/llvm/test/MC/AMDGPU/mcexpr_amd.s b/llvm/test/MC/AMDGPU/mcexpr_amd.s
new file mode 100644
index 0000000..a9639c3a
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/mcexpr_amd.s
@@ -0,0 +1,130 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa < %s | FileCheck --check-prefix=ASM %s
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -filetype=obj < %s > %t
+// RUN: llvm-objdump --syms %t | FileCheck --check-prefix=OBJDUMP %s
+
+// OBJDUMP: SYMBOL TABLE:
+// OBJDUMP-NEXT: 0000000000000000 l *ABS* 0000000000000000 zero
+// OBJDUMP-NEXT: 0000000000000001 l *ABS* 0000000000000000 one
+// OBJDUMP-NEXT: 0000000000000002 l *ABS* 0000000000000000 two
+// OBJDUMP-NEXT: 0000000000000003 l *ABS* 0000000000000000 three
+// OBJDUMP-NEXT: 7fffffffffffffff l *ABS* 0000000000000000 i64_max
+// OBJDUMP-NEXT: 8000000000000000 l *ABS* 0000000000000000 i64_min
+// OBJDUMP-NEXT: 0000000000000005 l *ABS* 0000000000000000 max_expression_all
+// OBJDUMP-NEXT: 0000000000000005 l *ABS* 0000000000000000 five
+// OBJDUMP-NEXT: 0000000000000004 l *ABS* 0000000000000000 four
+// OBJDUMP-NEXT: 0000000000000002 l *ABS* 0000000000000000 max_expression_two
+// OBJDUMP-NEXT: 0000000000000001 l *ABS* 0000000000000000 max_expression_one
+// OBJDUMP-NEXT: 000000000000000a l *ABS* 0000000000000000 max_literals
+// OBJDUMP-NEXT: 000000000000000f l *ABS* 0000000000000000 max_with_max_sym
+// OBJDUMP-NEXT: 000000000000000f l *ABS* 0000000000000000 max
+// OBJDUMP-NEXT: ffffffffffffffff l *ABS* 0000000000000000 neg_one
+// OBJDUMP-NEXT: ffffffffffffffff l *ABS* 0000000000000000 max_neg_numbers
+// OBJDUMP-NEXT: ffffffffffffffff l *ABS* 0000000000000000 max_neg_number
+// OBJDUMP-NEXT: 0000000000000003 l *ABS* 0000000000000000 max_with_subexpr
+// OBJDUMP-NEXT: 0000000000000006 l *ABS* 0000000000000000 max_as_subexpr
+// OBJDUMP-NEXT: 0000000000000005 l *ABS* 0000000000000000 max_recursive_subexpr
+// OBJDUMP-NEXT: 7fffffffffffffff l *ABS* 0000000000000000 max_expr_one_max
+// OBJDUMP-NEXT: 7fffffffffffffff l *ABS* 0000000000000000 max_expr_two_max
+// OBJDUMP-NEXT: 7fffffffffffffff l *ABS* 0000000000000000 max_expr_three_max
+// OBJDUMP-NEXT: 8000000000000000 l *ABS* 0000000000000000 max_expr_one_min
+// OBJDUMP-NEXT: 0000000000000003 l *ABS* 0000000000000000 max_expr_two_min
+// OBJDUMP-NEXT: 0000000000989680 l *ABS* 0000000000000000 max_expr_three_min
+// OBJDUMP-NEXT: 0000000000000007 l *ABS* 0000000000000000 or_expression_all
+// OBJDUMP-NEXT: 0000000000000003 l *ABS* 0000000000000000 or_expression_two
+// OBJDUMP-NEXT: 0000000000000001 l *ABS* 0000000000000000 or_expression_one
+// OBJDUMP-NEXT: 000000000000000f l *ABS* 0000000000000000 or_literals
+// OBJDUMP-NEXT: 0000000000000000 l *ABS* 0000000000000000 or_false
+// OBJDUMP-NEXT: 00000000000000ff l *ABS* 0000000000000000 or_with_or_sym
+// OBJDUMP-NEXT: 00000000000000ff l *ABS* 0000000000000000 or
+// OBJDUMP-NEXT: 0000000000000003 l *ABS* 0000000000000000 or_with_subexpr
+// OBJDUMP-NEXT: 0000000000000008 l *ABS* 0000000000000000 or_as_subexpr
+// OBJDUMP-NEXT: 0000000000000007 l *ABS* 0000000000000000 or_recursive_subexpr
+
+// ASM: .set zero, 0
+// ASM: .set one, 1
+// ASM: .set two, 2
+// ASM: .set three, 3
+// ASM: .set i64_max, 9223372036854775807
+// ASM: .set i64_min, -9223372036854775808
+
+.set zero, 0
+.set one, 1
+.set two, 2
+.set three, 3
+.set i64_max, 0x7FFFFFFFFFFFFFFF
+.set i64_min, 0x8000000000000000
+
+// ASM: .set max_expression_all, max(1, 2, five, 3, four)
+// ASM: .set max_expression_two, 2
+// ASM: .set max_expression_one, 1
+// ASM: .set max_literals, 10
+// ASM: .set max_with_max_sym, max(max, 4, 3, 1, 2)
+
+.set max_expression_all, max(one, two, five, three, four)
+.set max_expression_two, max(one, two)
+.set max_expression_one, max(one)
+.set max_literals, max(1,2,3,4,5,6,7,8,9,10)
+.set max_with_max_sym, max(max, 4, 3, one, two)
+
+// ASM: .set max_neg_numbers, -1
+// ASM: .set max_neg_number, -1
+
+.set neg_one, -1
+.set max_neg_numbers, max(-5, -4, -3, -2, neg_one)
+.set max_neg_number, max(neg_one)
+
+// ASM: .set max_with_subexpr, 3
+// ASM: .set max_as_subexpr, 1+(max(4, 3, five))
+// ASM: .set max_recursive_subexpr, max(max(1, four), 3, max_expression_all)
+
+.set max_with_subexpr, max(((one | 3) << 3) / 8)
+.set max_as_subexpr, 1 + max(4, 3, five)
+.set max_recursive_subexpr, max(max(one, four), three, max_expression_all)
+
+// ASM: .set max_expr_one_max, 9223372036854775807
+// ASM: .set max_expr_two_max, max(9223372036854775807, five)
+// ASM: .set max_expr_three_max, max(9223372036854775807, five, 10000000)
+
+.set max_expr_one_max, max(i64_max)
+.set max_expr_two_max, max(i64_max, five)
+.set max_expr_three_max, max(i64_max, five, 10000000)
+
+// ASM: .set max_expr_one_min, -9223372036854775808
+// ASM: .set max_expr_two_min, 3
+// ASM: .set max_expr_three_min, 10000000
+
+.set max_expr_one_min, max(i64_min)
+.set max_expr_two_min, max(i64_min, three)
+.set max_expr_three_min, max(i64_min, three, 10000000)
+
+// ASM: .set or_expression_all, or(1, 2, five, 3, four)
+// ASM: .set or_expression_two, 3
+// ASM: .set or_expression_one, 1
+// ASM: .set or_literals, 15
+// ASM: .set or_false, 0
+// ASM: .set or_with_or_sym, or(or, 4, 3, 1, 2)
+
+.set or_expression_all, or(one, two, five, three, four)
+.set or_expression_two, or(one, two)
+.set or_expression_one, or(one)
+.set or_literals, or(1,2,3,4,5,6,7,8,9,10)
+.set or_false, or(zero, 0, (2-2), 5 > 6)
+.set or_with_or_sym, or(or, 4, 3, one, two)
+
+// ASM: .set or_with_subexpr, 3
+// ASM: .set or_as_subexpr, 1+(or(4, 3, five))
+// ASM: .set or_recursive_subexpr, or(or(1, four), 3, or_expression_all)
+
+.set or_with_subexpr, or(((one | 3) << 3) / 8)
+.set or_as_subexpr, 1 + or(4, 3, five)
+.set or_recursive_subexpr, or(or(one, four), three, or_expression_all)
+
+// ASM: .set four, 4
+// ASM: .set five, 5
+// ASM: .set max, 15
+// ASM: .set or, 255
+
+.set four, 4
+.set five, 5
+.set max, 0xF
+.set or, 0xFF
diff --git a/llvm/test/MC/AMDGPU/mcexpr_amd_err.s b/llvm/test/MC/AMDGPU/mcexpr_amd_err.s
new file mode 100644
index 0000000..ea02e01
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/mcexpr_amd_err.s
@@ -0,0 +1,53 @@
+// RUN: not llvm-mc -triple amdgcn-amd-amdhsa %s 2>&1 | FileCheck --check-prefix=ASM %s
+
+.set one, 1
+.set two, 2
+.set three, 3
+
+.set max_empty, max()
+// ASM: :[[@LINE-1]]:{{[0-9]+}}: error: empty max expression
+// ASM: :[[@LINE-2]]:{{[0-9]+}}: error: missing expression
+
+.set or_empty, or()
+// ASM: :[[@LINE-1]]:{{[0-9]+}}: error: empty or expression
+// ASM: :[[@LINE-2]]:{{[0-9]+}}: error: missing expression
+
+.set max_post_aux_comma, max(one,)
+// ASM: :[[@LINE-1]]:{{[0-9]+}}: error: mismatch of commas in max expression
+// ASM: :[[@LINE-2]]:{{[0-9]+}}: error: missing expression
+
+.set max_pre_aux_comma, max(,one)
+// ASM: :[[@LINE-1]]:{{[0-9]+}}: error: unknown token in expression
+// ASM: :[[@LINE-2]]:{{[0-9]+}}: error: missing expression
+
+.set max_double_comma, max(one,, two)
+// ASM: :[[@LINE-1]]:{{[0-9]+}}: error: unknown token in expression
+// ASM: :[[@LINE-2]]:{{[0-9]+}}: error: missing expression
+
+.set max_no_comma, max(one two)
+// ASM: :[[@LINE-1]]:{{[0-9]+}}: error: unexpected token in max expression
+// ASM: :[[@LINE-2]]:{{[0-9]+}}: error: missing expression
+
+.set max_missing_paren, max(two
+// ASM: :[[@LINE-1]]:{{[0-9]+}}: error: unexpected token in max expression
+// ASM: :[[@LINE-2]]:{{[0-9]+}}: error: missing expression
+
+.set max_expression_one, max(three, four,
+// ASM: :[[@LINE-1]]:{{[0-9]+}}: error: unknown token in expression
+// ASM: :[[@LINE-2]]:{{[0-9]+}}: error: missing expression
+
+.set or_expression_one, or(four, five
+// ASM: :[[@LINE-1]]:{{[0-9]+}}: error: unexpected token in or expression
+// ASM: :[[@LINE-2]]:{{[0-9]+}}: error: missing expression
+
+.set max_no_lparen, max four, five)
+// ASM: :[[@LINE-1]]:{{[0-9]+}}: error: expected newline
+
+.set max_no_paren, max one, two, three
+// ASM: :[[@LINE-1]]:{{[0-9]+}}: error: expected newline
+
+.set max_rparen_only, max)
+// ASM: :[[@LINE-1]]:{{[0-9]+}}: error: expected newline
+
+.set four, 4
+.set five, 5
diff --git a/llvm/test/MC/AMDGPU/vinterp-fake16.s b/llvm/test/MC/AMDGPU/vinterp-fake16.s
new file mode 100644
index 0000000..33dacdd
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/vinterp-fake16.s
@@ -0,0 +1,182 @@
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -show-encoding %s | FileCheck -check-prefix=GCN %s
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 -show-encoding %s | FileCheck -check-prefix=GCN %s
+
+v_interp_p10_f32 v0, v1, v2, v3
+// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f32 v1, v10, v20, v30
+// GCN: v_interp_p10_f32 v1, v10, v20, v30 wait_exp:0 ; encoding: [0x01,0x00,0x00,0xcd,0x0a,0x29,0x7a,0x04]
+
+v_interp_p10_f32 v2, v11, v21, v31
+// GCN: v_interp_p10_f32 v2, v11, v21, v31 wait_exp:0 ; encoding: [0x02,0x00,0x00,0xcd,0x0b,0x2b,0x7e,0x04]
+
+v_interp_p10_f32 v3, v12, v22, v32
+// GCN: v_interp_p10_f32 v3, v12, v22, v32 wait_exp:0 ; encoding: [0x03,0x00,0x00,0xcd,0x0c,0x2d,0x82,0x04]
+
+v_interp_p10_f32 v0, v1, v2, v3 clamp
+// GCN: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x00,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f32 v0, -v1, v2, v3
+// GCN: v_interp_p10_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x24]
+
+v_interp_p10_f32 v0, v1, -v2, v3
+// GCN: v_interp_p10_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x44]
+
+v_interp_p10_f32 v0, v1, v2, -v3
+// GCN: v_interp_p10_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x84]
+
+v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0
+// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1
+// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x00,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f32 v0, v1, v2, v3 wait_exp:7
+// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x00,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7
+// GCN: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7 ; encoding: [0x00,0x87,0x00,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f32 v0, v1, v2, v3
+// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f32 v1, v10, v20, v30
+// GCN: v_interp_p2_f32 v1, v10, v20, v30 wait_exp:0 ; encoding: [0x01,0x00,0x01,0xcd,0x0a,0x29,0x7a,0x04]
+
+v_interp_p2_f32 v2, v11, v21, v31
+// GCN: v_interp_p2_f32 v2, v11, v21, v31 wait_exp:0 ; encoding: [0x02,0x00,0x01,0xcd,0x0b,0x2b,0x7e,0x04]
+
+v_interp_p2_f32 v3, v12, v22, v32
+// GCN: v_interp_p2_f32 v3, v12, v22, v32 wait_exp:0 ; encoding: [0x03,0x00,0x01,0xcd,0x0c,0x2d,0x82,0x04]
+
+v_interp_p2_f32 v0, v1, v2, v3 clamp
+// GCN: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x01,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f32 v0, -v1, v2, v3
+// GCN: v_interp_p2_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x24]
+
+v_interp_p2_f32 v0, v1, -v2, v3
+// GCN: v_interp_p2_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x44]
+
+v_interp_p2_f32 v0, v1, v2, -v3
+// GCN: v_interp_p2_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x84]
+
+v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0
+// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1
+// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x01,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f32 v0, v1, v2, v3 wait_exp:7
+// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x01,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7
+// GCN: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7 ; encoding: [0x00,0x87,0x01,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f16_f32 v0, -v1, v2, v3
+// GFX11: v_interp_p10_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x24]
+
+v_interp_p10_f16_f32 v0, v1, -v2, v3
+// GFX11: v_interp_p10_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x44]
+
+v_interp_p10_f16_f32 v0, v1, v2, -v3
+// GFX11: v_interp_p10_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x84]
+
+v_interp_p10_f16_f32 v0, v1, v2, v3 clamp
+// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x02,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0
+// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1
+// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x02,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7
+// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x02,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f16_f32 v0, -v1, v2, v3
+// GFX11: v_interp_p2_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x24]
+
+v_interp_p2_f16_f32 v0, v1, -v2, v3
+// GFX11: v_interp_p2_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x44]
+
+v_interp_p2_f16_f32 v0, v1, v2, -v3
+// GFX11: v_interp_p2_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x84]
+
+v_interp_p2_f16_f32 v0, v1, v2, v3 clamp
+// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x03,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0
+// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1
+// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x03,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7
+// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x03,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_rtz_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3
+// GFX11: v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x24]
+
+v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x44]
+
+v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x84]
+
+v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x04,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x04,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x04,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_rtz_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_rtz_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3
+// GFX11: v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x24]
+
+v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x44]
+
+v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x84]
+
+v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x05,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x05,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x05,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_rtz_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04]
diff --git a/llvm/test/MC/ARM/arm-branch-errors.s b/llvm/test/MC/ARM/arm-branch-errors.s
index bbf6445..5d7ae12 100644
--- a/llvm/test/MC/ARM/arm-branch-errors.s
+++ b/llvm/test/MC/ARM/arm-branch-errors.s
@@ -10,13 +10,13 @@
@ CHECK: error: invalid instruction, any one of the following would fix this:
@ CHECK: b #2
-@ CHECK: note: instruction requires: thumb
@ CHECK: note: invalid operand for instruction
+@ CHECK: note: instruction requires: thumb
@ CHECK: error: invalid instruction, any one of the following would fix this:
@ CHECK: bl #2
@ CHECK: note: instruction requires: thumb
@ CHECK: note: invalid operand for instruction
@ CHECK: error: invalid instruction, any one of the following would fix this:
@ CHECK: beq #2
-@ CHECK: note: instruction requires: thumb
@ CHECK: note: invalid operand for instruction
+@ CHECK: note: instruction requires: thumb
diff --git a/llvm/test/MC/ARM/arm11-hint-instr.s b/llvm/test/MC/ARM/arm11-hint-instr.s
index 4193a68..d9eaa5a 100644
--- a/llvm/test/MC/ARM/arm11-hint-instr.s
+++ b/llvm/test/MC/ARM/arm11-hint-instr.s
@@ -65,7 +65,13 @@
@ CHECK-THUMB: wfe @ encoding: [0x20,0xbf]
@ CHECK-THUMB: wfi @ encoding: [0x30,0xbf]
@ CHECK-THUMB: sev @ encoding: [0x40,0xbf]
-@ CHECK-ERROR-THUMB: error: instruction requires: v7 clrex
+@ CHECK-ERROR-THUMB: error: invalid instruction, any one of the following would fix this:
+@ CHECK-ERROR-THUMB: clrex
+@ CHECK-ERROR-THUMB: ^
+@ CHECK-ERROR-THUMB: note: instruction requires: v7 clrex
+@ CHECK-ERROR-THUMB: clrex
+@ CHECK-ERROR-THUMB: ^
+@ CHECK-ERROR-THUMB: note: instruction requires: arm-mode
@ CHECK-ERROR-THUMB: clrex
@ CHECK-ERROR-THUMB: ^
diff --git a/llvm/test/MC/ARM/basic-arm-instructions.s b/llvm/test/MC/ARM/basic-arm-instructions.s
index 055f3ce3..9f3a5cd 100644
--- a/llvm/test/MC/ARM/basic-arm-instructions.s
+++ b/llvm/test/MC/ARM/basic-arm-instructions.s
@@ -1202,6 +1202,10 @@ Lforward:
@ CHECK: ldrex r1, [r7] @ encoding: [0x9f,0x1f,0x97,0xe1]
@ CHECK: ldrexd r6, r7, [r8] @ encoding: [0x9f,0x6f,0xb8,0xe1]
+@ GNU alias
+ ldrexd r6, [r8]
+@ CHECK: ldrexd r6, r7, [r8] @ encoding: [0x9f,0x6f,0xb8,0xe1]
+
@------------------------------------------------------------------------------
@ LDRHT
@------------------------------------------------------------------------------
@@ -1774,6 +1778,9 @@ Lforward:
pkhtb r2, r2, r3, asr #31
pkhtb r2, r2, r3, asr #15
+ it ne
+ pkhtbne r2, r2, r3, asr #15
+
@ CHECK: pkhbt r2, r2, r3 @ encoding: [0x13,0x20,0x82,0xe6]
@ CHECK: pkhbt r2, r2, r3, lsl #31 @ encoding: [0x93,0x2f,0x82,0xe6]
@ CHECK: pkhbt r2, r2, r3 @ encoding: [0x13,0x20,0x82,0xe6]
@@ -1782,6 +1789,7 @@ Lforward:
@ CHECK: pkhbt r2, r3, r2 @ encoding: [0x12,0x20,0x83,0xe6]
@ CHECK: pkhtb r2, r2, r3, asr #31 @ encoding: [0xd3,0x2f,0x82,0xe6]
@ CHECK: pkhtb r2, r2, r3, asr #15 @ encoding: [0xd3,0x27,0x82,0xe6]
+@ CHECK: pkhtbne r2, r2, r3, asr #15 @ encoding: [0xd3,0x27,0x82,0x16]
@------------------------------------------------------------------------------
@ FIXME: PLD
@@ -2900,6 +2908,10 @@ Lforward:
@ CHECK: strex r2, r1, [r7] @ encoding: [0x91,0x2f,0x87,0xe1]
@ CHECK: strexd r6, r2, r3, [r8] @ encoding: [0x92,0x6f,0xa8,0xe1]
+@ GNU alias
+ strexd r6, r2, [r8]
+@ CHECK: strexd r6, r2, r3, [r8] @ encoding: [0x92,0x6f,0xa8,0xe1]
+
@------------------------------------------------------------------------------
@ STR
@------------------------------------------------------------------------------
diff --git a/llvm/test/MC/ARM/cde-fp-vec.s b/llvm/test/MC/ARM/cde-fp-vec.s
index 4b13957..fa18ffa 100644
--- a/llvm/test/MC/ARM/cde-fp-vec.s
+++ b/llvm/test/MC/ARM/cde-fp-vec.s
@@ -12,7 +12,7 @@ ittt eq
vcx1a p1, s7, #2047
// ERROR: [[@LINE+1]]:{{[0-9]+}}: error: instructions in IT block must be predicable
vcx2 p0, d0, d15, #0
-// ERROR-FP: [[@LINE+2]]:{{[0-9]+}}: error: invalid instruction
+// ERROR-FP: [[@LINE+2]]:{{[0-9]+}}: error: instruction requires: mve
// ERROR-MVE: [[@LINE+1]]:{{[0-9]+}}: error: instructions in IT block must be predicable
vcx3 p0, q0, q7, q0, #12
nop
@@ -33,12 +33,15 @@ vcx1a p1, d3, #2047
// ERROR-FP: [[@LINE+1]]:{{[0-9]+}}: error: invalid instruction, any one of the following would fix this:
vcx1 p0, q1, #1234
// CHECK-MVE-NEXT: vcx1a p1, q5, #4095 @ encoding: [0x2f,0xfd,0xff,0xa1]
-// ERROR-FP: [[@LINE+1]]:{{[0-9]+}}: error: invalid instruction
+// ERROR-FP: [[@LINE+1]]:{{[0-9]+}}: error: instruction requires: mve
vcx1a p1, q5, #4095
// ERROR: [[@LINE+1]]:{{[0-9]+}}: error: invalid instruction
vcx1a p1, s7, s7, #2047
-// ERROR: [[@LINE+1]]:{{[0-9]+}}: error: operand must be an immediate in the range [0,2047]
+// ERROR-FP: [[@LINE+4]]:{{[0-9]+}}: error: operand must be an immediate in the range [0,2047]
+// ERROR-MVE: [[@LINE+3]]:{{[0-9]+}}: error: invalid instruction, any one of the following would fix this
+// ERROR-MVE: [[@LINE+2]]:{{[0-9]+}}: note: operand must be a register in range [q0, q7]
+// ERROR-MVE: [[@LINE+1]]:{{[0-9]+}}: note: operand must be an immediate in the range [0,2047]
vcx1 p0, d0, #2048
// ERROR-FP: [[@LINE+1]]:{{[0-9]+}}: error: operand must be an immediate in the range [0,2047]
vcx1a p1, s0, #2048
@@ -51,10 +54,13 @@ vcx1 p8, d0, #1234
vcx1 p0, d16, #1234
// ERROR: [[@LINE+1]]:{{[0-9]+}}: error: invalid instruction
vcx1 p0, s32, #1234
-// ERROR-FP: [[@LINE+4]]:{{[0-9]+}}: error: invalid instruction, any one of the following would fix this:
-// ERROR-FP: [[@LINE+3]]:{{[0-9]+}}: note: operand must be a register in range [s0, s31]
-// ERROR-FP: [[@LINE+2]]:{{[0-9]+}}: note: operand must be a register in range [d0, d15]
-// ERROR-MVE: [[@LINE+1]]:{{[0-9]+}}: error: operand must be a register in range [q0, q7]
+// ERROR-FP: [[@LINE+7]]:{{[0-9]+}}: error: invalid instruction, any one of the following would fix this:
+// ERROR-FP: [[@LINE+6]]:{{[0-9]+}}: note: operand must be a register in range [s0, s31]
+// ERROR-FP: [[@LINE+5]]:{{[0-9]+}}: note: operand must be a register in range [d0, d15]
+// ERROR-MVE: [[@LINE+4]]:{{[0-9]+}}: error: invalid instruction, any one of the following would fix this:
+// ERROR-MVE: [[@LINE+3]]:{{[0-9]+}}: note: operand must be a register in range [q0, q7]
+// ERROR-MVE: [[@LINE+2]]:{{[0-9]+}}: note: operand must be a register in range [s0, s31]
+// ERROR-MVE: [[@LINE+1]]:{{[0-9]+}}: note: operand must be a register in range [d0, d15]
vcx1 p0, q8, #1234
// ERROR: [[@LINE+3]]:{{[0-9]+}}: error: invalid instruction, any one of the following would fix this:
// ERROR: [[@LINE+2]]:{{[0-9]+}}: note: operand must be a register in range [s0, s31]
@@ -116,7 +122,7 @@ vcx3a p1, d1, d11, d12, #8
// ERROR-MVE: [[@LINE+2]]:{{[0-9]+}}: error: operand must be an immediate in the range [0,15]
// ERROR-FP: error: invalid instruction
vcx3a p1, q1, q2, q3, #16
-// ERROR-MVE: [[@LINE+2]]:{{[0-9]+}}: error: invalid instruction
+// ERROR-MVE: [[@LINE+2]]:{{[0-9]+}}: error: operand must be a register in range [d0, d15]
// ERROR-FP: [[@LINE+1]]:{{[0-9]+}}: error: operand must be a register in range [d0, d15]
vcx3 p0, d0, q0, d7, #1
// ERROR: [[@LINE+1]]:{{[0-9]+}}: error: operand must be a register in range [s0, s31]
diff --git a/llvm/test/MC/ARM/cde-vec-pred.s b/llvm/test/MC/ARM/cde-vec-pred.s
index 6274faf..9932f8d 100644
--- a/llvm/test/MC/ARM/cde-vec-pred.s
+++ b/llvm/test/MC/ARM/cde-vec-pred.s
@@ -19,7 +19,7 @@ vcx3at p1, q3, q7, q6, #15
vcx3e p0, q0, q2, q0, #12
vpt.i8 eq, q0, q0
-// ERROR: [[@LINE+1]]:{{[0-9]+}}: error: incorrect predication in VPT block; got 'none', but expected 't'
+// ERROR: error: incorrect predication in VPT block; got 'none', but expected 't'
vcx1 p0, q1, #1234
vpt.i8 eq, q0, q0
diff --git a/llvm/test/MC/ARM/cps.s b/llvm/test/MC/ARM/cps.s
index bafdfde..1034ed9 100644
--- a/llvm/test/MC/ARM/cps.s
+++ b/llvm/test/MC/ARM/cps.s
@@ -26,6 +26,6 @@
@ V6-ERRORS: note: too many operands for instruction
@ V6-ERRORS: error: invalid instruction, any one of the following would fix this:
@ V6-ERRORS: cps #0
-@ V6-ERRORS: note: too few operands for instruction
@ V6-ERRORS: note: instruction requires: arm-mode
@ V6-ERRORS: note: instruction requires: thumb2
+@ V6-ERRORS: note: too few operands for instruction
diff --git a/llvm/test/MC/ARM/diagnostics.s b/llvm/test/MC/ARM/diagnostics.s
index e6d80ea..fa23a7d 100644
--- a/llvm/test/MC/ARM/diagnostics.s
+++ b/llvm/test/MC/ARM/diagnostics.s
@@ -288,7 +288,7 @@
@ CHECK-ERRORS: error: 'asr' shift amount must be in range [1,32]
@ CHECK-ERRORS: ssat r8, #1, r10, asr #33
@ CHECK-ERRORS: ^
-@ CHECK-ERRORS: error: shift operator 'asr' or 'lsl' expected
+@ CHECK-ERRORS: error: operand must be a register in range [r0, r14]
@ CHECK-ERRORS: ssat r8, #1, r10, lsr #5
@ CHECK-ERRORS: ^
@ CHECK-ERRORS: error: '#' expected
diff --git a/llvm/test/MC/ARM/directive-arch_extension-crypto.s b/llvm/test/MC/ARM/directive-arch_extension-crypto.s
index 8d3cd9e..05b6d9e 100644
--- a/llvm/test/MC/ARM/directive-arch_extension-crypto.s
+++ b/llvm/test/MC/ARM/directive-arch_extension-crypto.s
@@ -10,15 +10,16 @@
.syntax unified
.arch_extension crypto
-@ CHECK-V7: error: architectural extension 'crypto' is not allowed for the current base architecture
+@ CHECK-V7: architectural extension 'crypto' is not allowed for the current base architecture
@ CHECK-V7-NEXT: .arch_extension crypto
@ CHECK-V7-NEXT: ^
.type crypto,%function
crypto:
vmull.p64 q0, d0, d1
-@ CHECK-V7: error: instruction requires: aes armv8
-
+@ CHECK-V7: error: invalid instruction, any one of the following would fix this:
+@ CHECK-V7: note: invalid operand for instruction
+@ CHECK-V7: note: instruction requires: aes armv8
aesd.8 q0, q1
@ CHECK-V7: error: instruction requires: aes armv8
aese.8 q0, q1
@@ -51,14 +52,18 @@ crypto:
@ CHECK-V7: error: instruction requires: sha2 armv8
.arch_extension nocrypto
+@ CHECK-V7: error: architectural extension 'sha2' is not allowed for the current base architecture
+@ CHECK-V7: error: architectural extension 'aes' is not allowed for the current base architecture
@ CHECK-V7: error: architectural extension 'crypto' is not allowed for the current base architecture
-@ CHECK-V7-NEXT: .arch_extension nocrypto
+@ CHECK-V7-NEXT: .arch_extension nocrypto
@ CHECK-V7-NEXT: ^
.type nocrypto,%function
nocrypto:
vmull.p64 q0, d0, d1
-@ CHECK-V7: error: instruction requires: aes armv8
+@ CHECK-V7: error: invalid instruction, any one of the following
+@ CHECK-V7: note: invalid operand for instruction
+@ CHECK-V7: note: instruction requires: aes armv8
@ CHECK-V8: error: instruction requires: aes
aesd.8 q0, q1
diff --git a/llvm/test/MC/ARM/invalid-fp-armv8.s b/llvm/test/MC/ARM/invalid-fp-armv8.s
index dca0e44..c8ce261 100644
--- a/llvm/test/MC/ARM/invalid-fp-armv8.s
+++ b/llvm/test/MC/ARM/invalid-fp-armv8.s
@@ -88,6 +88,8 @@ vrinta.f64.f64 s3, q0
vrintn.f32.f32 d3, d0
@ V8: error: instruction requires: NEON
vrintp.f32 q3, q0
-@ V8: error: instruction requires: NEON
+@ V8: error: invalid instruction, any one of the following would fix this:
+@ V8: note: instruction requires: mve.fp
+@ V8: note: instruction requires: NEON
vrintmlt.f32 q3, q0
@ V8: error: instruction 'vrintm' is not predicable, but condition code specified
diff --git a/llvm/test/MC/ARM/load-store-acquire-release-v8-thumb.s b/llvm/test/MC/ARM/load-store-acquire-release-v8-thumb.s
index be8d3c3..b802bfd 100644
--- a/llvm/test/MC/ARM/load-store-acquire-release-v8-thumb.s
+++ b/llvm/test/MC/ARM/load-store-acquire-release-v8-thumb.s
@@ -14,6 +14,10 @@
@ CHECK-V7: error: instruction requires: acquire/release
@ CHECK-V7: error: instruction requires: acquire/release
+@ GNU alias
+ ldaexd r6, [r8]
+@ CHECK: ldaexd r6, r7, [r8] @ encoding: [0xd8,0xe8,0xff,0x67]
+
stlexb r1, r3, [r4]
stlexh r4, r2, [r5]
stlex r2, r1, [r7]
@@ -27,6 +31,10 @@
@ CHECK-V7: error: instruction requires: acquire/release
@ CHECK-V7: error: instruction requires: acquire/release
+@ GNU alias
+ stlexd r6, r2, [r8]
+@ CHECK: stlexd r6, r2, r3, [r8] @ encoding: [0xc8,0xe8,0xf6,0x23]
+
lda r5, [r6]
ldab r5, [r6]
ldah r12, [r9]
diff --git a/llvm/test/MC/ARM/load-store-acquire-release-v8.s b/llvm/test/MC/ARM/load-store-acquire-release-v8.s
index 273519e..edfe14c 100644
--- a/llvm/test/MC/ARM/load-store-acquire-release-v8.s
+++ b/llvm/test/MC/ARM/load-store-acquire-release-v8.s
@@ -1,4 +1,5 @@
-@ RUN: llvm-mc -triple=armv8 -show-encoding < %s | FileCheck %s
+@ RUN: not llvm-mc -triple=armv8 -show-encoding < %s 2> %t | FileCheck %s
+@ RUN: FileCheck %s < %t --check-prefix=CHECK-ERROR
@ RUN: not llvm-mc -triple=armv7 -show-encoding < %s 2>&1 | FileCheck %s --check-prefix=CHECK-V7
ldaexb r3, [r4]
ldaexh r2, [r5]
@@ -14,6 +15,13 @@
@ CHECK-V7: instruction requires: acquire/release
@ CHECK-V7: instruction requires: acquire/release
+ ldaexd r2, r4, [r8]
+@ CHECK-ERROR: error: destination operands must be sequential
+
+@ GNU alias
+ ldaexd r6, [r8]
+@ CHECK: ldaexd r6, r7, [r8] @ encoding: [0x9f,0x6e,0xb8,0xe1]
+
stlexb r1, r3, [r4]
stlexh r4, r2, [r5]
stlex r2, r1, [r7]
@@ -27,6 +35,13 @@
@ CHECK-V7: instruction requires: acquire/release
@ CHECK-V7: instruction requires: acquire/release
+ stlexd r6, r2, r4, [r8]
+@ CHECK-ERROR: error: source operands must be sequential
+
+@ GNU alias
+ stlexd r6, r2, [r8]
+@ CHECK: stlexd r6, r2, r3, [r8] @ encoding: [0x92,0x6e,0xa8,0xe1]
+
lda r5, [r6]
ldab r5, [r6]
ldah r12, [r9]
diff --git a/llvm/test/MC/ARM/lsl-zero-errors.s b/llvm/test/MC/ARM/lsl-zero-errors.s
index e021aa9..1e51c58 100644
--- a/llvm/test/MC/ARM/lsl-zero-errors.s
+++ b/llvm/test/MC/ARM/lsl-zero-errors.s
@@ -55,22 +55,22 @@
// CHECK-NONARM: error: invalid instruction, any one of the following would fix this:
// CHECK-NONARM-NEXT: mov pc, r0, lsl #0
-// CHECK-NONARM: note: operand must be a register in range [r0, r15]
// CHECK-THUMBV7: note: operand must be a register in range [r0, r12] or r14
// CHECK-THUMBV8: note: operand must be a register in range [r0, r14]
+// CHECK-NONARM: note: operand must be a register in range [r0, r15]
// CHECK-NONARM: error: invalid instruction, any one of the following would fix this:
// CHECK-NONARM-NEXT: mov r0, pc, lsl #0
-// CHECK-NONARM: note: operand must be a register in range [r0, r15]
// CHECK-NONARM: note: invalid operand for instruction
// CHECK-NONARM: note: invalid operand for instruction
// CHECK-NONARM: note: operand must be an immediate in the range [256,65535]
+// CHECK-NONARM: note: operand must be a register in range [r0, r15]
// CHECK-NONARM: error: invalid instruction, any one of the following would fix this:
// CHECK-NONARM-NEXT: mov pc, pc, lsl #0
-// CHECK-NONARM: note: operand must be a register in range [r0, r15]
// CHECK-THUMBV7: note: operand must be a register in range [r0, r12] or r14
// CHECK-THUMBV8: note: operand must be a register in range [r0, r14]
+// CHECK-NONARM: note: operand must be a register in range [r0, r15]
// CHECK-NONARM: error: invalid instruction, any one of the following would fix this:
// CHECK-NONARM-NEXT: movs pc, r0, lsl #0
@@ -134,8 +134,8 @@
// FIXME: We should consistently have the "requires ARMv8" error here
// CHECK-THUMBV7: error: invalid instruction, any one of the following would fix this:
// CHECK-THUMBV7-NEXT: mov sp, sp, lsl #0
-// CHECK-THUMBV7: note: operand must be a register in range [r0, r15]
// CHECK-THUMBV7: note: operand must be a register in range [r0, r12] or r14
+// CHECK-THUMBV7: note: operand must be a register in range [r0, r15]
// CHECK-THUMBV7: error: invalid instruction, any one of the following would fix this:
// CHECK-THUMBV7-NEXT: movs sp, sp, lsl #0
diff --git a/llvm/test/MC/ARM/mve-load-store.s b/llvm/test/MC/ARM/mve-load-store.s
index 5c6d2a17..797ab1e 100644
--- a/llvm/test/MC/ARM/mve-load-store.s
+++ b/llvm/test/MC/ARM/mve-load-store.s
@@ -5,55 +5,55 @@
# RUN: FileCheck --check-prefix=ERROR-NOMVE < %t %s
# CHECK: vldrb.u8 q0, [r0] @ encoding: [0x90,0xed,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q0, [r0]
# CHECK: vldrb.u8 q1, [r0] @ encoding: [0x90,0xed,0x00,0x3e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q1, [r0]
# CHECK: vldrb.u8 q0, [r11] @ encoding: [0x9b,0xed,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q0, [r11]
# CHECK: vldrb.u8 q3, [r11] @ encoding: [0x9b,0xed,0x00,0x7e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q3, [r11]
# CHECK: vldrb.u8 q0, [r4, #56] @ encoding: [0x94,0xed,0x38,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q0, [r4, #56]
# CHECK: vldrb.u8 q4, [r4, #56] @ encoding: [0x94,0xed,0x38,0x9e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q4, [r4, #56]
# CHECK: vldrb.u8 q0, [r8, #56] @ encoding: [0x98,0xed,0x38,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q0, [r8, #56]
# CHECK: vldrb.u8 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x38,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q5, [r4, #56]!
# CHECK: vldrb.u8 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x38,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q5, [r4, #56]!
# CHECK: vldrb.u8 q5, [r4], #-25 @ encoding: [0x34,0xec,0x19,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q5, [r4], #-25
# CHECK: vldrb.u8 q5, [r10], #-25 @ encoding: [0x3a,0xec,0x19,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q5, [r10], #-25
# CHECK: vldrb.u8 q5, [sp, #-25] @ encoding: [0x1d,0xed,0x19,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q5, [sp, #-25]
# CHECK: vldrb.u8 q5, [sp, #-127] @ encoding: [0x1d,0xed,0x7f,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q5, [sp, #-127]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
@@ -69,55 +69,55 @@ vldrb.u8 q0, [r0, #-128]!
vldrb.u8 q0, [r0], #128
# CHECK: vstrb.8 q0, [r0] @ encoding: [0x80,0xed,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q0, [r0]
# CHECK: vstrb.8 q1, [r0] @ encoding: [0x80,0xed,0x00,0x3e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q1, [r0]
# CHECK: vstrb.8 q0, [r11] @ encoding: [0x8b,0xed,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q0, [r11]
# CHECK: vstrb.8 q3, [r11] @ encoding: [0x8b,0xed,0x00,0x7e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q3, [r11]
# CHECK: vstrb.8 q0, [r4, #56] @ encoding: [0x84,0xed,0x38,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q0, [r4, #56]
# CHECK: vstrb.8 q4, [r4, #56] @ encoding: [0x84,0xed,0x38,0x9e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q4, [r4, #56]
# CHECK: vstrb.8 q0, [r8, #56] @ encoding: [0x88,0xed,0x38,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q0, [r8, #56]
# CHECK: vstrb.8 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x38,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q5, [r4, #56]!
# CHECK: vstrb.8 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x38,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q5, [r4, #56]!
# CHECK: vstrb.8 q5, [r4], #-25 @ encoding: [0x24,0xec,0x19,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q5, [r4], #-25
# CHECK: vstrb.8 q5, [r10], #-25 @ encoding: [0x2a,0xec,0x19,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q5, [r10], #-25
# CHECK: vstrb.8 q5, [sp, #-25] @ encoding: [0x0d,0xed,0x19,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q5, [sp, #-25]
# CHECK: vstrb.8 q5, [sp, #127] @ encoding: [0x8d,0xed,0x7f,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q5, [sp, #127]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
@@ -133,735 +133,735 @@ vstrb.u8 q0, [r0, #-128]!
vstrb.u8 q0, [r0], #128
# CHECK: vldrb.u16 q0, [r0] @ encoding: [0x90,0xfd,0x80,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q0, [r0]
# CHECK: vldrb.u16 q1, [r0] @ encoding: [0x90,0xfd,0x80,0x2e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q1, [r0]
# CHECK: vldrb.u16 q0, [r7] @ encoding: [0x97,0xfd,0x80,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q0, [r7]
# CHECK: vldrb.u16 q3, [r7] @ encoding: [0x97,0xfd,0x80,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q3, [r7]
# CHECK: vldrb.u16 q0, [r4, #56] @ encoding: [0x94,0xfd,0xb8,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q0, [r4, #56]
# CHECK: vldrb.u16 q4, [r4, #56] @ encoding: [0x94,0xfd,0xb8,0x8e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q4, [r4, #56]
# CHECK: vldrb.u16 q0, [r2, #56] @ encoding: [0x92,0xfd,0xb8,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q0, [r2, #56]
# CHECK: vldrb.u16 q5, [r4, #56]! @ encoding: [0xb4,0xfd,0xb8,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q5, [r4, #56]!
# CHECK: vldrb.u16 q5, [r4, #56]! @ encoding: [0xb4,0xfd,0xb8,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q5, [r4, #56]!
# CHECK: vldrb.u16 q5, [r4], #-1 @ encoding: [0x34,0xfc,0x81,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q5, [r4], #-1
# CHECK: vldrb.u16 q5, [r3], #-25 @ encoding: [0x33,0xfc,0x99,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q5, [r3], #-25
# CHECK: vldrb.u16 q5, [r6, #-25] @ encoding: [0x16,0xfd,0x99,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q5, [r6, #-25]
# CHECK: vldrb.u16 q5, [r6, #-64] @ encoding: [0x16,0xfd,0xc0,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q5, [r6, #-64]
# CHECK: vldrb.s16 q0, [r0] @ encoding: [0x90,0xed,0x80,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q0, [r0]
# CHECK: vldrb.s16 q1, [r0] @ encoding: [0x90,0xed,0x80,0x2e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q1, [r0]
# CHECK: vldrb.s16 q0, [r7] @ encoding: [0x97,0xed,0x80,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q0, [r7]
# CHECK: vldrb.s16 q3, [r7] @ encoding: [0x97,0xed,0x80,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q3, [r7]
# CHECK: vldrb.s16 q0, [r4, #56] @ encoding: [0x94,0xed,0xb8,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q0, [r4, #56]
# CHECK: vldrb.s16 q4, [r4, #56] @ encoding: [0x94,0xed,0xb8,0x8e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q4, [r4, #56]
# CHECK: vldrb.s16 q0, [r2, #56] @ encoding: [0x92,0xed,0xb8,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q0, [r2, #56]
# CHECK: vldrb.s16 q5, [r4, #56]! @ encoding: [0xb4,0xed,0xb8,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q5, [r4, #56]!
# CHECK: vldrb.s16 q5, [r4, #56]! @ encoding: [0xb4,0xed,0xb8,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q5, [r4, #56]!
# CHECK: vldrb.s16 q5, [r4], #-25 @ encoding: [0x34,0xec,0x99,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q5, [r4], #-25
# CHECK: vldrb.s16 q5, [r3], #-25 @ encoding: [0x33,0xec,0x99,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q5, [r3], #-25
# CHECK: vldrb.s16 q5, [r6, #-25] @ encoding: [0x16,0xed,0x99,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q5, [r6, #-25]
# CHECK: vldrb.s16 q5, [r6, #-64] @ encoding: [0x16,0xed,0xc0,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q5, [r6, #-64]
# CHECK: vstrb.16 q0, [r0] @ encoding: [0x80,0xed,0x80,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q0, [r0]
# CHECK: vstrb.16 q1, [r0] @ encoding: [0x80,0xed,0x80,0x2e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q1, [r0]
# CHECK: vstrb.16 q0, [r7] @ encoding: [0x87,0xed,0x80,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q0, [r7]
# CHECK: vstrb.16 q3, [r7] @ encoding: [0x87,0xed,0x80,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q3, [r7]
# CHECK: vstrb.16 q0, [r4, #56] @ encoding: [0x84,0xed,0xb8,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q0, [r4, #56]
# CHECK: vstrb.16 q4, [r4, #56] @ encoding: [0x84,0xed,0xb8,0x8e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q4, [r4, #56]
# CHECK: vstrb.16 q0, [r5, #56] @ encoding: [0x85,0xed,0xb8,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q0, [r5, #56]
# CHECK: vstrb.16 q5, [r4, #56]! @ encoding: [0xa4,0xed,0xb8,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q5, [r4, #56]!
# CHECK: vstrb.16 q5, [r4, #56]! @ encoding: [0xa4,0xed,0xb8,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q5, [r4, #56]!
# CHECK: vstrb.16 q5, [r4], #-25 @ encoding: [0x24,0xec,0x99,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q5, [r4], #-25
# CHECK: vstrb.16 q5, [r3], #-25 @ encoding: [0x23,0xec,0x99,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q5, [r3], #-25
# CHECK: vstrb.16 q5, [r2, #-25] @ encoding: [0x02,0xed,0x99,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q5, [r2, #-25]
# CHECK: vstrb.16 q5, [r2, #-64] @ encoding: [0x02,0xed,0xc0,0xae]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q5, [r2, #-64]
# CHECK: vldrb.u32 q0, [r0] @ encoding: [0x90,0xfd,0x00,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q0, [r0]
# CHECK: vldrb.u32 q1, [r0] @ encoding: [0x90,0xfd,0x00,0x2f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q1, [r0]
# CHECK: vldrb.u32 q0, [r7] @ encoding: [0x97,0xfd,0x00,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q0, [r7]
# CHECK: vldrb.u32 q3, [r7] @ encoding: [0x97,0xfd,0x00,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q3, [r7]
# CHECK: vldrb.u32 q0, [r4, #56] @ encoding: [0x94,0xfd,0x38,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q0, [r4, #56]
# CHECK: vldrb.u32 q4, [r4, #56] @ encoding: [0x94,0xfd,0x38,0x8f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q4, [r4, #56]
# CHECK: vldrb.u32 q0, [r2, #56] @ encoding: [0x92,0xfd,0x38,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q0, [r2, #56]
# CHECK: vldrb.u32 q5, [r4, #56]! @ encoding: [0xb4,0xfd,0x38,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q5, [r4, #56]!
# CHECK: vldrb.u32 q5, [r4, #56]! @ encoding: [0xb4,0xfd,0x38,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q5, [r4, #56]!
# CHECK: vldrb.u32 q5, [r4], #-25 @ encoding: [0x34,0xfc,0x19,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q5, [r4], #-25
# CHECK: vldrb.u32 q5, [r3], #-25 @ encoding: [0x33,0xfc,0x19,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q5, [r3], #-25
# CHECK: vldrb.u32 q5, [r6, #-25] @ encoding: [0x16,0xfd,0x19,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q5, [r6, #-25]
# CHECK: vldrb.u32 q5, [r6, #-64] @ encoding: [0x16,0xfd,0x40,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q5, [r6, #-64]
# CHECK: vldrb.s32 q0, [r0] @ encoding: [0x90,0xed,0x00,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q0, [r0]
# CHECK: vldrb.s32 q1, [r0] @ encoding: [0x90,0xed,0x00,0x2f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q1, [r0]
# CHECK: vldrb.s32 q0, [r7] @ encoding: [0x97,0xed,0x00,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q0, [r7]
# CHECK: vldrb.s32 q3, [r7] @ encoding: [0x97,0xed,0x00,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q3, [r7]
# CHECK: vldrb.s32 q0, [r4, #56] @ encoding: [0x94,0xed,0x38,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q0, [r4, #56]
# CHECK: vldrb.s32 q4, [r4, #56] @ encoding: [0x94,0xed,0x38,0x8f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q4, [r4, #56]
# CHECK: vldrb.s32 q0, [r2, #56] @ encoding: [0x92,0xed,0x38,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q0, [r2, #56]
# CHECK: vldrb.s32 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x38,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q5, [r4, #56]!
# CHECK: vldrb.s32 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x38,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q5, [r4, #56]!
# CHECK: vldrb.s32 q5, [r4], #-25 @ encoding: [0x34,0xec,0x19,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q5, [r4], #-25
# CHECK: vldrb.s32 q5, [r3], #-25 @ encoding: [0x33,0xec,0x19,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q5, [r3], #-25
# CHECK: vldrb.s32 q5, [r6, #-25] @ encoding: [0x16,0xed,0x19,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q5, [r6, #-25]
# CHECK: vldrb.s32 q5, [r6, #-64] @ encoding: [0x16,0xed,0x40,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q5, [r6, #-64]
# CHECK: vstrb.32 q0, [r0] @ encoding: [0x80,0xed,0x00,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q0, [r0]
# CHECK: vstrb.32 q1, [r0] @ encoding: [0x80,0xed,0x00,0x2f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q1, [r0]
# CHECK: vstrb.32 q0, [r7] @ encoding: [0x87,0xed,0x00,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q0, [r7]
# CHECK: vstrb.32 q3, [r7] @ encoding: [0x87,0xed,0x00,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q3, [r7]
# CHECK: vstrb.32 q0, [r4, #56] @ encoding: [0x84,0xed,0x38,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q0, [r4, #56]
# CHECK: vstrb.32 q4, [r4, #56] @ encoding: [0x84,0xed,0x38,0x8f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q4, [r4, #56]
# CHECK: vstrb.32 q0, [r5, #56] @ encoding: [0x85,0xed,0x38,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q0, [r5, #56]
# CHECK: vstrb.32 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x38,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q5, [r4, #56]!
# CHECK: vstrb.32 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x38,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q5, [r4, #56]!
# CHECK: vstrb.32 q5, [r4], #-25 @ encoding: [0x24,0xec,0x19,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q5, [r4], #-25
# CHECK: vstrb.32 q5, [r3], #-25 @ encoding: [0x23,0xec,0x19,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q5, [r3], #-25
# CHECK: vstrb.32 q5, [r2, #-25] @ encoding: [0x02,0xed,0x19,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q5, [r2, #-25]
# CHECK: vstrb.32 q5, [r2, #-64] @ encoding: [0x02,0xed,0x40,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q5, [r2, #-64]
# CHECK: vldrh.u16 q0, [r0] @ encoding: [0x90,0xed,0x80,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q0, [r0]
# CHECK: vldrh.u16 q1, [r0] @ encoding: [0x90,0xed,0x80,0x3e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q1, [r0]
# CHECK: vldrh.u16 q0, [r11] @ encoding: [0x9b,0xed,0x80,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q0, [r11]
# CHECK: vldrh.u16 q3, [r11] @ encoding: [0x9b,0xed,0x80,0x7e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q3, [r11]
# CHECK: vldrh.u16 q0, [r4, #56] @ encoding: [0x94,0xed,0x9c,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q0, [r4, #56]
# CHECK: vldrh.u16 q4, [r4, #56] @ encoding: [0x94,0xed,0x9c,0x9e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q4, [r4, #56]
# CHECK: vldrh.u16 q0, [r8, #56] @ encoding: [0x98,0xed,0x9c,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q0, [r8, #56]
# CHECK: vldrh.u16 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x9c,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q5, [r4, #56]!
# CHECK: vldrh.u16 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x9c,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q5, [r4, #56]!
# CHECK: vldrh.u16 q5, [r4], #-26 @ encoding: [0x34,0xec,0x8d,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q5, [r4], #-26
# CHECK: vldrh.u16 q5, [r10], #-26 @ encoding: [0x3a,0xec,0x8d,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q5, [r10], #-26
# CHECK: vldrh.u16 q5, [sp, #-26] @ encoding: [0x1d,0xed,0x8d,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q5, [sp, #-26]
# CHECK: vldrh.u16 q5, [sp, #-64] @ encoding: [0x1d,0xed,0xa0,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q5, [sp, #-64]
# CHECK: vldrh.u16 q5, [sp, #-254] @ encoding: [0x1d,0xed,0xff,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q5, [sp, #-254]
# CHECK: vldrh.u16 q5, [r10], #254 @ encoding: [0xba,0xec,0xff,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q5, [r10], #254
# CHECK: vstrh.16 q0, [r0] @ encoding: [0x80,0xed,0x80,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q0, [r0]
# CHECK: vstrh.16 q1, [r0] @ encoding: [0x80,0xed,0x80,0x3e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q1, [r0]
# CHECK: vstrh.16 q0, [r11] @ encoding: [0x8b,0xed,0x80,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q0, [r11]
# CHECK: vstrh.16 q3, [r11] @ encoding: [0x8b,0xed,0x80,0x7e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q3, [r11]
# CHECK: vstrh.16 q0, [r4, #56] @ encoding: [0x84,0xed,0x9c,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q0, [r4, #56]
# CHECK: vstrh.16 q4, [r4, #56] @ encoding: [0x84,0xed,0x9c,0x9e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q4, [r4, #56]
# CHECK: vstrh.16 q0, [r8, #56] @ encoding: [0x88,0xed,0x9c,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q0, [r8, #56]
# CHECK: vstrh.16 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x9c,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q5, [r4, #56]!
# CHECK: vstrh.16 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x9c,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q5, [r4, #56]!
# CHECK: vstrh.16 q5, [r4], #-26 @ encoding: [0x24,0xec,0x8d,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q5, [r4], #-26
# CHECK: vstrh.16 q5, [r10], #-26 @ encoding: [0x2a,0xec,0x8d,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q5, [r10], #-26
# CHECK: vstrh.16 q5, [sp, #-26] @ encoding: [0x0d,0xed,0x8d,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q5, [sp, #-26]
# CHECK: vstrh.16 q5, [sp, #-64] @ encoding: [0x0d,0xed,0xa0,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q5, [sp, #-64]
# CHECK: vstrh.16 q5, [sp, #-254] @ encoding: [0x0d,0xed,0xff,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q5, [sp, #-254]
# CHECK: vstrh.16 q5, [r10], #254 @ encoding: [0xaa,0xec,0xff,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q5, [r10], #254
# CHECK: vldrh.u32 q0, [r0] @ encoding: [0x98,0xfd,0x00,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q0, [r0]
# CHECK: vldrh.u32 q1, [r0] @ encoding: [0x98,0xfd,0x00,0x2f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q1, [r0]
# CHECK: vldrh.u32 q0, [r7] @ encoding: [0x9f,0xfd,0x00,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q0, [r7]
# CHECK: vldrh.u32 q3, [r7] @ encoding: [0x9f,0xfd,0x00,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q3, [r7]
# CHECK: vldrh.u32 q0, [r4, #56] @ encoding: [0x9c,0xfd,0x1c,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q0, [r4, #56]
# CHECK: vldrh.u32 q4, [r4, #56] @ encoding: [0x9c,0xfd,0x1c,0x8f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q4, [r4, #56]
# CHECK: vldrh.u32 q0, [r2, #56] @ encoding: [0x9a,0xfd,0x1c,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q0, [r2, #56]
# CHECK: vldrh.u32 q5, [r4, #56]! @ encoding: [0xbc,0xfd,0x1c,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q5, [r4, #56]!
# CHECK: vldrh.u32 q5, [r4, #56]! @ encoding: [0xbc,0xfd,0x1c,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q5, [r4, #56]!
# CHECK: vldrh.u32 q5, [r4], #-26 @ encoding: [0x3c,0xfc,0x0d,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q5, [r4], #-26
# CHECK: vldrh.u32 q5, [r3], #-26 @ encoding: [0x3b,0xfc,0x0d,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q5, [r3], #-26
# CHECK: vldrh.u32 q5, [r6, #-26] @ encoding: [0x1e,0xfd,0x0d,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q5, [r6, #-26]
# CHECK: vldrh.u32 q5, [r6, #-64] @ encoding: [0x1e,0xfd,0x20,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q5, [r6, #-64]
# CHECK: vldrh.u32 q5, [r6, #-254] @ encoding: [0x1e,0xfd,0x7f,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q5, [r6, #-254]
# CHECK: vldrh.u32 q5, [r4, #254]! @ encoding: [0xbc,0xfd,0x7f,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q5, [r4, #254]!
# CHECK: vldrh.s32 q0, [r0] @ encoding: [0x98,0xed,0x00,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q0, [r0]
# CHECK: vldrh.s32 q1, [r0] @ encoding: [0x98,0xed,0x00,0x2f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q1, [r0]
# CHECK: vldrh.s32 q0, [r7] @ encoding: [0x9f,0xed,0x00,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q0, [r7]
# CHECK: vldrh.s32 q3, [r7] @ encoding: [0x9f,0xed,0x00,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q3, [r7]
# CHECK: vldrh.s32 q0, [r4, #56] @ encoding: [0x9c,0xed,0x1c,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q0, [r4, #56]
# CHECK: vldrh.s32 q4, [r4, #56] @ encoding: [0x9c,0xed,0x1c,0x8f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q4, [r4, #56]
# CHECK: vldrh.s32 q0, [r2, #56] @ encoding: [0x9a,0xed,0x1c,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q0, [r2, #56]
# CHECK: vldrh.s32 q5, [r4, #56]! @ encoding: [0xbc,0xed,0x1c,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q5, [r4, #56]!
# CHECK: vldrh.s32 q5, [r4, #56]! @ encoding: [0xbc,0xed,0x1c,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q5, [r4, #56]!
# CHECK: vldrh.s32 q5, [r4], #-26 @ encoding: [0x3c,0xec,0x0d,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q5, [r4], #-26
# CHECK: vldrh.s32 q5, [r3], #-26 @ encoding: [0x3b,0xec,0x0d,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q5, [r3], #-26
# CHECK: vldrh.s32 q5, [r6, #-26] @ encoding: [0x1e,0xed,0x0d,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q5, [r6, #-26]
# CHECK: vldrh.s32 q5, [r6, #-64] @ encoding: [0x1e,0xed,0x20,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q5, [r6, #-64]
# CHECK: vldrh.s32 q5, [r6, #-254] @ encoding: [0x1e,0xed,0x7f,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q5, [r6, #-254]
# CHECK: vldrh.s32 q5, [r4, #254]! @ encoding: [0xbc,0xed,0x7f,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q5, [r4, #254]!
# CHECK: vstrh.32 q0, [r0] @ encoding: [0x88,0xed,0x00,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q0, [r0]
# CHECK: vstrh.32 q1, [r0] @ encoding: [0x88,0xed,0x00,0x2f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q1, [r0]
# CHECK: vstrh.32 q0, [r7] @ encoding: [0x8f,0xed,0x00,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q0, [r7]
# CHECK: vstrh.32 q3, [r7] @ encoding: [0x8f,0xed,0x00,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q3, [r7]
# CHECK: vstrh.32 q0, [r4, #56] @ encoding: [0x8c,0xed,0x1c,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q0, [r4, #56]
# CHECK: vstrh.32 q4, [r4, #56] @ encoding: [0x8c,0xed,0x1c,0x8f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q4, [r4, #56]
# CHECK: vstrh.32 q0, [r5, #56] @ encoding: [0x8d,0xed,0x1c,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q0, [r5, #56]
# CHECK: vstrh.32 q5, [r4, #56]! @ encoding: [0xac,0xed,0x1c,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q5, [r4, #56]!
# CHECK: vstrh.32 q5, [r4, #56]! @ encoding: [0xac,0xed,0x1c,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q5, [r4, #56]!
# CHECK: vstrh.32 q5, [r4], #-26 @ encoding: [0x2c,0xec,0x0d,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q5, [r4], #-26
# CHECK: vstrh.32 q5, [r3], #-26 @ encoding: [0x2b,0xec,0x0d,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q5, [r3], #-26
# CHECK: vstrh.32 q5, [r2, #-26] @ encoding: [0x0a,0xed,0x0d,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q5, [r2, #-26]
# CHECK: vstrh.32 q5, [r2, #-64] @ encoding: [0x0a,0xed,0x20,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q5, [r2, #-64]
# CHECK: vstrh.32 q5, [r2, #-254] @ encoding: [0x0a,0xed,0x7f,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q5, [r2, #-254]
# CHECK: vstrh.32 q5, [r4, #254]! @ encoding: [0xac,0xed,0x7f,0xaf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q5, [r4, #254]!
# CHECK: vldrw.u32 q0, [r0] @ encoding: [0x90,0xed,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q0, [r0]
# CHECK: vldrw.u32 q1, [r0] @ encoding: [0x90,0xed,0x00,0x3f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q1, [r0]
# CHECK: vldrw.u32 q0, [r11] @ encoding: [0x9b,0xed,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q0, [r11]
# CHECK: vldrw.u32 q3, [r11] @ encoding: [0x9b,0xed,0x00,0x7f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q3, [r11]
# CHECK: vldrw.u32 q0, [r4, #56] @ encoding: [0x94,0xed,0x0e,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q0, [r4, #56]
# CHECK: vldrw.u32 q4, [r4, #56] @ encoding: [0x94,0xed,0x0e,0x9f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q4, [r4, #56]
# CHECK: vldrw.u32 q0, [r8, #56] @ encoding: [0x98,0xed,0x0e,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q0, [r8, #56]
# CHECK: vldrw.u32 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x0e,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q5, [r4, #56]!
# CHECK: vldrw.u32 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x0e,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q5, [r4, #56]!
# CHECK: vldrw.u32 q5, [r4], #-28 @ encoding: [0x34,0xec,0x07,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q5, [r4], #-28
# CHECK: vldrw.u32 q5, [r10], #-28 @ encoding: [0x3a,0xec,0x07,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q5, [r10], #-28
# CHECK: vldrw.u32 q5, [sp, #-28] @ encoding: [0x1d,0xed,0x07,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q5, [sp, #-28]
# CHECK: vldrw.u32 q5, [sp, #-64] @ encoding: [0x1d,0xed,0x10,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q5, [sp, #-64]
# CHECK: vldrw.u32 q5, [sp, #-508] @ encoding: [0x1d,0xed,0x7f,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q5, [sp, #-508]
# CHECK: vldrw.u32 q5, [r4, #508]! @ encoding: [0xb4,0xed,0x7f,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q5, [r4, #508]!
# CHECK: vstrw.32 q0, [r0] @ encoding: [0x80,0xed,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q0, [r0]
# CHECK: vstrw.32 q1, [r0] @ encoding: [0x80,0xed,0x00,0x3f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q1, [r0]
# CHECK: vstrw.32 q0, [r11] @ encoding: [0x8b,0xed,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q0, [r11]
# CHECK: vstrw.32 q3, [r11] @ encoding: [0x8b,0xed,0x00,0x7f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q3, [r11]
# CHECK: vstrw.32 q0, [r4, #56] @ encoding: [0x84,0xed,0x0e,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q0, [r4, #56]
# CHECK: vstrw.32 q4, [r4, #56] @ encoding: [0x84,0xed,0x0e,0x9f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q4, [r4, #56]
# CHECK: vstrw.32 q0, [r8, #56] @ encoding: [0x88,0xed,0x0e,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q0, [r8, #56]
# CHECK: vstrw.32 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x0e,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q5, [r4, #56]!
# CHECK: vstrw.32 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x0e,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q5, [r4, #56]!
# CHECK: vstrw.32 q5, [r4], #-28 @ encoding: [0x24,0xec,0x07,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q5, [r4], #-28
# CHECK: vstrw.32 q5, [r10], #-28 @ encoding: [0x2a,0xec,0x07,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q5, [r10], #-28
# CHECK: vstrw.32 q5, [sp, #-28] @ encoding: [0x0d,0xed,0x07,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q5, [sp, #-28]
# CHECK: vstrw.32 q5, [sp, #-64] @ encoding: [0x0d,0xed,0x10,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q5, [sp, #-64]
# CHECK: vstrw.32 q5, [sp, #-508] @ encoding: [0x0d,0xed,0x7f,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q5, [sp, #-508]
# CHECK: vstrw.32 q5, [r4, #508]! @ encoding: [0xa4,0xed,0x7f,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q5, [r4, #508]!
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
@@ -885,271 +885,271 @@ vstrw.32 q5, [sp, #-3]
vstrw.32 q5, [sp, #512]
# CHECK: vldrb.u8 q0, [r0, q1] @ encoding: [0x90,0xfc,0x02,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q0, [r0, q1]
# CHECK: vldrb.u8 q3, [r10, q1] @ encoding: [0x9a,0xfc,0x02,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q3, [r10, q1]
# CHECK: vldrb.u16 q0, [r0, q1] @ encoding: [0x90,0xfc,0x82,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q0, [r0, q1]
# CHECK: vldrb.u16 q3, [r9, q1] @ encoding: [0x99,0xfc,0x82,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q3, [r9, q1]
# CHECK: vldrb.s16 q0, [r0, q1] @ encoding: [0x90,0xec,0x82,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q0, [r0, q1]
# CHECK: vldrb.s16 q3, [sp, q1] @ encoding: [0x9d,0xec,0x82,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q3, [sp, q1]
# CHECK: vldrb.u32 q0, [r0, q1] @ encoding: [0x90,0xfc,0x02,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q0, [r0, q1]
# CHECK: vldrb.u32 q3, [r0, q1] @ encoding: [0x90,0xfc,0x02,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q3, [r0, q1]
# CHECK: vldrb.s32 q0, [r0, q1] @ encoding: [0x90,0xec,0x02,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q0, [r0, q1]
# CHECK: vldrb.s32 q3, [r0, q1] @ encoding: [0x90,0xec,0x02,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q3, [r0, q1]
# CHECK: vldrh.u16 q0, [r0, q1] @ encoding: [0x90,0xfc,0x92,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q0, [r0, q1]
# CHECK: vldrh.u16 q3, [r0, q1] @ encoding: [0x90,0xfc,0x92,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q3, [r0, q1]
# CHECK: vldrh.u32 q0, [r0, q1] @ encoding: [0x90,0xfc,0x12,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q0, [r0, q1]
# CHECK: vldrh.u32 q3, [r0, q1] @ encoding: [0x90,0xfc,0x12,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q3, [r0, q1]
# CHECK: vldrh.s32 q0, [r0, q1] @ encoding: [0x90,0xec,0x12,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q0, [r0, q1]
# CHECK: vldrh.s32 q3, [r0, q1] @ encoding: [0x90,0xec,0x12,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q3, [r0, q1]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector offset register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u8 q0, [r0, q0]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector offset register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u16 q0, [r0, q0]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector offset register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s16 q0, [r0, q0]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector offset register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.u32 q0, [r0, q0]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector offset register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s32 q0, [r0, q0]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector offset register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q0, [r0, q0]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector offset register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q0, [r0, q0]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector offset register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q0, [r0, q0]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector offset register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q0, [r0, q0, uxtw #1]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector offset register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u32 q0, [r0, q0, uxtw #1]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector offset register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s32 q0, [r0, q0, uxtw #1]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector offset register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q0, [r0, q0]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector offset register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q0, [r0, q0, uxtw #2]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector offset register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q0, [r0, q0]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector offset register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q0, [r0, q0, uxtw #3]
# CHECK: vldrh.u16 q0, [r0, q1, uxtw #1] @ encoding: [0x90,0xfc,0x93,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.u16 q0, [r0, q1, uxtw #1]
# CHECK: vldrw.u32 q0, [r0, q1] @ encoding: [0x90,0xfc,0x42,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q0, [r0, q1]
# CHECK: vldrw.u32 q3, [r0, q1] @ encoding: [0x90,0xfc,0x42,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q3, [r0, q1]
# CHECK: vldrw.u32 q0, [r0, q1, uxtw #2] @ encoding: [0x90,0xfc,0x43,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q0, [r0, q1, uxtw #2]
# CHECK: vldrw.u32 q0, [sp, q1, uxtw #2] @ encoding: [0x9d,0xfc,0x43,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q0, [sp, q1, uxtw #2]
# CHECK: vldrd.u64 q0, [r0, q1] @ encoding: [0x90,0xfc,0xd2,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q0, [r0, q1]
# CHECK: vldrd.u64 q3, [r0, q1] @ encoding: [0x90,0xfc,0xd2,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q3, [r0, q1]
# CHECK: vldrd.u64 q0, [r0, q1, uxtw #3] @ encoding: [0x90,0xfc,0xd3,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q0, [r0, q1, uxtw #3]
# CHECK: vldrd.u64 q0, [sp, q1, uxtw #3] @ encoding: [0x9d,0xfc,0xd3,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q0, [sp, q1, uxtw #3]
# CHECK: vstrb.8 q0, [r0, q1] @ encoding: [0x80,0xec,0x02,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q0, [r0, q1]
# CHECK: vstrb.8 q3, [r10, q1] @ encoding: [0x8a,0xec,0x02,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q3, [r10, q1]
# CHECK: vstrb.8 q3, [r0, q3] @ encoding: [0x80,0xec,0x06,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.8 q3, [r0, q3]
# CHECK: vstrb.16 q0, [r0, q1] @ encoding: [0x80,0xec,0x82,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q0, [r0, q1]
# CHECK: vstrb.16 q3, [sp, q1] @ encoding: [0x8d,0xec,0x82,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q3, [sp, q1]
# CHECK: vstrb.16 q3, [r0, q3] @ encoding: [0x80,0xec,0x86,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.16 q3, [r0, q3]
# CHECK: vstrb.32 q0, [r0, q1] @ encoding: [0x80,0xec,0x02,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q0, [r0, q1]
# CHECK: vstrb.32 q3, [r0, q1] @ encoding: [0x80,0xec,0x02,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q3, [r0, q1]
# CHECK: vstrb.32 q3, [r0, q3] @ encoding: [0x80,0xec,0x06,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.32 q3, [r0, q3]
# CHECK: vstrh.16 q0, [r0, q1] @ encoding: [0x80,0xec,0x92,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q0, [r0, q1]
# CHECK: vstrh.16 q3, [r0, q1] @ encoding: [0x80,0xec,0x92,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q3, [r0, q1]
# CHECK: vstrh.16 q3, [r0, q3] @ encoding: [0x80,0xec,0x96,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q3, [r0, q3]
# CHECK: vstrh.32 q0, [r0, q1] @ encoding: [0x80,0xec,0x12,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q0, [r0, q1]
# CHECK: vstrh.32 q3, [r0, q1] @ encoding: [0x80,0xec,0x12,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q3, [r0, q1]
# CHECK: vstrh.32 q3, [r0, q3] @ encoding: [0x80,0xec,0x16,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q3, [r0, q3]
# CHECK: vstrh.16 q0, [r0, q1, uxtw #1] @ encoding: [0x80,0xec,0x93,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.16 q0, [r0, q1, uxtw #1]
# CHECK: vstrh.32 q3, [r8, q3, uxtw #1] @ encoding: [0x88,0xec,0x17,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.32 q3, [r8, q3, uxtw #1]
# CHECK: vstrw.32 q0, [r0, q1] @ encoding: [0x80,0xec,0x42,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q0, [r0, q1]
# CHECK: vstrw.32 q3, [r0, q1] @ encoding: [0x80,0xec,0x42,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q3, [r0, q1]
# CHECK: vstrw.32 q3, [r0, q3] @ encoding: [0x80,0xec,0x46,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q3, [r0, q3]
# CHECK: vstrw.32 q0, [r0, q1, uxtw #2] @ encoding: [0x80,0xec,0x43,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q0, [r0, q1, uxtw #2]
# CHECK: vstrw.32 q0, [sp, q1, uxtw #2] @ encoding: [0x8d,0xec,0x43,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q0, [sp, q1, uxtw #2]
# CHECK: vstrd.64 q0, [r0, q1] @ encoding: [0x80,0xec,0xd2,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q0, [r0, q1]
# CHECK: vstrd.64 q3, [r0, q1] @ encoding: [0x80,0xec,0xd2,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q3, [r0, q1]
# CHECK: vstrd.64 q3, [r0, q3] @ encoding: [0x80,0xec,0xd6,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q3, [r0, q3]
# CHECK: vstrd.64 q0, [r0, q1, uxtw #3] @ encoding: [0x80,0xec,0xd3,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q0, [r0, q1, uxtw #3]
# CHECK: vstrd.64 q0, [sp, q1, uxtw #3] @ encoding: [0x8d,0xec,0xd3,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q0, [sp, q1, uxtw #3]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: operand must be a register in range [q0, q7]
@@ -1189,79 +1189,79 @@ vstrh.16 q0, [r0, q1, uxtw #2]
vstrb.32 q0, [r11, q1, uxtw #1]
# CHECK: vldrw.u32 q0, [q1] @ encoding: [0x92,0xfd,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q0, [q1]
# CHECK: vldrw.u32 q7, [q1] @ encoding: [0x92,0xfd,0x00,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q7, [q1]
# CHECK: vldrw.u32 q7, [q1]! @ encoding: [0xb2,0xfd,0x00,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q7, [q1]!
# CHECK: vldrw.u32 q7, [q1, #4] @ encoding: [0x92,0xfd,0x01,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q7, [q1, #4]
# CHECK: vldrw.u32 q7, [q1, #-4] @ encoding: [0x12,0xfd,0x01,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q7, [q1, #-4]
# CHECK: vldrw.u32 q7, [q1, #508] @ encoding: [0x92,0xfd,0x7f,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q7, [q1, #508]
# CHECK: vldrw.u32 q7, [q1, #-508] @ encoding: [0x12,0xfd,0x7f,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q7, [q1, #-508]
# CHECK: vldrw.u32 q7, [q1, #264] @ encoding: [0x92,0xfd,0x42,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q7, [q1, #264]
# CHECK: vldrw.u32 q7, [q1, #4]! @ encoding: [0xb2,0xfd,0x01,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q7, [q1, #4]!
# CHECK: vstrw.32 q0, [q1] @ encoding: [0x82,0xfd,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q0, [q1]
# CHECK: vstrw.32 q1, [q1] @ encoding: [0x82,0xfd,0x00,0x3e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q1, [q1]
# CHECK: vstrw.32 q7, [q1] @ encoding: [0x82,0xfd,0x00,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q7, [q1]
# CHECK: vstrw.32 q7, [q1]! @ encoding: [0xa2,0xfd,0x00,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q7, [q1]!
# CHECK: vstrw.32 q7, [q7] @ encoding: [0x8e,0xfd,0x00,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q7, [q7]
# CHECK: vstrw.32 q7, [q1, #4] @ encoding: [0x82,0xfd,0x01,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q7, [q1, #4]
# CHECK: vstrw.32 q7, [q1, #-4] @ encoding: [0x02,0xfd,0x01,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q7, [q1, #-4]
# CHECK: vstrw.32 q7, [q1, #508] @ encoding: [0x82,0xfd,0x7f,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q7, [q1, #508]
# CHECK: vstrw.32 q7, [q1, #-508] @ encoding: [0x02,0xfd,0x7f,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q7, [q1, #-508]
# CHECK: vstrw.32 q7, [q1, #264]! @ encoding: [0xa2,0xfd,0x42,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.32 q7, [q1, #264]!
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: operand must be a register in range [q0, q7]
@@ -1277,103 +1277,103 @@ vstrw.32 q4, [q1, #3]!
vldrw.u32 q7, [q1, #512]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector pointer register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.32 q1, [q1, #264]
# CHECK: vldrd.u64 q0, [q1] @ encoding: [0x92,0xfd,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q0, [q1]
# CHECK: vldrd.u64 q7, [q1] @ encoding: [0x92,0xfd,0x00,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q7, [q1]
# CHECK: vldrd.u64 q7, [q1]! @ encoding: [0xb2,0xfd,0x00,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q7, [q1]!
# CHECK: vldrd.u64 q7, [q1, #8] @ encoding: [0x92,0xfd,0x01,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q7, [q1, #8]
# CHECK: vldrd.u64 q7, [q1, #-8] @ encoding: [0x12,0xfd,0x01,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q7, [q1, #-8]
# CHECK: vldrd.u64 q7, [q1, #1016] @ encoding: [0x92,0xfd,0x7f,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q7, [q1, #1016]
# CHECK: vldrd.u64 q7, [q1, #-1016] @ encoding: [0x12,0xfd,0x7f,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q7, [q1, #-1016]
# CHECK: vldrd.u64 q7, [q1, #264] @ encoding: [0x92,0xfd,0x21,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q7, [q1, #264]
# CHECK: vldrd.u64 q7, [q1, #624] @ encoding: [0x92,0xfd,0x4e,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q7, [q1, #624]
# CHECK: vldrd.u64 q7, [q1, #264] @ encoding: [0x92,0xfd,0x21,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q7, [q1, #264]
# CHECK: vldrd.u64 q7, [q1, #-1016]! @ encoding: [0x32,0xfd,0x7f,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q7, [q1, #-1016]!
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: destination vector register and vector pointer register can't be identical
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.u64 q6, [q6]
# CHECK: vstrd.64 q0, [q1] @ encoding: [0x82,0xfd,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q0, [q1]
# CHECK: vstrd.64 q1, [q1] @ encoding: [0x82,0xfd,0x00,0x3f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q1, [q1]
# CHECK: vstrd.64 q7, [q1] @ encoding: [0x82,0xfd,0x00,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q7, [q1]
# CHECK: vstrd.64 q7, [q1]! @ encoding: [0xa2,0xfd,0x00,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q7, [q1]!
# CHECK: vstrd.64 q7, [q7] @ encoding: [0x8e,0xfd,0x00,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q7, [q7]
# CHECK: vstrd.64 q7, [q1, #8] @ encoding: [0x82,0xfd,0x01,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q7, [q1, #8]
# CHECK: vstrd.64 q7, [q1, #-8]! @ encoding: [0x22,0xfd,0x01,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q7, [q1, #-8]!
# CHECK: vstrd.64 q7, [q1, #1016] @ encoding: [0x82,0xfd,0x7f,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q7, [q1, #1016]
# CHECK: vstrd.64 q7, [q1, #-1016] @ encoding: [0x02,0xfd,0x7f,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q7, [q1, #-1016]
# CHECK: vstrd.64 q7, [q1, #264] @ encoding: [0x82,0xfd,0x21,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q7, [q1, #264]
# CHECK: vstrd.64 q7, [q1, #624] @ encoding: [0x82,0xfd,0x4e,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q7, [q1, #624]
# CHECK: vstrd.64 q7, [q1, #264] @ encoding: [0x82,0xfd,0x21,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.64 q7, [q1, #264]
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: operand must be a register in range [q0, q7]
@@ -1393,547 +1393,547 @@ vstrd.64 q4, [q1, #3]
vstrd.64 q4, [q1, #4]
# CHECK: vldrb.u8 q0, [r0] @ encoding: [0x90,0xed,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s8 q0, [r0]
# CHECK: vldrb.u8 q0, [r0] @ encoding: [0x90,0xed,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.8 q0, [r0]
# CHECK: vldrb.u8 q0, [r8, #56] @ encoding: [0x98,0xed,0x38,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s8 q0, [r8, #56]
# CHECK: vldrb.u8 q0, [r8, #56] @ encoding: [0x98,0xed,0x38,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.8 q0, [r8, #56]
# CHECK: vldrb.u8 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x38,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s8 q5, [r4, #56]!
# CHECK: vldrb.u8 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x38,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.8 q5, [r4, #56]!
# CHECK: vstrb.8 q0, [r0] @ encoding: [0x80,0xed,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.u8 q0, [r0]
# CHECK: vstrb.8 q0, [r0] @ encoding: [0x80,0xed,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.s8 q0, [r0]
# CHECK: vstrb.8 q4, [r4, #56] @ encoding: [0x84,0xed,0x38,0x9e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.u8 q4, [r4, #56]
# CHECK: vstrb.8 q4, [r4, #56] @ encoding: [0x84,0xed,0x38,0x9e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.s8 q4, [r4, #56]
# CHECK: vstrb.8 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x38,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.u8 q5, [r4, #56]!
# CHECK: vstrb.8 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x38,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.s8 q5, [r4, #56]!
# CHECK: vldrh.u16 q0, [r0] @ encoding: [0x90,0xed,0x80,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s16 q0, [r0]
# CHECK: vldrh.u16 q0, [r0] @ encoding: [0x90,0xed,0x80,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.f16 q0, [r0]
# CHECK: vldrh.u16 q0, [r0] @ encoding: [0x90,0xed,0x80,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.16 q0, [r0]
# CHECK: vldrh.u16 q0, [r4, #56] @ encoding: [0x94,0xed,0x9c,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s16 q0, [r4, #56]
# CHECK: vldrh.u16 q0, [r4, #56] @ encoding: [0x94,0xed,0x9c,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.f16 q0, [r4, #56]
# CHECK: vldrh.u16 q0, [r4, #56] @ encoding: [0x94,0xed,0x9c,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.16 q0, [r4, #56]
# CHECK: vldrh.u16 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x9c,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s16 q5, [r4, #56]!
# CHECK: vldrh.u16 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x9c,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.f16 q5, [r4, #56]!
# CHECK: vldrh.u16 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x9c,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.16 q5, [r4, #56]!
# CHECK: vstrh.16 q0, [r0] @ encoding: [0x80,0xed,0x80,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.u16 q0, [r0]
# CHECK: vstrh.16 q0, [r0] @ encoding: [0x80,0xed,0x80,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.s16 q0, [r0]
# CHECK: vstrh.16 q0, [r0] @ encoding: [0x80,0xed,0x80,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.f16 q0, [r0]
# CHECK: vstrh.16 q0, [r4, #56] @ encoding: [0x84,0xed,0x9c,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.u16 q0, [r4, #56]
# CHECK: vstrh.16 q0, [r4, #56] @ encoding: [0x84,0xed,0x9c,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.s16 q0, [r4, #56]
# CHECK: vstrh.16 q0, [r4, #56] @ encoding: [0x84,0xed,0x9c,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.f16 q0, [r4, #56]
# CHECK: vstrh.16 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x9c,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.u16 q5, [r4, #56]!
# CHECK: vstrh.16 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x9c,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.s16 q5, [r4, #56]!
# CHECK: vstrh.16 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x9c,0xbe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.f16 q5, [r4, #56]!
# CHECK: vldrw.u32 q0, [r0] @ encoding: [0x90,0xed,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.s32 q0, [r0]
# CHECK: vldrw.u32 q0, [r0] @ encoding: [0x90,0xed,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.f32 q0, [r0]
# CHECK: vldrw.u32 q0, [r0] @ encoding: [0x90,0xed,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.32 q0, [r0]
# CHECK: vldrw.u32 q0, [r4, #56] @ encoding: [0x94,0xed,0x0e,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.s32 q0, [r4, #56]
# CHECK: vldrw.u32 q0, [r4, #56] @ encoding: [0x94,0xed,0x0e,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.f32 q0, [r4, #56]
# CHECK: vldrw.u32 q0, [r4, #56] @ encoding: [0x94,0xed,0x0e,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.32 q0, [r4, #56]
# CHECK: vldrw.u32 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x0e,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.s32 q5, [r4, #56]!
# CHECK: vldrw.u32 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x0e,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.f32 q5, [r4, #56]!
# CHECK: vldrw.u32 q5, [r4, #56]! @ encoding: [0xb4,0xed,0x0e,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.32 q5, [r4, #56]!
# CHECK: vstrw.32 q0, [r0] @ encoding: [0x80,0xed,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.u32 q0, [r0]
# CHECK: vstrw.32 q0, [r0] @ encoding: [0x80,0xed,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.s32 q0, [r0]
# CHECK: vstrw.32 q0, [r0] @ encoding: [0x80,0xed,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.f32 q0, [r0]
# CHECK: vstrw.32 q0, [r4, #56] @ encoding: [0x84,0xed,0x0e,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.u32 q0, [r4, #56]
# CHECK: vstrw.32 q0, [r4, #56] @ encoding: [0x84,0xed,0x0e,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.s32 q0, [r4, #56]
# CHECK: vstrw.32 q0, [r4, #56] @ encoding: [0x84,0xed,0x0e,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.f32 q0, [r4, #56]
# CHECK: vstrw.32 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x0e,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.u32 q5, [r4, #56]!
# CHECK: vstrw.32 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x0e,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.s32 q5, [r4, #56]!
# CHECK: vstrw.32 q5, [r4, #56]! @ encoding: [0xa4,0xed,0x0e,0xbf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.f32 q5, [r4, #56]!
# CHECK: vldrb.u8 q0, [r0, q1] @ encoding: [0x90,0xfc,0x02,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.s8 q0, [r0, q1]
# CHECK: vldrb.u8 q0, [r0, q1] @ encoding: [0x90,0xfc,0x02,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrb.8 q0, [r0, q1]
# CHECK: vldrh.u16 q3, [r0, q1] @ encoding: [0x90,0xfc,0x92,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s16 q3, [r0, q1]
# CHECK: vldrh.u16 q3, [r0, q1] @ encoding: [0x90,0xfc,0x92,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.f16 q3, [r0, q1]
# CHECK: vldrh.u16 q3, [r0, q1] @ encoding: [0x90,0xfc,0x92,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.16 q3, [r0, q1]
# CHECK: vldrh.u16 q0, [r0, q1, uxtw #1] @ encoding: [0x90,0xfc,0x93,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.s16 q0, [r0, q1, uxtw #1]
# CHECK: vldrh.u16 q0, [r0, q1, uxtw #1] @ encoding: [0x90,0xfc,0x93,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.f16 q0, [r0, q1, uxtw #1]
# CHECK: vldrh.u16 q0, [r0, q1, uxtw #1] @ encoding: [0x90,0xfc,0x93,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrh.16 q0, [r0, q1, uxtw #1]
# CHECK: vldrw.u32 q0, [r0, q1] @ encoding: [0x90,0xfc,0x42,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.s32 q0, [r0, q1]
# CHECK: vldrw.u32 q0, [r0, q1] @ encoding: [0x90,0xfc,0x42,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.f32 q0, [r0, q1]
# CHECK: vldrw.u32 q0, [r0, q1] @ encoding: [0x90,0xfc,0x42,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.32 q0, [r0, q1]
# CHECK: vldrw.u32 q0, [r0, q1, uxtw #2] @ encoding: [0x90,0xfc,0x43,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.s32 q0, [r0, q1, uxtw #2]
# CHECK: vldrw.u32 q0, [r0, q1, uxtw #2] @ encoding: [0x90,0xfc,0x43,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.f32 q0, [r0, q1, uxtw #2]
# CHECK: vldrw.u32 q0, [r0, q1, uxtw #2] @ encoding: [0x90,0xfc,0x43,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.32 q0, [r0, q1, uxtw #2]
# CHECK: vldrd.u64 q0, [r0, q1] @ encoding: [0x90,0xfc,0xd2,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.s64 q0, [r0, q1]
# CHECK: vldrd.u64 q0, [r0, q1] @ encoding: [0x90,0xfc,0xd2,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.f64 q0, [r0, q1]
# CHECK: vldrd.u64 q0, [r0, q1] @ encoding: [0x90,0xfc,0xd2,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.64 q0, [r0, q1]
# CHECK: vldrd.u64 q0, [r0, q1, uxtw #3] @ encoding: [0x90,0xfc,0xd3,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.s64 q0, [r0, q1, uxtw #3]
# CHECK: vldrd.u64 q0, [r0, q1, uxtw #3] @ encoding: [0x90,0xfc,0xd3,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.f64 q0, [r0, q1, uxtw #3]
# CHECK: vldrd.u64 q0, [r0, q1, uxtw #3] @ encoding: [0x90,0xfc,0xd3,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.64 q0, [r0, q1, uxtw #3]
# CHECK: vstrb.8 q0, [r0, q1] @ encoding: [0x80,0xec,0x02,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.u8 q0, [r0, q1]
# CHECK: vstrb.8 q0, [r0, q1] @ encoding: [0x80,0xec,0x02,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrb.s8 q0, [r0, q1]
# CHECK: vstrh.16 q3, [r0, q1] @ encoding: [0x80,0xec,0x92,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.u16 q3, [r0, q1]
# CHECK: vstrh.16 q3, [r0, q1] @ encoding: [0x80,0xec,0x92,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.s16 q3, [r0, q1]
# CHECK: vstrh.16 q3, [r0, q1] @ encoding: [0x80,0xec,0x92,0x6e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.f16 q3, [r0, q1]
# CHECK: vstrh.16 q0, [r0, q1, uxtw #1] @ encoding: [0x80,0xec,0x93,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.u16 q0, [r0, q1, uxtw #1]
# CHECK: vstrh.16 q0, [r0, q1, uxtw #1] @ encoding: [0x80,0xec,0x93,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.s16 q0, [r0, q1, uxtw #1]
# CHECK: vstrh.16 q0, [r0, q1, uxtw #1] @ encoding: [0x80,0xec,0x93,0x0e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrh.f16 q0, [r0, q1, uxtw #1]
# CHECK: vstrw.32 q0, [r0, q1] @ encoding: [0x80,0xec,0x42,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.u32 q0, [r0, q1]
# CHECK: vstrw.32 q0, [r0, q1] @ encoding: [0x80,0xec,0x42,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.s32 q0, [r0, q1]
# CHECK: vstrw.32 q0, [r0, q1] @ encoding: [0x80,0xec,0x42,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.f32 q0, [r0, q1]
# CHECK: vstrw.32 q0, [r0, q1, uxtw #2] @ encoding: [0x80,0xec,0x43,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.u32 q0, [r0, q1, uxtw #2]
# CHECK: vstrw.32 q0, [r0, q1, uxtw #2] @ encoding: [0x80,0xec,0x43,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.s32 q0, [r0, q1, uxtw #2]
# CHECK: vstrw.32 q0, [r0, q1, uxtw #2] @ encoding: [0x80,0xec,0x43,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.f32 q0, [r0, q1, uxtw #2]
# CHECK: vstrd.64 q3, [r0, q1] @ encoding: [0x80,0xec,0xd2,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.u64 q3, [r0, q1]
# CHECK: vstrd.64 q3, [r0, q1] @ encoding: [0x80,0xec,0xd2,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.s64 q3, [r0, q1]
# CHECK: vstrd.64 q3, [r0, q1] @ encoding: [0x80,0xec,0xd2,0x6f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.f64 q3, [r0, q1]
# CHECK: vstrd.64 q0, [r0, q1, uxtw #3] @ encoding: [0x80,0xec,0xd3,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.u64 q0, [r0, q1, uxtw #3]
# CHECK: vstrd.64 q0, [r0, q1, uxtw #3] @ encoding: [0x80,0xec,0xd3,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.s64 q0, [r0, q1, uxtw #3]
# CHECK: vstrd.64 q0, [r0, q1, uxtw #3] @ encoding: [0x80,0xec,0xd3,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.f64 q0, [r0, q1, uxtw #3]
# CHECK: vldrw.u32 q0, [q1] @ encoding: [0x92,0xfd,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.s32 q0, [q1]
# CHECK: vldrw.u32 q0, [q1] @ encoding: [0x92,0xfd,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.f32 q0, [q1]
# CHECK: vldrw.u32 q0, [q1] @ encoding: [0x92,0xfd,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.32 q0, [q1]
# CHECK: vldrw.u32 q7, [q1]! @ encoding: [0xb2,0xfd,0x00,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.s32 q7, [q1]!
# CHECK: vldrw.u32 q7, [q1]! @ encoding: [0xb2,0xfd,0x00,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.f32 q7, [q1]!
# CHECK: vldrw.u32 q7, [q1]! @ encoding: [0xb2,0xfd,0x00,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.32 q7, [q1]!
# CHECK: vldrw.u32 q7, [q1, #4] @ encoding: [0x92,0xfd,0x01,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.s32 q7, [q1, #4]
# CHECK: vldrw.u32 q7, [q1, #4] @ encoding: [0x92,0xfd,0x01,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.f32 q7, [q1, #4]
# CHECK: vldrw.u32 q7, [q1, #4] @ encoding: [0x92,0xfd,0x01,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.32 q7, [q1, #4]
# CHECK: vldrw.u32 q7, [q1, #4]! @ encoding: [0xb2,0xfd,0x01,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.s32 q7, [q1, #4]!
# CHECK: vldrw.u32 q7, [q1, #4]! @ encoding: [0xb2,0xfd,0x01,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.f32 q7, [q1, #4]!
# CHECK: vldrw.u32 q7, [q1, #4]! @ encoding: [0xb2,0xfd,0x01,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrw.u32 q7, [q1, #4]!
# CHECK: vstrw.32 q0, [q1] @ encoding: [0x82,0xfd,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.u32 q0, [q1]
# CHECK: vstrw.32 q0, [q1] @ encoding: [0x82,0xfd,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.s32 q0, [q1]
# CHECK: vstrw.32 q0, [q1] @ encoding: [0x82,0xfd,0x00,0x1e]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.f32 q0, [q1]
# CHECK: vstrw.32 q7, [q1]! @ encoding: [0xa2,0xfd,0x00,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.u32 q7, [q1]!
# CHECK: vstrw.32 q7, [q1]! @ encoding: [0xa2,0xfd,0x00,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.s32 q7, [q1]!
# CHECK: vstrw.32 q7, [q1]! @ encoding: [0xa2,0xfd,0x00,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.f32 q7, [q1]!
# CHECK: vstrw.32 q7, [q1, #508] @ encoding: [0x82,0xfd,0x7f,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.u32 q7, [q1, #508]
# CHECK: vstrw.32 q7, [q1, #508] @ encoding: [0x82,0xfd,0x7f,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.s32 q7, [q1, #508]
# CHECK: vstrw.32 q7, [q1, #508] @ encoding: [0x82,0xfd,0x7f,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.f32 q7, [q1, #508]
# CHECK: vstrw.32 q7, [q1, #264]! @ encoding: [0xa2,0xfd,0x42,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.u32 q7, [q1, #264]!
# CHECK: vstrw.32 q7, [q1, #264]! @ encoding: [0xa2,0xfd,0x42,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.s32 q7, [q1, #264]!
# CHECK: vstrw.32 q7, [q1, #264]! @ encoding: [0xa2,0xfd,0x42,0xfe]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrw.f32 q7, [q1, #264]!
# CHECK: vldrd.u64 q0, [q1] @ encoding: [0x92,0xfd,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.s64 q0, [q1]
# CHECK: vldrd.u64 q0, [q1] @ encoding: [0x92,0xfd,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.f64 q0, [q1]
# CHECK: vldrd.u64 q0, [q1] @ encoding: [0x92,0xfd,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.64 q0, [q1]
# CHECK: vldrd.u64 q7, [q1]! @ encoding: [0xb2,0xfd,0x00,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.s64 q7, [q1]!
# CHECK: vldrd.u64 q7, [q1]! @ encoding: [0xb2,0xfd,0x00,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.f64 q7, [q1]!
# CHECK: vldrd.u64 q7, [q1]! @ encoding: [0xb2,0xfd,0x00,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.64 q7, [q1]!
# CHECK: vldrd.u64 q7, [q1, #8] @ encoding: [0x92,0xfd,0x01,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.s64 q7, [q1, #8]
# CHECK: vldrd.u64 q7, [q1, #8] @ encoding: [0x92,0xfd,0x01,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.f64 q7, [q1, #8]
# CHECK: vldrd.u64 q7, [q1, #8] @ encoding: [0x92,0xfd,0x01,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.64 q7, [q1, #8]
# CHECK: vldrd.u64 q7, [q1, #-1016]! @ encoding: [0x32,0xfd,0x7f,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.s64 q7, [q1, #-1016]!
# CHECK: vldrd.u64 q7, [q1, #-1016]! @ encoding: [0x32,0xfd,0x7f,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.f64 q7, [q1, #-1016]!
# CHECK: vldrd.u64 q7, [q1, #-1016]! @ encoding: [0x32,0xfd,0x7f,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vldrd.64 q7, [q1, #-1016]!
# CHECK: vstrd.64 q0, [q1] @ encoding: [0x82,0xfd,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.u64 q0, [q1]
# CHECK: vstrd.64 q0, [q1] @ encoding: [0x82,0xfd,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.s64 q0, [q1]
# CHECK: vstrd.64 q0, [q1] @ encoding: [0x82,0xfd,0x00,0x1f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.f64 q0, [q1]
# CHECK: vstrd.64 q7, [q1]! @ encoding: [0xa2,0xfd,0x00,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.u64 q7, [q1]!
# CHECK: vstrd.64 q7, [q1]! @ encoding: [0xa2,0xfd,0x00,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.s64 q7, [q1]!
# CHECK: vstrd.64 q7, [q1]! @ encoding: [0xa2,0xfd,0x00,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.f64 q7, [q1]!
# CHECK: vstrd.64 q7, [q1, #1016] @ encoding: [0x82,0xfd,0x7f,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.u64 q7, [q1, #1016]
# CHECK: vstrd.64 q7, [q1, #1016] @ encoding: [0x82,0xfd,0x7f,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.s64 q7, [q1, #1016]
# CHECK: vstrd.64 q7, [q1, #1016] @ encoding: [0x82,0xfd,0x7f,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.f64 q7, [q1, #1016]
# CHECK: vstrd.64 q7, [q1, #-8]! @ encoding: [0x22,0xfd,0x01,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.u64 q7, [q1, #-8]!
# CHECK: vstrd.64 q7, [q1, #-8]! @ encoding: [0x22,0xfd,0x01,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.s64 q7, [q1, #-8]!
# CHECK: vstrd.64 q7, [q1, #-8]! @ encoding: [0x22,0xfd,0x01,0xff]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vstrd.f64 q7, [q1, #-8]!
vpste
diff --git a/llvm/test/MC/ARM/mve-misc.s b/llvm/test/MC/ARM/mve-misc.s
index f3af9e0..251a77b 100644
--- a/llvm/test/MC/ARM/mve-misc.s
+++ b/llvm/test/MC/ARM/mve-misc.s
@@ -7,63 +7,63 @@
# RUN: FileCheck --check-prefix=ERROR-NOMVE < %t %s
# CHECK: vpsel q0, q5, q2 @ encoding: [0x3b,0xfe,0x05,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vpsel q0, q5, q2
# CHECK: vpnot @ encoding: [0x31,0xfe,0x4d,0x0f]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vpnot
# CHECK: wlstp.8 lr, r0, #1668 @ encoding: [0x00,0xf0,0x43,0xc3]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.8 lr, r0, #1668
# CHECK: wlstp.16 lr, r0, #1668 @ encoding: [0x10,0xf0,0x43,0xc3]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.16 lr, r0, #1668
# CHECK: wlstp.32 lr, r4, #2706 @ encoding: [0x24,0xf0,0x49,0xcd]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.32 lr, r4, #2706
# CHECK: wlstp.64 lr, lr, #3026 @ encoding: [0x3e,0xf0,0xe9,0xcd]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.64 lr, lr, #3026
# CHECK: wlstp.8 lr, r5, #3436 @ encoding: [0x05,0xf0,0xb7,0xc6]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.8 lr, r5, #3436
# CHECK: wlstp.16 lr, r1, #1060 @ encoding: [0x11,0xf0,0x13,0xc2]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.16 lr, r1, #1060
# CHECK: wlstp.32 lr, r7, #4036 @ encoding: [0x27,0xf0,0xe3,0xc7]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.32 lr, r7, #4036
# CHECK: wlstp.8 lr, r1, #538 @ encoding: [0x01,0xf0,0x0d,0xc9]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.8 lr, r1, #538
# CHECK: wlstp.8 lr, r10, #1404 @ encoding: [0x0a,0xf0,0xbf,0xc2]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.8 lr, r10, #1404
# CHECK: wlstp.8 lr, r10, #1408 @ encoding: [0x0a,0xf0,0xc1,0xc2]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.8 lr, r10, #1408
# CHECK: wlstp.8 lr, r10, #2358 @ encoding: [0x0a,0xf0,0x9b,0xcc]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.8 lr, r10, #2358
# CHECK: wlstp.8 lr, r10, #4086 @ encoding: [0x0a,0xf0,0xfb,0xcf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.8 lr, r10, #4086
# CHECK: wlstp.8 lr, r11, #1442 @ encoding: [0x0b,0xf0,0xd1,0xca]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.8 lr, r11, #1442
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: loop end is out of range or not a positive multiple of 2
@@ -87,39 +87,39 @@ wlstp.16 lr, sp, #1442
wlstp.32 r10, r11, #1442
# CHECK: wlstp.8 lr, r1, .Lendloop @ encoding: [0x01'A',0xf0'A',0x01'A',0xc0'A']
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.8 lr, r1, .Lendloop
# CHECK: wlstp.16 lr, r2, .Lendloop @ encoding: [0x12'A',0xf0'A',0x01'A',0xc0'A']
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.16 lr, r2, .Lendloop
# CHECK: wlstp.32 lr, r3, .Lendloop @ encoding: [0x23'A',0xf0'A',0x01'A',0xc0'A']
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.32 lr, r3, .Lendloop
# CHECK: wlstp.64 lr, r5, .Lendloop @ encoding: [0x35'A',0xf0'A',0x01'A',0xc0'A']
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.64 lr, r5, .Lendloop
# CHECK: wlstp.64 lr, r5, #0 @ encoding: [0x35,0xf0,0x01,0xc0]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
wlstp.64 lr, r5, #0
# CHECK: dlstp.8 lr, r5 @ encoding: [0x05,0xf0,0x01,0xe0]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
dlstp.8 lr, r5
# CHECK: dlstp.16 lr, r5 @ encoding: [0x15,0xf0,0x01,0xe0]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
dlstp.16 lr, r5
# CHECK: dlstp.32 lr, r7 @ encoding: [0x27,0xf0,0x01,0xe0]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
dlstp.32 lr, r7
# CHECK: dlstp.64 lr, r2 @ encoding: [0x32,0xf0,0x01,0xe0]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
dlstp.64 lr, r2
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: operand must be a register in range [r0, r12] or r14
@@ -135,15 +135,15 @@ dlstp.64 r10, r0
dlstp.64 lr, pc
# CHECK: letp lr, #-2 @ encoding: [0x1f,0xf0,0x01,0xc8]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
letp lr, #-2
# CHECK: letp lr, #-8 @ encoding: [0x1f,0xf0,0x05,0xc0]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
letp lr, #-8
# CHECK: letp lr, #-4094 @ encoding: [0x1f,0xf0,0xff,0xcf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
letp lr, #-4094
# ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
@@ -159,7 +159,7 @@ letp lr, #8
letp lr, #-4096
# CHECK: letp lr, .Lstartloop @ encoding: [0x1f'A',0xf0'A',0x01'A',0xc0'A']
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
letp lr, .Lstartloop
# CHECK: lctp @ encoding: [0x0f,0xf0,0x01,0xe0]
@@ -172,8 +172,11 @@ it eq
# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
lctpeq
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
vpste
+# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
vpselt.s16 q0, q1, q2
+# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
vpsele.i32 q0, q1, q2
# CHECK: vpste @ encoding: [0x71,0xfe,0x4d,0x8f]
# CHECK: vpselt q0, q1, q2 @ encoding: [0x33,0xfe,0x05,0x0f]
diff --git a/llvm/test/MC/ARM/neon-complex.s b/llvm/test/MC/ARM/neon-complex.s
index 0d428b5..6054a08 100644
--- a/llvm/test/MC/ARM/neon-complex.s
+++ b/llvm/test/MC/ARM/neon-complex.s
@@ -29,8 +29,10 @@
// FP16-ARM: vcmla.f16 q0, q1, q2, #0 @ encoding: [0x44,0x08,0x22,0xfc]
// FP16-THUMB: vcmla.f16 q0, q1, q2, #0 @ encoding: [0x22,0xfc,0x44,0x08]
// NO-FP16-STDERR: :[[@LINE-3]]:{{[0-9]*}}: note: instruction requires: full half-float
-// V82A: :[[@LINE-4]]:{{[0-9]*}}: error: instruction requires: armv8.3a
-// NO-NEON_STDERR: :[[@LINE-5]]:{{[0-9]*}}: error: instruction requires: NEON
+// V82A: :[[@LINE-4]]:{{[0-9]*}}: error: invalid instruction, any one of the following would fix this:
+// V82A: :[[@LINE-5]]:{{[0-9]*}}: note: instruction requires: mve.fp
+// V82A: :[[@LINE-6]]:{{[0-9]*}}: note: instruction requires: armv8.3a
+// NO-NEON_STDERR: :[[@LINE-7]]:{{[0-9]*}}: error: instruction requires: NEON
vcmla.f32 d0, d1, d2, #0
// ARM: vcmla.f32 d0, d1, d2, #0 @ encoding: [0x02,0x08,0x31,0xfc]
// THUMB: vcmla.f32 d0, d1, d2, #0 @ encoding: [0x31,0xfc,0x02,0x08]
@@ -39,8 +41,10 @@
vcmla.f32 q0, q1, q2, #0
// ARM: vcmla.f32 q0, q1, q2, #0 @ encoding: [0x44,0x08,0x32,0xfc]
// THUMB: vcmla.f32 q0, q1, q2, #0 @ encoding: [0x32,0xfc,0x44,0x08]
-// V82A: :[[@LINE-3]]:{{[0-9]*}}: error: instruction requires: armv8.3a
-// NO-NEON_STDERR: :[[@LINE-4]]:{{[0-9]*}}: error: instruction requires: NEON
+// V82A: :[[@LINE-3]]:{{[0-9]*}}: error: invalid instruction, any one of the following would fix this:
+// V82A: :[[@LINE-4]]:{{[0-9]*}}: note: instruction requires: mve.fp
+// V82A: :[[@LINE-5]]:{{[0-9]*}}: note: instruction requires: armv8.3a
+// NO-NEON_STDERR: :[[@LINE-6]]:{{[0-9]*}}: error: instruction requires: NEON
// Valid rotations
vcmla.f32 d0, d1, d2, #90
@@ -83,8 +87,10 @@
// FP16-ARM: vcadd.f16 q0, q1, q2, #90 @ encoding: [0x44,0x08,0x82,0xfc]
// FP16-THUMB: vcadd.f16 q0, q1, q2, #90 @ encoding: [0x82,0xfc,0x44,0x08]
// NO-FP16-STDERR: :[[@LINE-3]]:{{[0-9]*}}: note: instruction requires: full half-float
-// V82A: :[[@LINE-4]]:{{[0-9]*}}: error: instruction requires: armv8.3a
-// NO-NEON_STDERR: :[[@LINE-5]]:{{[0-9]*}}: error: instruction requires: NEON
+// V82A: :[[@LINE-4]]:{{[0-9]*}}: error: invalid instruction, any one of the following would fix this:
+// V82A: :[[@LINE-5]]:{{[0-9]*}}: note: instruction requires: mve.fp
+// V82A: :[[@LINE-6]]:{{[0-9]*}}: note: instruction requires: armv8.3a
+// NO-NEON_STDERR: :[[@LINE-7]]:{{[0-9]*}}: error: instruction requires: NEON
vcadd.f32 d0, d1, d2, #90
// ARM: vcadd.f32 d0, d1, d2, #90 @ encoding: [0x02,0x08,0x91,0xfc]
// THUMB: vcadd.f32 d0, d1, d2, #90 @ encoding: [0x91,0xfc,0x02,0x08]
@@ -93,8 +99,10 @@
vcadd.f32 q0, q1, q2, #90
// ARM: vcadd.f32 q0, q1, q2, #90 @ encoding: [0x44,0x08,0x92,0xfc]
// THUMB: vcadd.f32 q0, q1, q2, #90 @ encoding: [0x92,0xfc,0x44,0x08]
-// V82A: :[[@LINE-3]]:{{[0-9]*}}: error: instruction requires: armv8.3a
-// NO-NEON_STDERR: :[[@LINE-4]]:{{[0-9]*}}: error: instruction requires: NEON
+// V82A: :[[@LINE-3]]:{{[0-9]*}}: error: invalid instruction, any one of the following would fix this:
+// V82A: :[[@LINE-4]]:{{[0-9]*}}: note: instruction requires: mve.fp
+// V82A: :[[@LINE-5]]:{{[0-9]*}}: note: instruction requires: armv8.3a
+// NO-NEON_STDERR: :[[@LINE-6]]:{{[0-9]*}}: error: instruction requires: NEON
// Valid rotations
vcadd.f32 d0, d1, d2, #270
diff --git a/llvm/test/MC/ARM/no-mve.s b/llvm/test/MC/ARM/no-mve.s
index 668db40..2435ced 100644
--- a/llvm/test/MC/ARM/no-mve.s
+++ b/llvm/test/MC/ARM/no-mve.s
@@ -4,13 +4,13 @@
# RUN: FileCheck --check-prefix=CHECK-MVE < %t %s
# CHECK-MVE: instruction requires: mve.fp
-# CHECK: invalid instruction
+# CHECK: instruction requires: mve.fp
vcadd.f32 q1, q2, q3, #270
# CHECK-MVE: instruction requires: mve.fp
-# CHECK: invalid instruction
+# CHECK: instruction requires: mve.fp
vadd.f32 q1, q2, q3
# CHECK-MVE: vadd.i16 q1, q2, q3 @ encoding: [0x14,0xef,0x46,0x28]
-# CHECK: invalid instruction
+# CHECK: instruction requires: mve
vadd.i16 q1, q2, q3
diff --git a/llvm/test/MC/ARM/not-armv4.s b/llvm/test/MC/ARM/not-armv4.s
index c62c50c..b65b129 100644
--- a/llvm/test/MC/ARM/not-armv4.s
+++ b/llvm/test/MC/ARM/not-armv4.s
@@ -1,13 +1,21 @@
@ RUN: not llvm-mc < %s -triple armv4-unknown-unknown -show-encoding 2>&1 | FileCheck %s
@ PR18524
-@ CHECK: instruction requires: armv5t
+@ CHECK: error: invalid instruction, any one of the following would fix this:
+@ CHECK: note: instruction requires: armv5t
+@ CHECK: note: instruction requires: thumb2
clz r4,r9
-@ CHECK: instruction requires: armv6t2
+@ CHECK: error: invalid instruction, any one of the following would fix this:
+@ CHECK: note: instruction requires: armv6t2
+@ CHECK: note: instruction requires: thumb2
rbit r4,r9
@ CHECK: error: instruction requires: armv6t2
movw r4,#0x1234
-@ CHECK: error: instruction requires: armv6t2
+
+@ CHECK: error: invalid instruction, any one of the following would fix this:
+@ CHECK: note: invalid operand for instruction
+@ CHECK: note: operand must be a register in range [r0, r15]
+@ CHECK: note: instruction requires: armv6t2
mov r4,#0x1234
diff --git a/llvm/test/MC/ARM/register-token-source-loc.s b/llvm/test/MC/ARM/register-token-source-loc.s
index afb6ba0..7560f95 100644
--- a/llvm/test/MC/ARM/register-token-source-loc.s
+++ b/llvm/test/MC/ARM/register-token-source-loc.s
@@ -3,6 +3,9 @@
// CHECK: error: invalid instruction, any one of the following would fix this:
// CHECK-NEXT: add sp, r0, #4
// CHECK-NEXT: ^
+// CHECK-NEXT: note: operand must be a register in range [r0, r7]
+// CHECK-NEXT: add sp, r0, #4
+// CHECK-NEXT: ^
// CHECK-NEXT: note: operand must be a register sp
// CHECK-NEXT: add sp, r0, #4
// CHECK-NEXT: ^
diff --git a/llvm/test/MC/ARM/tMOVSr.s b/llvm/test/MC/ARM/tMOVSr.s
index 198c90a..09602fe 100644
--- a/llvm/test/MC/ARM/tMOVSr.s
+++ b/llvm/test/MC/ARM/tMOVSr.s
@@ -1,6 +1,7 @@
@ REQUIRES: asserts
-@ RUN: llvm-mc --triple=thumbv8 --debug %s 2>&1 | FileCheck %s --match-full-lines
+@ RUN: llvm-mc --triple=thumbv8 %s --show-encoding 2>&1 | FileCheck %s --match-full-lines
-@ CHECK: Changed to: <MCInst #{{[0-9]+}} tMOVSr <MCOperand Reg:{{[0-9]+}}> <MCOperand Reg:{{[0-9]+}}>>
+// Note: this makes sure the narrow instruction is selected
+@ CHECK: movs r2, r3 @ encoding: [0x1a,0x00]
.text
movs r2, r3
diff --git a/llvm/test/MC/ARM/thumb-diagnostics.s b/llvm/test/MC/ARM/thumb-diagnostics.s
index cacd7f2..171d60ac 100644
--- a/llvm/test/MC/ARM/thumb-diagnostics.s
+++ b/llvm/test/MC/ARM/thumb-diagnostics.s
@@ -28,9 +28,12 @@
@ CHECK-ERRORS: ^
@ CHECK-ERRORS: note: instruction variant requires Thumb2
@ CHECK-ERRORS: note: operand must be a register sp
-@ CHECK-ERRORS-V5: error: instruction variant requires ARMv6 or later
+@ CHECK-ERRORS-V5: error: invalid instruction, any one of the following would fix this:
@ CHECK-ERRORS-V5: mov r2, r3
@ CHECK-ERRORS-V5: ^
+@ CHECK-ERRORS-V5: note: instruction requires: arm-mode
+@ CHECK-ERRORS-V5: note: operand must be an immediate in the range [0,255] or a relocatable expression
+@ CHECK-ERRORS-V5: note: instruction variant requires ARMv6 or later
@ Immediates where registers were expected
adds #0, r1, r2
@@ -225,10 +228,11 @@
@ Mismatched source/destination operands for MUL instruction.
muls r1, r2, r3
-@ CHECK-ERRORS: error: destination register must match source register
+@ CHECK-ERRORS: error: invalid instruction, any one of the following would fix this:
@ CHECK-ERRORS: muls r1, r2, r3
-@ CHECK-ERRORS: ^
-
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: note: destination register must match a source register
+@ CHECK-ERRORS: note: too many operands for instruction
@ Out of range immediates for STR instruction.
str r2, [r7, #-1]
@@ -274,30 +278,33 @@
@ CHECK-ERRORS: error: invalid instruction, any one of the following would fix this:
@ CHECK-ERRORS: add sp, #-1
@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: note: instruction requires: thumb2
+@ CHECK-ERRORS: add sp, #-1
+@ CHECK-ERRORS: ^
@ CHECK-ERRORS: note: operand must be a register in range [r0, r15]
@ CHECK-ERRORS: add sp, #-1
@ CHECK-ERRORS: ^
@ CHECK-ERRORS: note: invalid operand for instruction
@ CHECK-ERRORS: add sp, #-1
@ CHECK-ERRORS: ^
-@ CHECK-ERRORS: note: instruction requires: thumb2
-@ CHECK-ERRORS: add sp, #-1
-@ CHECK-ERRORS: ^
@ CHECK-ERRORS: error: invalid instruction, any one of the following would fix this:
@ CHECK-ERRORS: add sp, #3
@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: note: instruction requires: thumb2
+@ CHECK-ERRORS: add sp, #3
+@ CHECK-ERRORS: ^
@ CHECK-ERRORS: note: operand must be a register in range [r0, r15]
@ CHECK-ERRORS: add sp, #3
@ CHECK-ERRORS: ^
@ CHECK-ERRORS: note: invalid operand for instruction
@ CHECK-ERRORS: add sp, #3
@ CHECK-ERRORS: ^
-@ CHECK-ERRORS: note: instruction requires: thumb2
-@ CHECK-ERRORS: add sp, #3
-@ CHECK-ERRORS: ^
@ CHECK-ERRORS: error: invalid instruction, any one of the following would fix this:
@ CHECK-ERRORS: add sp, sp, #512
@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: note: instruction requires: thumb2
+@ CHECK-ERRORS: add sp, sp, #512
+@ CHECK-ERRORS: ^
@ CHECK-ERRORS: note: operand must be a register in range [r0, r15]
@ CHECK-ERRORS: add sp, sp, #512
@ CHECK-ERRORS: ^
@@ -305,9 +312,6 @@
@ CHECK-ERRORS: add sp, sp, #512
@ CHECK-ERRORS: ^
@ CHECK-ERRORS: note: instruction requires: thumb2
-@ CHECK-ERRORS: add sp, sp, #512
-@ CHECK-ERRORS: ^
-@ CHECK-ERRORS: error: instruction requires: thumb2
@ CHECK-ERRORS: add r2, sp, #1024
@ CHECK-ERRORS: ^
add r2, sp, ip
@@ -407,7 +411,8 @@
adds
adds r0
@ CHECK-ERRORS: error: too few operands for instruction
-@ CHECK-ERRORS: error: too few operands for instruction
+@ CHECK-ERRORS: error: invalid instruction, any one of the following would fix this:
+@ CHECK-ERRORS: note: too few operands for instruction
@------------------------------------------------------------------------------
@ Out of range width for SBFX/UBFX
diff --git a/llvm/test/MC/ARM/thumb-mov.s b/llvm/test/MC/ARM/thumb-mov.s
index 6f662f3..e910722 100644
--- a/llvm/test/MC/ARM/thumb-mov.s
+++ b/llvm/test/MC/ARM/thumb-mov.s
@@ -58,10 +58,16 @@
movs sp, r0
movs r0, sp
movs sp, sp
-// CHECK-V7: error: instruction variant requires ARMv8 or later
+// CHECK-V7: error: invalid instruction, any one of the following would fix this:
// CHECK-V7-NEXT: movs sp, r0
-// CHECK-V7: instruction variant requires ARMv8 or later
+// CHECK-V7: note: instruction variant requires ARMv8 or later
+// CHECK-V7: note: operand must be a register in range [r0, r7]
+// CHECK-V7: error: invalid instruction, any one of the following would fix this:
// CHECK-V7-NEXT: movs r0, sp
+// CHECK-V7: note: instruction variant requires ARMv8 or later
+// CHECK-V7: note: invalid operand for instruction
+// CHECK-V7: note: operand must be an immediate in the range [0,255] or a relocatable expression
+// CHECK-V7: note: operand must be a register in range [r0, r7]
// CHECK-V7: error: instruction variant requires ARMv8 or later
// CHECK-V7-NEXT: movs sp, sp
// CHECK-V8: movs.w sp, r0 @ encoding: [0x5f,0xea,0x00,0x0d]
@@ -69,8 +75,9 @@
// CHECK-V8: movs.w sp, sp @ encoding: [0x5f,0xea,0x0d,0x0d]
mov.w sp, sp
-// CHECK-V7: error: instruction variant requires ARMv8 or later
+// CHECK-V7: error: invalid instruction, any one of the following would fix this:
// CHECK-V7-NEXT: mov.w sp, sp
+// CHECK-V7: note: instruction variant requires ARMv8 or later
// CHECK-V8: mov.w sp, sp @ encoding: [0x4f,0xea,0x0d,0x0d]
movs.w sp, r0
@@ -78,8 +85,9 @@
movs.w sp, sp
// CHECK-V7: error: instruction variant requires ARMv8 or later
// CHECK-V7-NEXT: movs.w sp, r0
-// CHECK-V7: instruction variant requires ARMv8 or later
+// CHECK-V7: error: invalid instruction, any one of the following would fix this:
// CHECK-V7-NEXT: movs.w r0, sp
+// CHECK-V7: note: instruction variant requires ARMv8 or later
// CHECK-V7: error: instruction variant requires ARMv8 or later
// CHECK-V7-NEXT: movs.w sp, sp
// CHECK-V8: movs.w sp, r0 @ encoding: [0x5f,0xea,0x00,0x0d]
diff --git a/llvm/test/MC/ARM/thumb2-diagnostics.s b/llvm/test/MC/ARM/thumb2-diagnostics.s
index 45efd3c..afb12ce 100644
--- a/llvm/test/MC/ARM/thumb2-diagnostics.s
+++ b/llvm/test/MC/ARM/thumb2-diagnostics.s
@@ -156,7 +156,9 @@ foo2:
adds
adds r0
@ CHECK-ERRORS: error: too few operands for instruction
-@ CHECK-ERRORS: error: too few operands for instruction
+@ CHECK-ERRORS: error: invalid instruction, any one of the following would fix this:
+@ CHECK-ERRORS: note: too few operands for instruction
+@ CHECK-ERRORS: note: operand must be a register in range [r0, r15]
tst sp, #3
tst sp, r5
diff --git a/llvm/test/MC/ARM/vfp4.s b/llvm/test/MC/ARM/vfp4.s
index 37d03bb..f3bc575 100644
--- a/llvm/test/MC/ARM/vfp4.s
+++ b/llvm/test/MC/ARM/vfp4.s
@@ -23,7 +23,7 @@ vfma.f32 d16, d18, d17
@ ARM: vfma.f32 q2, q4, q0 @ encoding: [0x50,0x4c,0x08,0xf2]
@ THUMB: vfma.f32 q2, q4, q0 @ encoding: [0x08,0xef,0x50,0x4c]
-@ THUMB_V7EM-ERRORS: error: invalid instruction
+@ THUMB_V7EM-ERRORS: error: instruction requires: mve.fp
@ THUMB_V7EM-ERRORS-NEXT: vfma.f32 q2, q4, q0
vfma.f32 q2, q4, q0
@@ -57,7 +57,7 @@ vfms.f32 d16, d18, d17
@ ARM: vfms.f32 q2, q4, q0 @ encoding: [0x50,0x4c,0x28,0xf2]
@ THUMB: vfms.f32 q2, q4, q0 @ encoding: [0x28,0xef,0x50,0x4c]
-@ THUMB_V7EM-ERRORS: error: invalid instruction
+@ THUMB_V7EM-ERRORS: error: instruction requires: mve.fp
@ THUMB_V7EM-ERRORS-NEXT: vfms.f32 q2, q4, q0
vfms.f32 q2, q4, q0
diff --git a/llvm/test/MC/BPF/insn-unit.s b/llvm/test/MC/BPF/insn-unit.s
index 224eb73..84735d1 100644
--- a/llvm/test/MC/BPF/insn-unit.s
+++ b/llvm/test/MC/BPF/insn-unit.s
@@ -65,8 +65,10 @@
// CHECK: 8d 02 00 00 00 00 00 00 callx r2
// ======== BPF_JMP Class ========
+ may_goto Llabel0 // BPF_JCOND | BPF_K
if r1 & r2 goto Llabel0 // BPF_JSET | BPF_X
if r1 & 0xffff goto Llabel0 // BPF_JSET | BPF_K
+// CHECK: e5 00 1e 00 00 00 00 00 may_goto +30
// CHECK: 4d 21 1d 00 00 00 00 00 if r1 & r2 goto +29
// CHECK: 45 01 1c 00 ff ff 00 00 if r1 & 65535 goto +28
diff --git a/llvm/test/MC/COFF/dwarf5lineinfo.s b/llvm/test/MC/COFF/dwarf5lineinfo.s
new file mode 100644
index 0000000..f0789fe
--- /dev/null
+++ b/llvm/test/MC/COFF/dwarf5lineinfo.s
@@ -0,0 +1,13 @@
+// RUN: llvm-mc -filetype=obj -triple x86_64-pc-windows-gnu %s -o - | llvm-readobj -r - | FileCheck %s
+
+// CHECK: Relocations [
+// CHECK: Section (4) .debug_line {
+// CHECK: 0x22 IMAGE_REL_AMD64_SECREL .debug_line_str (8)
+// CHECK: 0x2C IMAGE_REL_AMD64_SECREL .debug_line_str (8)
+// CHECK: 0x36 IMAGE_REL_AMD64_ADDR64 .text (0)
+// CHECK: }
+
+main:
+ .file 0 "/" "test.c"
+ .loc 0 1 0
+ retq
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_sop1.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_sop1.txt
index fbb9545..929f3d2 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_sop1.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_sop1.txt
@@ -2536,6 +2536,9 @@
# GFX11: s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_TBA) ; encoding: [0x85,0x4c,0x80,0xbe]
0x85,0x4c,0x80,0xbe
+# GFX11: s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_TBA_TO_PC) ; encoding: [0x86,0x4c,0x80,0xbe]
+0x86,0x4c,0x80,0xbe
+
# GFX11: s_setpc_b64 s[0:1] ; encoding: [0x00,0x48,0x80,0xbe]
0x00,0x48,0x80,0xbe
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vinterp.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vinterp.txt
deleted file mode 100644
index b22fd5e..0000000
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vinterp.txt
+++ /dev/null
@@ -1,251 +0,0 @@
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -disassemble %s | FileCheck -strict-whitespace -check-prefix=GFX11 %s
-
-# GFX11: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# Check that unused bits in the encoding are ignored.
-# GFX11: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x80,0xcd,0x01,0x05,0x0e,0x1c
-
-# GFX11: v_interp_p10_f32 v1, v10, v20, v30 wait_exp:0{{$}}
-0x01,0x00,0x00,0xcd,0x0a,0x29,0x7a,0x04
-
-# GFX11: v_interp_p10_f32 v2, v11, v21, v31 wait_exp:0{{$}}
-0x02,0x00,0x00,0xcd,0x0b,0x2b,0x7e,0x04
-
-# GFX11: v_interp_p10_f32 v3, v12, v22, v32 wait_exp:0{{$}}
-0x03,0x00,0x00,0xcd,0x0c,0x2d,0x82,0x04
-
-# GFX11: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX11: v_interp_p10_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX11: v_interp_p10_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX11: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7{{$}}
-0x00,0x87,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f32 v1, v10, v20, v30 wait_exp:0{{$}}
-0x01,0x00,0x01,0xcd,0x0a,0x29,0x7a,0x04
-
-# GFX11: v_interp_p2_f32 v2, v11, v21, v31 wait_exp:0{{$}}
-0x02,0x00,0x01,0xcd,0x0b,0x2b,0x7e,0x04
-
-# GFX11: v_interp_p2_f32 v3, v12, v22, v32 wait_exp:0{{$}}
-0x03,0x00,0x01,0xcd,0x0c,0x2d,0x82,0x04
-
-# GFX11: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX11: v_interp_p2_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX11: v_interp_p2_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX11: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7{{$}}
-0x00,0x87,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0xe4
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0xe4
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0xe4
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0xe4
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_features.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_features.txt
index 0c4427c..1be97b2 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_features.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_features.txt
@@ -12,8 +12,13 @@
# GFX12: v_add3_u32_e64_dpp v5, v1, 42, v0 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x55,0xd6,0xe9,0x54,0x01,0x04,0x01,0x77,0x39,0x05]
0x05,0x00,0x55,0xd6,0xe9,0x54,0x01,0x04,0x01,0x77,0x39,0x05
-# GFX1150: v_add3_u32_e64_dpp v5, v1, s2, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x55,0xd6,0xe9,0x04,0x0c,0x00,0x01,0x77,0x39,0x05]
+# GFX12: v_add3_u32_e64_dpp v5, v1, s2, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x55,0xd6,0xe9,0x04,0x0c,0x00,0x01,0x77,0x39,0x05]
0x05,0x00,0x55,0xd6,0xe9,0x04,0x0c,0x00,0x01,0x77,0x39,0x05
-# GFX1150: v_cmp_ne_i32_e64_dpp vcc_lo, v1, s2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x6a,0x00,0x45,0xd4,0xe9,0x04,0x00,0x00,0x01,0x77,0x39,0x05]
+# GFX12: v_cmp_ne_i32_e64_dpp vcc_lo, v1, s2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x6a,0x00,0x45,0xd4,0xe9,0x04,0x00,0x00,0x01,0x77,0x39,0x05]
0x6a,0x00,0x45,0xd4,0xe9,0x04,0x00,0x00,0x01,0x77,0x39,0x05
+
+# Check that unused bits in the encoding are ignored.
+# This is more strict than the check in vinterp-fake16.txt and is GFX12 specific.
+# GFX12: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04]
+0x00,0x00,0xe0,0xcd,0x01,0x05,0x0e,0x1c
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sop1.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sop1.txt
index c87cea1..f8c235f 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sop1.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sop1.txt
@@ -3279,9 +3279,12 @@
# GFX12: s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_TBA) ; encoding: [0x85,0x4c,0x80,0xbe]
0x85,0x4c,0x80,0xbe
-# GFX12: s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_SE_AID_ID) ; encoding: [0x86,0x4c,0x80,0xbe]
+# GFX12: s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_TBA_TO_PC) ; encoding: [0x86,0x4c,0x80,0xbe]
0x86,0x4c,0x80,0xbe
+# GFX12: s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_SE_AID_ID) ; encoding: [0x87,0x4c,0x80,0xbe]
+0x87,0x4c,0x80,0xbe
+
# GFX12: s_setpc_b64 s[0:1] ; encoding: [0x00,0x48,0x80,0xbe]
0x00,0x48,0x80,0xbe
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vinterp.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vinterp.txt
deleted file mode 100644
index 977cd73..0000000
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vinterp.txt
+++ /dev/null
@@ -1,251 +0,0 @@
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -disassemble %s | FileCheck -strict-whitespace -check-prefix=GFX12 %s
-
-# GFX12: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# Check that unused bits in the encoding are ignored.
-# GFX12: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0xe0,0xcd,0x01,0x05,0x0e,0x1c
-
-# GFX12: v_interp_p10_f32 v1, v10, v20, v30 wait_exp:0{{$}}
-0x01,0x00,0x00,0xcd,0x0a,0x29,0x7a,0x04
-
-# GFX12: v_interp_p10_f32 v2, v11, v21, v31 wait_exp:0{{$}}
-0x02,0x00,0x00,0xcd,0x0b,0x2b,0x7e,0x04
-
-# GFX12: v_interp_p10_f32 v3, v12, v22, v32 wait_exp:0{{$}}
-0x03,0x00,0x00,0xcd,0x0c,0x2d,0x82,0x04
-
-# GFX12: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX12: v_interp_p10_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX12: v_interp_p10_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX12: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7{{$}}
-0x00,0x87,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f32 v1, v10, v20, v30 wait_exp:0{{$}}
-0x01,0x00,0x01,0xcd,0x0a,0x29,0x7a,0x04
-
-# GFX12: v_interp_p2_f32 v2, v11, v21, v31 wait_exp:0{{$}}
-0x02,0x00,0x01,0xcd,0x0b,0x2b,0x7e,0x04
-
-# GFX12: v_interp_p2_f32 v3, v12, v22, v32 wait_exp:0{{$}}
-0x03,0x00,0x01,0xcd,0x0c,0x2d,0x82,0x04
-
-# GFX12: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX12: v_interp_p2_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX12: v_interp_p2_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX12: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7{{$}}
-0x00,0x87,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0xe4
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0xe4
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0xe4
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0xe4
diff --git a/llvm/test/MC/Disassembler/AMDGPU/vinterp-fake16.txt b/llvm/test/MC/Disassembler/AMDGPU/vinterp-fake16.txt
new file mode 100644
index 0000000..239f1d8
--- /dev/null
+++ b/llvm/test/MC/Disassembler/AMDGPU/vinterp-fake16.txt
@@ -0,0 +1,252 @@
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -disassemble %s | FileCheck -strict-whitespace -check-prefix=CHECK %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 -disassemble %s | FileCheck -strict-whitespace -check-prefix=CHECK %s
+
+# CHECK: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04
+
+# Check that unused bits in the encoding are ignored.
+# CHECK: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x80,0xcd,0x01,0x05,0x0e,0x1c
+
+# CHECK: v_interp_p10_f32 v1, v10, v20, v30 wait_exp:0{{$}}
+0x01,0x00,0x00,0xcd,0x0a,0x29,0x7a,0x04
+
+# CHECK: v_interp_p10_f32 v2, v11, v21, v31 wait_exp:0{{$}}
+0x02,0x00,0x00,0xcd,0x0b,0x2b,0x7e,0x04
+
+# CHECK: v_interp_p10_f32 v3, v12, v22, v32 wait_exp:0{{$}}
+0x03,0x00,0x00,0xcd,0x0c,0x2d,0x82,0x04
+
+# CHECK: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
+0x00,0x80,0x00,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x24
+
+# CHECK: v_interp_p10_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x44
+
+# CHECK: v_interp_p10_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
+0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x84
+
+# CHECK: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1{{$}}
+0x00,0x01,0x00,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:7{{$}}
+0x00,0x07,0x00,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7{{$}}
+0x00,0x87,0x00,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f32 v1, v10, v20, v30 wait_exp:0{{$}}
+0x01,0x00,0x01,0xcd,0x0a,0x29,0x7a,0x04
+
+# CHECK: v_interp_p2_f32 v2, v11, v21, v31 wait_exp:0{{$}}
+0x02,0x00,0x01,0xcd,0x0b,0x2b,0x7e,0x04
+
+# CHECK: v_interp_p2_f32 v3, v12, v22, v32 wait_exp:0{{$}}
+0x03,0x00,0x01,0xcd,0x0c,0x2d,0x82,0x04
+
+# CHECK: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
+0x00,0x80,0x01,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x24
+
+# CHECK: v_interp_p2_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x44
+
+# CHECK: v_interp_p2_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
+0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x84
+
+# CHECK: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1{{$}}
+0x00,0x01,0x01,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:7{{$}}
+0x00,0x07,0x01,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7{{$}}
+0x00,0x87,0x01,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x24
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x44
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
+0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x84
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
+0x00,0x80,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
+0x00,0x01,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
+0x00,0x07,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
+0x00,0x08,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
+0x00,0x10,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
+0x00,0x20,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
+0x00,0x40,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
+0x00,0x78,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0x4d,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0xe4
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x24
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x44
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
+0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x84
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
+0x00,0x80,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
+0x00,0x01,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
+0x00,0x07,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
+0x00,0x08,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
+0x00,0x10,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
+0x00,0x20,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
+0x00,0x40,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
+0x00,0x78,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0x4d,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0xe4
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x24
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x44
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
+0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x84
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
+0x00,0x80,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
+0x00,0x01,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
+0x00,0x07,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
+0x00,0x08,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
+0x00,0x10,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
+0x00,0x20,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
+0x00,0x40,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
+0x00,0x78,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0x4d,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0xe4
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x24
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x44
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
+0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x84
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
+0x00,0x80,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
+0x00,0x01,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
+0x00,0x07,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
+0x00,0x08,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
+0x00,0x10,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
+0x00,0x20,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
+0x00,0x40,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
+0x00,0x78,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0x4d,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0xe4
diff --git a/llvm/test/MC/Disassembler/X86/apx/cfcmov.txt b/llvm/test/MC/Disassembler/X86/apx/cfcmov.txt
new file mode 100644
index 0000000..4ecaa4b
--- /dev/null
+++ b/llvm/test/MC/Disassembler/X86/apx/cfcmov.txt
@@ -0,0 +1,842 @@
+# RUN: llvm-mc --disassemble %s -triple=x86_64 | FileCheck %s --check-prefixes=ATT
+# RUN: llvm-mc --disassemble %s -triple=x86_64 -x86-asm-syntax=intel --output-asm-variant=1 | FileCheck %s --check-prefixes=INTEL
+
+# ATT: cfcmovbw %r17w, %r21w, %r25w
+# INTEL: cfcmovb r25w, r21w, r17w
+0x62,0xec,0x35,0x14,0x42,0xe9
+
+# ATT: cfcmovbw %r17w, %r21w
+# INTEL: cfcmovb r21w, r17w
+0x62,0xec,0x7d,0x0c,0x42,0xcd
+
+# ATT: cfcmovbw %r17w, 291(%r28,%r29,4)
+# INTEL: cfcmovb word ptr [r28 + 4*r29 + 291], r17w
+0x62,0x8c,0x79,0x0c,0x42,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbl %r18d, %r22d, %r26d
+# INTEL: cfcmovb r26d, r22d, r18d
+0x62,0xec,0x2c,0x14,0x42,0xf2
+
+# ATT: cfcmovbl %r18d, %r22d
+# INTEL: cfcmovb r22d, r18d
+0x62,0xec,0x7c,0x0c,0x42,0xd6
+
+# ATT: cfcmovbl %r18d, 291(%r28,%r29,4)
+# INTEL: cfcmovb dword ptr [r28 + 4*r29 + 291], r18d
+0x62,0x8c,0x78,0x0c,0x42,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbq %r19, %r23, %r27
+# INTEL: cfcmovb r27, r23, r19
+0x62,0xec,0xa4,0x14,0x42,0xfb
+
+# ATT: cfcmovbq %r19, %r23
+# INTEL: cfcmovb r23, r19
+0x62,0xec,0xfc,0x0c,0x42,0xdf
+
+# ATT: cfcmovbq %r19, 291(%r28,%r29,4)
+# INTEL: cfcmovb qword ptr [r28 + 4*r29 + 291], r19
+0x62,0x8c,0xf8,0x0c,0x42,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbw 291(%r28,%r29,4), %r17w, %r21w
+# INTEL: cfcmovb r21w, r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x51,0x14,0x42,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbw 291(%r28,%r29,4), %r17w
+# INTEL: cfcmovb r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x79,0x08,0x42,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbl 291(%r28,%r29,4), %r18d, %r22d
+# INTEL: cfcmovb r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x48,0x14,0x42,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbl 291(%r28,%r29,4), %r18d
+# INTEL: cfcmovb r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x78,0x08,0x42,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbq 291(%r28,%r29,4), %r19, %r23
+# INTEL: cfcmovb r23, r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xc0,0x14,0x42,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbq 291(%r28,%r29,4), %r19
+# INTEL: cfcmovb r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xf8,0x08,0x42,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbew %r17w, %r21w, %r25w
+# INTEL: cfcmovbe r25w, r21w, r17w
+0x62,0xec,0x35,0x14,0x46,0xe9
+
+# ATT: cfcmovbew %r17w, %r21w
+# INTEL: cfcmovbe r21w, r17w
+0x62,0xec,0x7d,0x0c,0x46,0xcd
+
+# ATT: cfcmovbew %r17w, 291(%r28,%r29,4)
+# INTEL: cfcmovbe word ptr [r28 + 4*r29 + 291], r17w
+0x62,0x8c,0x79,0x0c,0x46,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbel %r18d, %r22d, %r26d
+# INTEL: cfcmovbe r26d, r22d, r18d
+0x62,0xec,0x2c,0x14,0x46,0xf2
+
+# ATT: cfcmovbel %r18d, %r22d
+# INTEL: cfcmovbe r22d, r18d
+0x62,0xec,0x7c,0x0c,0x46,0xd6
+
+# ATT: cfcmovbel %r18d, 291(%r28,%r29,4)
+# INTEL: cfcmovbe dword ptr [r28 + 4*r29 + 291], r18d
+0x62,0x8c,0x78,0x0c,0x46,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbeq %r19, %r23, %r27
+# INTEL: cfcmovbe r27, r23, r19
+0x62,0xec,0xa4,0x14,0x46,0xfb
+
+# ATT: cfcmovbeq %r19, %r23
+# INTEL: cfcmovbe r23, r19
+0x62,0xec,0xfc,0x0c,0x46,0xdf
+
+# ATT: cfcmovbeq %r19, 291(%r28,%r29,4)
+# INTEL: cfcmovbe qword ptr [r28 + 4*r29 + 291], r19
+0x62,0x8c,0xf8,0x0c,0x46,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbew 291(%r28,%r29,4), %r17w, %r21w
+# INTEL: cfcmovbe r21w, r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x51,0x14,0x46,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbew 291(%r28,%r29,4), %r17w
+# INTEL: cfcmovbe r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x79,0x08,0x46,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbel 291(%r28,%r29,4), %r18d, %r22d
+# INTEL: cfcmovbe r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x48,0x14,0x46,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbel 291(%r28,%r29,4), %r18d
+# INTEL: cfcmovbe r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x78,0x08,0x46,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbeq 291(%r28,%r29,4), %r19, %r23
+# INTEL: cfcmovbe r23, r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xc0,0x14,0x46,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovbeq 291(%r28,%r29,4), %r19
+# INTEL: cfcmovbe r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xf8,0x08,0x46,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovlw %r17w, %r21w, %r25w
+# INTEL: cfcmovl r25w, r21w, r17w
+0x62,0xec,0x35,0x14,0x4c,0xe9
+
+# ATT: cfcmovlw %r17w, %r21w
+# INTEL: cfcmovl r21w, r17w
+0x62,0xec,0x7d,0x0c,0x4c,0xcd
+
+# ATT: cfcmovlw %r17w, 291(%r28,%r29,4)
+# INTEL: cfcmovl word ptr [r28 + 4*r29 + 291], r17w
+0x62,0x8c,0x79,0x0c,0x4c,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovll %r18d, %r22d, %r26d
+# INTEL: cfcmovl r26d, r22d, r18d
+0x62,0xec,0x2c,0x14,0x4c,0xf2
+
+# ATT: cfcmovll %r18d, %r22d
+# INTEL: cfcmovl r22d, r18d
+0x62,0xec,0x7c,0x0c,0x4c,0xd6
+
+# ATT: cfcmovll %r18d, 291(%r28,%r29,4)
+# INTEL: cfcmovl dword ptr [r28 + 4*r29 + 291], r18d
+0x62,0x8c,0x78,0x0c,0x4c,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovlq %r19, %r23, %r27
+# INTEL: cfcmovl r27, r23, r19
+0x62,0xec,0xa4,0x14,0x4c,0xfb
+
+# ATT: cfcmovlq %r19, %r23
+# INTEL: cfcmovl r23, r19
+0x62,0xec,0xfc,0x0c,0x4c,0xdf
+
+# ATT: cfcmovlq %r19, 291(%r28,%r29,4)
+# INTEL: cfcmovl qword ptr [r28 + 4*r29 + 291], r19
+0x62,0x8c,0xf8,0x0c,0x4c,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovlw 291(%r28,%r29,4), %r17w, %r21w
+# INTEL: cfcmovl r21w, r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x51,0x14,0x4c,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovlw 291(%r28,%r29,4), %r17w
+# INTEL: cfcmovl r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x79,0x08,0x4c,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovll 291(%r28,%r29,4), %r18d, %r22d
+# INTEL: cfcmovl r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x48,0x14,0x4c,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovll 291(%r28,%r29,4), %r18d
+# INTEL: cfcmovl r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x78,0x08,0x4c,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovlq 291(%r28,%r29,4), %r19, %r23
+# INTEL: cfcmovl r23, r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xc0,0x14,0x4c,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovlq 291(%r28,%r29,4), %r19
+# INTEL: cfcmovl r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xf8,0x08,0x4c,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovlew %r17w, %r21w, %r25w
+# INTEL: cfcmovle r25w, r21w, r17w
+0x62,0xec,0x35,0x14,0x4e,0xe9
+
+# ATT: cfcmovlew %r17w, %r21w
+# INTEL: cfcmovle r21w, r17w
+0x62,0xec,0x7d,0x0c,0x4e,0xcd
+
+# ATT: cfcmovlew %r17w, 291(%r28,%r29,4)
+# INTEL: cfcmovle word ptr [r28 + 4*r29 + 291], r17w
+0x62,0x8c,0x79,0x0c,0x4e,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovlel %r18d, %r22d, %r26d
+# INTEL: cfcmovle r26d, r22d, r18d
+0x62,0xec,0x2c,0x14,0x4e,0xf2
+
+# ATT: cfcmovlel %r18d, %r22d
+# INTEL: cfcmovle r22d, r18d
+0x62,0xec,0x7c,0x0c,0x4e,0xd6
+
+# ATT: cfcmovlel %r18d, 291(%r28,%r29,4)
+# INTEL: cfcmovle dword ptr [r28 + 4*r29 + 291], r18d
+0x62,0x8c,0x78,0x0c,0x4e,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovleq %r19, %r23, %r27
+# INTEL: cfcmovle r27, r23, r19
+0x62,0xec,0xa4,0x14,0x4e,0xfb
+
+# ATT: cfcmovleq %r19, %r23
+# INTEL: cfcmovle r23, r19
+0x62,0xec,0xfc,0x0c,0x4e,0xdf
+
+# ATT: cfcmovleq %r19, 291(%r28,%r29,4)
+# INTEL: cfcmovle qword ptr [r28 + 4*r29 + 291], r19
+0x62,0x8c,0xf8,0x0c,0x4e,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovlew 291(%r28,%r29,4), %r17w, %r21w
+# INTEL: cfcmovle r21w, r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x51,0x14,0x4e,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovlew 291(%r28,%r29,4), %r17w
+# INTEL: cfcmovle r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x79,0x08,0x4e,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovlel 291(%r28,%r29,4), %r18d, %r22d
+# INTEL: cfcmovle r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x48,0x14,0x4e,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovlel 291(%r28,%r29,4), %r18d
+# INTEL: cfcmovle r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x78,0x08,0x4e,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovleq 291(%r28,%r29,4), %r19, %r23
+# INTEL: cfcmovle r23, r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xc0,0x14,0x4e,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovleq 291(%r28,%r29,4), %r19
+# INTEL: cfcmovle r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xf8,0x08,0x4e,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovaew %r17w, %r21w, %r25w
+# INTEL: cfcmovae r25w, r21w, r17w
+0x62,0xec,0x35,0x14,0x43,0xe9
+
+# ATT: cfcmovaew %r17w, %r21w
+# INTEL: cfcmovae r21w, r17w
+0x62,0xec,0x7d,0x0c,0x43,0xcd
+
+# ATT: cfcmovaew %r17w, 291(%r28,%r29,4)
+# INTEL: cfcmovae word ptr [r28 + 4*r29 + 291], r17w
+0x62,0x8c,0x79,0x0c,0x43,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovael %r18d, %r22d, %r26d
+# INTEL: cfcmovae r26d, r22d, r18d
+0x62,0xec,0x2c,0x14,0x43,0xf2
+
+# ATT: cfcmovael %r18d, %r22d
+# INTEL: cfcmovae r22d, r18d
+0x62,0xec,0x7c,0x0c,0x43,0xd6
+
+# ATT: cfcmovael %r18d, 291(%r28,%r29,4)
+# INTEL: cfcmovae dword ptr [r28 + 4*r29 + 291], r18d
+0x62,0x8c,0x78,0x0c,0x43,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovaeq %r19, %r23, %r27
+# INTEL: cfcmovae r27, r23, r19
+0x62,0xec,0xa4,0x14,0x43,0xfb
+
+# ATT: cfcmovaeq %r19, %r23
+# INTEL: cfcmovae r23, r19
+0x62,0xec,0xfc,0x0c,0x43,0xdf
+
+# ATT: cfcmovaeq %r19, 291(%r28,%r29,4)
+# INTEL: cfcmovae qword ptr [r28 + 4*r29 + 291], r19
+0x62,0x8c,0xf8,0x0c,0x43,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovaew 291(%r28,%r29,4), %r17w, %r21w
+# INTEL: cfcmovae r21w, r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x51,0x14,0x43,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovaew 291(%r28,%r29,4), %r17w
+# INTEL: cfcmovae r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x79,0x08,0x43,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovael 291(%r28,%r29,4), %r18d, %r22d
+# INTEL: cfcmovae r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x48,0x14,0x43,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovael 291(%r28,%r29,4), %r18d
+# INTEL: cfcmovae r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x78,0x08,0x43,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovaeq 291(%r28,%r29,4), %r19, %r23
+# INTEL: cfcmovae r23, r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xc0,0x14,0x43,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovaeq 291(%r28,%r29,4), %r19
+# INTEL: cfcmovae r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xf8,0x08,0x43,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovaw %r17w, %r21w, %r25w
+# INTEL: cfcmova r25w, r21w, r17w
+0x62,0xec,0x35,0x14,0x47,0xe9
+
+# ATT: cfcmovaw %r17w, %r21w
+# INTEL: cfcmova r21w, r17w
+0x62,0xec,0x7d,0x0c,0x47,0xcd
+
+# ATT: cfcmovaw %r17w, 291(%r28,%r29,4)
+# INTEL: cfcmova word ptr [r28 + 4*r29 + 291], r17w
+0x62,0x8c,0x79,0x0c,0x47,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmoval %r18d, %r22d, %r26d
+# INTEL: cfcmova r26d, r22d, r18d
+0x62,0xec,0x2c,0x14,0x47,0xf2
+
+# ATT: cfcmoval %r18d, %r22d
+# INTEL: cfcmova r22d, r18d
+0x62,0xec,0x7c,0x0c,0x47,0xd6
+
+# ATT: cfcmoval %r18d, 291(%r28,%r29,4)
+# INTEL: cfcmova dword ptr [r28 + 4*r29 + 291], r18d
+0x62,0x8c,0x78,0x0c,0x47,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovaq %r19, %r23, %r27
+# INTEL: cfcmova r27, r23, r19
+0x62,0xec,0xa4,0x14,0x47,0xfb
+
+# ATT: cfcmovaq %r19, %r23
+# INTEL: cfcmova r23, r19
+0x62,0xec,0xfc,0x0c,0x47,0xdf
+
+# ATT: cfcmovaq %r19, 291(%r28,%r29,4)
+# INTEL: cfcmova qword ptr [r28 + 4*r29 + 291], r19
+0x62,0x8c,0xf8,0x0c,0x47,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovaw 291(%r28,%r29,4), %r17w, %r21w
+# INTEL: cfcmova r21w, r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x51,0x14,0x47,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovaw 291(%r28,%r29,4), %r17w
+# INTEL: cfcmova r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x79,0x08,0x47,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmoval 291(%r28,%r29,4), %r18d, %r22d
+# INTEL: cfcmova r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x48,0x14,0x47,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmoval 291(%r28,%r29,4), %r18d
+# INTEL: cfcmova r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x78,0x08,0x47,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovaq 291(%r28,%r29,4), %r19, %r23
+# INTEL: cfcmova r23, r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xc0,0x14,0x47,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovaq 291(%r28,%r29,4), %r19
+# INTEL: cfcmova r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xf8,0x08,0x47,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovgew %r17w, %r21w, %r25w
+# INTEL: cfcmovge r25w, r21w, r17w
+0x62,0xec,0x35,0x14,0x4d,0xe9
+
+# ATT: cfcmovgew %r17w, %r21w
+# INTEL: cfcmovge r21w, r17w
+0x62,0xec,0x7d,0x0c,0x4d,0xcd
+
+# ATT: cfcmovgew %r17w, 291(%r28,%r29,4)
+# INTEL: cfcmovge word ptr [r28 + 4*r29 + 291], r17w
+0x62,0x8c,0x79,0x0c,0x4d,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovgel %r18d, %r22d, %r26d
+# INTEL: cfcmovge r26d, r22d, r18d
+0x62,0xec,0x2c,0x14,0x4d,0xf2
+
+# ATT: cfcmovgel %r18d, %r22d
+# INTEL: cfcmovge r22d, r18d
+0x62,0xec,0x7c,0x0c,0x4d,0xd6
+
+# ATT: cfcmovgel %r18d, 291(%r28,%r29,4)
+# INTEL: cfcmovge dword ptr [r28 + 4*r29 + 291], r18d
+0x62,0x8c,0x78,0x0c,0x4d,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovgeq %r19, %r23, %r27
+# INTEL: cfcmovge r27, r23, r19
+0x62,0xec,0xa4,0x14,0x4d,0xfb
+
+# ATT: cfcmovgeq %r19, %r23
+# INTEL: cfcmovge r23, r19
+0x62,0xec,0xfc,0x0c,0x4d,0xdf
+
+# ATT: cfcmovgeq %r19, 291(%r28,%r29,4)
+# INTEL: cfcmovge qword ptr [r28 + 4*r29 + 291], r19
+0x62,0x8c,0xf8,0x0c,0x4d,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovgew 291(%r28,%r29,4), %r17w, %r21w
+# INTEL: cfcmovge r21w, r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x51,0x14,0x4d,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovgew 291(%r28,%r29,4), %r17w
+# INTEL: cfcmovge r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x79,0x08,0x4d,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovgel 291(%r28,%r29,4), %r18d, %r22d
+# INTEL: cfcmovge r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x48,0x14,0x4d,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovgel 291(%r28,%r29,4), %r18d
+# INTEL: cfcmovge r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x78,0x08,0x4d,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovgeq 291(%r28,%r29,4), %r19, %r23
+# INTEL: cfcmovge r23, r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xc0,0x14,0x4d,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovgeq 291(%r28,%r29,4), %r19
+# INTEL: cfcmovge r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xf8,0x08,0x4d,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnow %r17w, %r21w, %r25w
+# INTEL: cfcmovno r25w, r21w, r17w
+0x62,0xec,0x35,0x14,0x41,0xe9
+
+# ATT: cfcmovnow %r17w, %r21w
+# INTEL: cfcmovno r21w, r17w
+0x62,0xec,0x7d,0x0c,0x41,0xcd
+
+# ATT: cfcmovnow %r17w, 291(%r28,%r29,4)
+# INTEL: cfcmovno word ptr [r28 + 4*r29 + 291], r17w
+0x62,0x8c,0x79,0x0c,0x41,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnol %r18d, %r22d, %r26d
+# INTEL: cfcmovno r26d, r22d, r18d
+0x62,0xec,0x2c,0x14,0x41,0xf2
+
+# ATT: cfcmovnol %r18d, %r22d
+# INTEL: cfcmovno r22d, r18d
+0x62,0xec,0x7c,0x0c,0x41,0xd6
+
+# ATT: cfcmovnol %r18d, 291(%r28,%r29,4)
+# INTEL: cfcmovno dword ptr [r28 + 4*r29 + 291], r18d
+0x62,0x8c,0x78,0x0c,0x41,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnoq %r19, %r23, %r27
+# INTEL: cfcmovno r27, r23, r19
+0x62,0xec,0xa4,0x14,0x41,0xfb
+
+# ATT: cfcmovnoq %r19, %r23
+# INTEL: cfcmovno r23, r19
+0x62,0xec,0xfc,0x0c,0x41,0xdf
+
+# ATT: cfcmovnoq %r19, 291(%r28,%r29,4)
+# INTEL: cfcmovno qword ptr [r28 + 4*r29 + 291], r19
+0x62,0x8c,0xf8,0x0c,0x41,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnow 291(%r28,%r29,4), %r17w, %r21w
+# INTEL: cfcmovno r21w, r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x51,0x14,0x41,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnow 291(%r28,%r29,4), %r17w
+# INTEL: cfcmovno r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x79,0x08,0x41,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnol 291(%r28,%r29,4), %r18d, %r22d
+# INTEL: cfcmovno r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x48,0x14,0x41,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnol 291(%r28,%r29,4), %r18d
+# INTEL: cfcmovno r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x78,0x08,0x41,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnoq 291(%r28,%r29,4), %r19, %r23
+# INTEL: cfcmovno r23, r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xc0,0x14,0x41,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnoq 291(%r28,%r29,4), %r19
+# INTEL: cfcmovno r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xf8,0x08,0x41,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnpw %r17w, %r21w, %r25w
+# INTEL: cfcmovnp r25w, r21w, r17w
+0x62,0xec,0x35,0x14,0x4b,0xe9
+
+# ATT: cfcmovnpw %r17w, %r21w
+# INTEL: cfcmovnp r21w, r17w
+0x62,0xec,0x7d,0x0c,0x4b,0xcd
+
+# ATT: cfcmovnpw %r17w, 291(%r28,%r29,4)
+# INTEL: cfcmovnp word ptr [r28 + 4*r29 + 291], r17w
+0x62,0x8c,0x79,0x0c,0x4b,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnpl %r18d, %r22d, %r26d
+# INTEL: cfcmovnp r26d, r22d, r18d
+0x62,0xec,0x2c,0x14,0x4b,0xf2
+
+# ATT: cfcmovnpl %r18d, %r22d
+# INTEL: cfcmovnp r22d, r18d
+0x62,0xec,0x7c,0x0c,0x4b,0xd6
+
+# ATT: cfcmovnpl %r18d, 291(%r28,%r29,4)
+# INTEL: cfcmovnp dword ptr [r28 + 4*r29 + 291], r18d
+0x62,0x8c,0x78,0x0c,0x4b,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnpq %r19, %r23, %r27
+# INTEL: cfcmovnp r27, r23, r19
+0x62,0xec,0xa4,0x14,0x4b,0xfb
+
+# ATT: cfcmovnpq %r19, %r23
+# INTEL: cfcmovnp r23, r19
+0x62,0xec,0xfc,0x0c,0x4b,0xdf
+
+# ATT: cfcmovnpq %r19, 291(%r28,%r29,4)
+# INTEL: cfcmovnp qword ptr [r28 + 4*r29 + 291], r19
+0x62,0x8c,0xf8,0x0c,0x4b,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnpw 291(%r28,%r29,4), %r17w, %r21w
+# INTEL: cfcmovnp r21w, r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x51,0x14,0x4b,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnpw 291(%r28,%r29,4), %r17w
+# INTEL: cfcmovnp r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x79,0x08,0x4b,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnpl 291(%r28,%r29,4), %r18d, %r22d
+# INTEL: cfcmovnp r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x48,0x14,0x4b,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnpl 291(%r28,%r29,4), %r18d
+# INTEL: cfcmovnp r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x78,0x08,0x4b,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnpq 291(%r28,%r29,4), %r19, %r23
+# INTEL: cfcmovnp r23, r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xc0,0x14,0x4b,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnpq 291(%r28,%r29,4), %r19
+# INTEL: cfcmovnp r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xf8,0x08,0x4b,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnsw %r17w, %r21w, %r25w
+# INTEL: cfcmovns r25w, r21w, r17w
+0x62,0xec,0x35,0x14,0x49,0xe9
+
+# ATT: cfcmovnsw %r17w, %r21w
+# INTEL: cfcmovns r21w, r17w
+0x62,0xec,0x7d,0x0c,0x49,0xcd
+
+# ATT: cfcmovnsw %r17w, 291(%r28,%r29,4)
+# INTEL: cfcmovns word ptr [r28 + 4*r29 + 291], r17w
+0x62,0x8c,0x79,0x0c,0x49,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnsl %r18d, %r22d, %r26d
+# INTEL: cfcmovns r26d, r22d, r18d
+0x62,0xec,0x2c,0x14,0x49,0xf2
+
+# ATT: cfcmovnsl %r18d, %r22d
+# INTEL: cfcmovns r22d, r18d
+0x62,0xec,0x7c,0x0c,0x49,0xd6
+
+# ATT: cfcmovnsl %r18d, 291(%r28,%r29,4)
+# INTEL: cfcmovns dword ptr [r28 + 4*r29 + 291], r18d
+0x62,0x8c,0x78,0x0c,0x49,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnsq %r19, %r23, %r27
+# INTEL: cfcmovns r27, r23, r19
+0x62,0xec,0xa4,0x14,0x49,0xfb
+
+# ATT: cfcmovnsq %r19, %r23
+# INTEL: cfcmovns r23, r19
+0x62,0xec,0xfc,0x0c,0x49,0xdf
+
+# ATT: cfcmovnsq %r19, 291(%r28,%r29,4)
+# INTEL: cfcmovns qword ptr [r28 + 4*r29 + 291], r19
+0x62,0x8c,0xf8,0x0c,0x49,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnsw 291(%r28,%r29,4), %r17w, %r21w
+# INTEL: cfcmovns r21w, r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x51,0x14,0x49,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnsw 291(%r28,%r29,4), %r17w
+# INTEL: cfcmovns r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x79,0x08,0x49,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnsl 291(%r28,%r29,4), %r18d, %r22d
+# INTEL: cfcmovns r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x48,0x14,0x49,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnsl 291(%r28,%r29,4), %r18d
+# INTEL: cfcmovns r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x78,0x08,0x49,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnsq 291(%r28,%r29,4), %r19, %r23
+# INTEL: cfcmovns r23, r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xc0,0x14,0x49,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnsq 291(%r28,%r29,4), %r19
+# INTEL: cfcmovns r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xf8,0x08,0x49,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnew %r17w, %r21w, %r25w
+# INTEL: cfcmovne r25w, r21w, r17w
+0x62,0xec,0x35,0x14,0x45,0xe9
+
+# ATT: cfcmovnew %r17w, %r21w
+# INTEL: cfcmovne r21w, r17w
+0x62,0xec,0x7d,0x0c,0x45,0xcd
+
+# ATT: cfcmovnew %r17w, 291(%r28,%r29,4)
+# INTEL: cfcmovne word ptr [r28 + 4*r29 + 291], r17w
+0x62,0x8c,0x79,0x0c,0x45,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnel %r18d, %r22d, %r26d
+# INTEL: cfcmovne r26d, r22d, r18d
+0x62,0xec,0x2c,0x14,0x45,0xf2
+
+# ATT: cfcmovnel %r18d, %r22d
+# INTEL: cfcmovne r22d, r18d
+0x62,0xec,0x7c,0x0c,0x45,0xd6
+
+# ATT: cfcmovnel %r18d, 291(%r28,%r29,4)
+# INTEL: cfcmovne dword ptr [r28 + 4*r29 + 291], r18d
+0x62,0x8c,0x78,0x0c,0x45,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovneq %r19, %r23, %r27
+# INTEL: cfcmovne r27, r23, r19
+0x62,0xec,0xa4,0x14,0x45,0xfb
+
+# ATT: cfcmovneq %r19, %r23
+# INTEL: cfcmovne r23, r19
+0x62,0xec,0xfc,0x0c,0x45,0xdf
+
+# ATT: cfcmovneq %r19, 291(%r28,%r29,4)
+# INTEL: cfcmovne qword ptr [r28 + 4*r29 + 291], r19
+0x62,0x8c,0xf8,0x0c,0x45,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnew 291(%r28,%r29,4), %r17w, %r21w
+# INTEL: cfcmovne r21w, r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x51,0x14,0x45,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnew 291(%r28,%r29,4), %r17w
+# INTEL: cfcmovne r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x79,0x08,0x45,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnel 291(%r28,%r29,4), %r18d, %r22d
+# INTEL: cfcmovne r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x48,0x14,0x45,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovnel 291(%r28,%r29,4), %r18d
+# INTEL: cfcmovne r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x78,0x08,0x45,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovneq 291(%r28,%r29,4), %r19, %r23
+# INTEL: cfcmovne r23, r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xc0,0x14,0x45,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovneq 291(%r28,%r29,4), %r19
+# INTEL: cfcmovne r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xf8,0x08,0x45,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovpw %r17w, %r21w, %r25w
+# INTEL: cfcmovp r25w, r21w, r17w
+0x62,0xec,0x35,0x14,0x4a,0xe9
+
+# ATT: cfcmovpw %r17w, %r21w
+# INTEL: cfcmovp r21w, r17w
+0x62,0xec,0x7d,0x0c,0x4a,0xcd
+
+# ATT: cfcmovpw %r17w, 291(%r28,%r29,4)
+# INTEL: cfcmovp word ptr [r28 + 4*r29 + 291], r17w
+0x62,0x8c,0x79,0x0c,0x4a,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovpl %r18d, %r22d, %r26d
+# INTEL: cfcmovp r26d, r22d, r18d
+0x62,0xec,0x2c,0x14,0x4a,0xf2
+
+# ATT: cfcmovpl %r18d, %r22d
+# INTEL: cfcmovp r22d, r18d
+0x62,0xec,0x7c,0x0c,0x4a,0xd6
+
+# ATT: cfcmovpl %r18d, 291(%r28,%r29,4)
+# INTEL: cfcmovp dword ptr [r28 + 4*r29 + 291], r18d
+0x62,0x8c,0x78,0x0c,0x4a,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovpq %r19, %r23, %r27
+# INTEL: cfcmovp r27, r23, r19
+0x62,0xec,0xa4,0x14,0x4a,0xfb
+
+# ATT: cfcmovpq %r19, %r23
+# INTEL: cfcmovp r23, r19
+0x62,0xec,0xfc,0x0c,0x4a,0xdf
+
+# ATT: cfcmovpq %r19, 291(%r28,%r29,4)
+# INTEL: cfcmovp qword ptr [r28 + 4*r29 + 291], r19
+0x62,0x8c,0xf8,0x0c,0x4a,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovpw 291(%r28,%r29,4), %r17w, %r21w
+# INTEL: cfcmovp r21w, r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x51,0x14,0x4a,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovpw 291(%r28,%r29,4), %r17w
+# INTEL: cfcmovp r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x79,0x08,0x4a,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovpl 291(%r28,%r29,4), %r18d, %r22d
+# INTEL: cfcmovp r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x48,0x14,0x4a,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovpl 291(%r28,%r29,4), %r18d
+# INTEL: cfcmovp r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x78,0x08,0x4a,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovpq 291(%r28,%r29,4), %r19, %r23
+# INTEL: cfcmovp r23, r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xc0,0x14,0x4a,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovpq 291(%r28,%r29,4), %r19
+# INTEL: cfcmovp r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xf8,0x08,0x4a,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovsw %r17w, %r21w, %r25w
+# INTEL: cfcmovs r25w, r21w, r17w
+0x62,0xec,0x35,0x14,0x48,0xe9
+
+# ATT: cfcmovsw %r17w, %r21w
+# INTEL: cfcmovs r21w, r17w
+0x62,0xec,0x7d,0x0c,0x48,0xcd
+
+# ATT: cfcmovsw %r17w, 291(%r28,%r29,4)
+# INTEL: cfcmovs word ptr [r28 + 4*r29 + 291], r17w
+0x62,0x8c,0x79,0x0c,0x48,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovsl %r18d, %r22d, %r26d
+# INTEL: cfcmovs r26d, r22d, r18d
+0x62,0xec,0x2c,0x14,0x48,0xf2
+
+# ATT: cfcmovsl %r18d, %r22d
+# INTEL: cfcmovs r22d, r18d
+0x62,0xec,0x7c,0x0c,0x48,0xd6
+
+# ATT: cfcmovsl %r18d, 291(%r28,%r29,4)
+# INTEL: cfcmovs dword ptr [r28 + 4*r29 + 291], r18d
+0x62,0x8c,0x78,0x0c,0x48,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovsq %r19, %r23, %r27
+# INTEL: cfcmovs r27, r23, r19
+0x62,0xec,0xa4,0x14,0x48,0xfb
+
+# ATT: cfcmovsq %r19, %r23
+# INTEL: cfcmovs r23, r19
+0x62,0xec,0xfc,0x0c,0x48,0xdf
+
+# ATT: cfcmovsq %r19, 291(%r28,%r29,4)
+# INTEL: cfcmovs qword ptr [r28 + 4*r29 + 291], r19
+0x62,0x8c,0xf8,0x0c,0x48,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovsw 291(%r28,%r29,4), %r17w, %r21w
+# INTEL: cfcmovs r21w, r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x51,0x14,0x48,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovsw 291(%r28,%r29,4), %r17w
+# INTEL: cfcmovs r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x79,0x08,0x48,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovsl 291(%r28,%r29,4), %r18d, %r22d
+# INTEL: cfcmovs r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x48,0x14,0x48,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovsl 291(%r28,%r29,4), %r18d
+# INTEL: cfcmovs r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x78,0x08,0x48,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovsq 291(%r28,%r29,4), %r19, %r23
+# INTEL: cfcmovs r23, r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xc0,0x14,0x48,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovsq 291(%r28,%r29,4), %r19
+# INTEL: cfcmovs r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xf8,0x08,0x48,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovew %r17w, %r21w, %r25w
+# INTEL: cfcmove r25w, r21w, r17w
+0x62,0xec,0x35,0x14,0x44,0xe9
+
+# ATT: cfcmovew %r17w, %r21w
+# INTEL: cfcmove r21w, r17w
+0x62,0xec,0x7d,0x0c,0x44,0xcd
+
+# ATT: cfcmovew %r17w, 291(%r28,%r29,4)
+# INTEL: cfcmove word ptr [r28 + 4*r29 + 291], r17w
+0x62,0x8c,0x79,0x0c,0x44,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovel %r18d, %r22d, %r26d
+# INTEL: cfcmove r26d, r22d, r18d
+0x62,0xec,0x2c,0x14,0x44,0xf2
+
+# ATT: cfcmovel %r18d, %r22d
+# INTEL: cfcmove r22d, r18d
+0x62,0xec,0x7c,0x0c,0x44,0xd6
+
+# ATT: cfcmovel %r18d, 291(%r28,%r29,4)
+# INTEL: cfcmove dword ptr [r28 + 4*r29 + 291], r18d
+0x62,0x8c,0x78,0x0c,0x44,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmoveq %r19, %r23, %r27
+# INTEL: cfcmove r27, r23, r19
+0x62,0xec,0xa4,0x14,0x44,0xfb
+
+# ATT: cfcmoveq %r19, %r23
+# INTEL: cfcmove r23, r19
+0x62,0xec,0xfc,0x0c,0x44,0xdf
+
+# ATT: cfcmoveq %r19, 291(%r28,%r29,4)
+# INTEL: cfcmove qword ptr [r28 + 4*r29 + 291], r19
+0x62,0x8c,0xf8,0x0c,0x44,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovew 291(%r28,%r29,4), %r17w, %r21w
+# INTEL: cfcmove r21w, r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x51,0x14,0x44,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovew 291(%r28,%r29,4), %r17w
+# INTEL: cfcmove r17w, word ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x79,0x08,0x44,0x8c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovel 291(%r28,%r29,4), %r18d, %r22d
+# INTEL: cfcmove r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x48,0x14,0x44,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmovel 291(%r28,%r29,4), %r18d
+# INTEL: cfcmove r18d, dword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0x78,0x08,0x44,0x94,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmoveq 291(%r28,%r29,4), %r19, %r23
+# INTEL: cfcmove r23, r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xc0,0x14,0x44,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: cfcmoveq 291(%r28,%r29,4), %r19
+# INTEL: cfcmove r19, qword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xf8,0x08,0x44,0x9c,0xac,0x23,0x01,0x00,0x00
diff --git a/llvm/test/MC/Disassembler/X86/apx/cmov.txt b/llvm/test/MC/Disassembler/X86/apx/cmov.txt
new file mode 100644
index 0000000..cc96fb1
--- /dev/null
+++ b/llvm/test/MC/Disassembler/X86/apx/cmov.txt
@@ -0,0 +1,386 @@
+# RUN: llvm-mc -triple x86_64 -disassemble %s | FileCheck %s --check-prefix=ATT
+# RUN: llvm-mc -triple x86_64 -disassemble -output-asm-variant=1 %s | FileCheck %s --check-prefix=INTEL
+
+# ATT: cmovbw %dx, %ax, %r9w
+# INTEL: cmovb r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x42,0xc2
+
+# ATT: cmovbl %ecx, %edx, %r10d
+# INTEL: cmovb r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x42,0xd1
+
+# ATT: cmovbq %r9, %r15, %r11
+# INTEL: cmovb r11, r15, r9
+0x62,0x54,0xa4,0x18,0x42,0xf9
+
+# ATT: cmovbw 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmovb ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x42,0x54,0x80,0x7b
+
+# ATT: cmovbl 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmovb edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x42,0x4c,0x80,0x7b
+
+# ATT: cmovbq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmovb r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x42,0x4c,0x80,0x7b
+
+# ATT: cmovbew %dx, %ax, %r9w
+# INTEL: cmovbe r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x46,0xc2
+
+# ATT: cmovbel %ecx, %edx, %r10d
+# INTEL: cmovbe r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x46,0xd1
+
+# ATT: cmovbeq %r9, %r15, %r11
+# INTEL: cmovbe r11, r15, r9
+0x62,0x54,0xa4,0x18,0x46,0xf9
+
+# ATT: cmovbew 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmovbe ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x46,0x54,0x80,0x7b
+
+# ATT: cmovbel 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmovbe edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x46,0x4c,0x80,0x7b
+
+# ATT: cmovbeq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmovbe r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x46,0x4c,0x80,0x7b
+
+# ATT: cmovlw %dx, %ax, %r9w
+# INTEL: cmovl r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x4c,0xc2
+
+# ATT: cmovll %ecx, %edx, %r10d
+# INTEL: cmovl r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x4c,0xd1
+
+# ATT: cmovlq %r9, %r15, %r11
+# INTEL: cmovl r11, r15, r9
+0x62,0x54,0xa4,0x18,0x4c,0xf9
+
+# ATT: cmovlw 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmovl ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x4c,0x54,0x80,0x7b
+
+# ATT: cmovll 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmovl edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x4c,0x4c,0x80,0x7b
+
+# ATT: cmovlq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmovl r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x4c,0x4c,0x80,0x7b
+
+# ATT: cmovlew %dx, %ax, %r9w
+# INTEL: cmovle r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x4e,0xc2
+
+# ATT: cmovlel %ecx, %edx, %r10d
+# INTEL: cmovle r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x4e,0xd1
+
+# ATT: cmovleq %r9, %r15, %r11
+# INTEL: cmovle r11, r15, r9
+0x62,0x54,0xa4,0x18,0x4e,0xf9
+
+# ATT: cmovlew 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmovle ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x4e,0x54,0x80,0x7b
+
+# ATT: cmovlel 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmovle edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x4e,0x4c,0x80,0x7b
+
+# ATT: cmovleq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmovle r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x4e,0x4c,0x80,0x7b
+
+# ATT: cmovaew %dx, %ax, %r9w
+# INTEL: cmovae r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x43,0xc2
+
+# ATT: cmovael %ecx, %edx, %r10d
+# INTEL: cmovae r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x43,0xd1
+
+# ATT: cmovaeq %r9, %r15, %r11
+# INTEL: cmovae r11, r15, r9
+0x62,0x54,0xa4,0x18,0x43,0xf9
+
+# ATT: cmovaew 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmovae ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x43,0x54,0x80,0x7b
+
+# ATT: cmovael 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmovae edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x43,0x4c,0x80,0x7b
+
+# ATT: cmovaeq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmovae r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x43,0x4c,0x80,0x7b
+
+# ATT: cmovaw %dx, %ax, %r9w
+# INTEL: cmova r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x47,0xc2
+
+# ATT: cmoval %ecx, %edx, %r10d
+# INTEL: cmova r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x47,0xd1
+
+# ATT: cmovaq %r9, %r15, %r11
+# INTEL: cmova r11, r15, r9
+0x62,0x54,0xa4,0x18,0x47,0xf9
+
+# ATT: cmovaw 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmova ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x47,0x54,0x80,0x7b
+
+# ATT: cmoval 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmova edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x47,0x4c,0x80,0x7b
+
+# ATT: cmovaq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmova r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x47,0x4c,0x80,0x7b
+
+# ATT: cmovgew %dx, %ax, %r9w
+# INTEL: cmovge r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x4d,0xc2
+
+# ATT: cmovgel %ecx, %edx, %r10d
+# INTEL: cmovge r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x4d,0xd1
+
+# ATT: cmovgeq %r9, %r15, %r11
+# INTEL: cmovge r11, r15, r9
+0x62,0x54,0xa4,0x18,0x4d,0xf9
+
+# ATT: cmovgew 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmovge ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x4d,0x54,0x80,0x7b
+
+# ATT: cmovgel 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmovge edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x4d,0x4c,0x80,0x7b
+
+# ATT: cmovgeq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmovge r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x4d,0x4c,0x80,0x7b
+
+# ATT: cmovgw %dx, %ax, %r9w
+# INTEL: cmovg r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x4f,0xc2
+
+# ATT: cmovgl %ecx, %edx, %r10d
+# INTEL: cmovg r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x4f,0xd1
+
+# ATT: cmovgq %r9, %r15, %r11
+# INTEL: cmovg r11, r15, r9
+0x62,0x54,0xa4,0x18,0x4f,0xf9
+
+# ATT: cmovgw 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmovg ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x4f,0x54,0x80,0x7b
+
+# ATT: cmovgl 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmovg edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x4f,0x4c,0x80,0x7b
+
+# ATT: cmovgq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmovg r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x4f,0x4c,0x80,0x7b
+
+# ATT: cmovnow %dx, %ax, %r9w
+# INTEL: cmovno r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x41,0xc2
+
+# ATT: cmovnol %ecx, %edx, %r10d
+# INTEL: cmovno r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x41,0xd1
+
+# ATT: cmovnoq %r9, %r15, %r11
+# INTEL: cmovno r11, r15, r9
+0x62,0x54,0xa4,0x18,0x41,0xf9
+
+# ATT: cmovnow 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmovno ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x41,0x54,0x80,0x7b
+
+# ATT: cmovnol 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmovno edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x41,0x4c,0x80,0x7b
+
+# ATT: cmovnoq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmovno r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x41,0x4c,0x80,0x7b
+
+# ATT: cmovnpw %dx, %ax, %r9w
+# INTEL: cmovnp r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x4b,0xc2
+
+# ATT: cmovnpl %ecx, %edx, %r10d
+# INTEL: cmovnp r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x4b,0xd1
+
+# ATT: cmovnpq %r9, %r15, %r11
+# INTEL: cmovnp r11, r15, r9
+0x62,0x54,0xa4,0x18,0x4b,0xf9
+
+# ATT: cmovnpw 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmovnp ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x4b,0x54,0x80,0x7b
+
+# ATT: cmovnpl 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmovnp edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x4b,0x4c,0x80,0x7b
+
+# ATT: cmovnpq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmovnp r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x4b,0x4c,0x80,0x7b
+
+# ATT: cmovnsw %dx, %ax, %r9w
+# INTEL: cmovns r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x49,0xc2
+
+# ATT: cmovnsl %ecx, %edx, %r10d
+# INTEL: cmovns r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x49,0xd1
+
+# ATT: cmovnsq %r9, %r15, %r11
+# INTEL: cmovns r11, r15, r9
+0x62,0x54,0xa4,0x18,0x49,0xf9
+
+# ATT: cmovnsw 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmovns ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x49,0x54,0x80,0x7b
+
+# ATT: cmovnsl 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmovns edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x49,0x4c,0x80,0x7b
+
+# ATT: cmovnsq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmovns r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x49,0x4c,0x80,0x7b
+
+# ATT: cmovnew %dx, %ax, %r9w
+# INTEL: cmovne r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x45,0xc2
+
+# ATT: cmovnel %ecx, %edx, %r10d
+# INTEL: cmovne r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x45,0xd1
+
+# ATT: cmovneq %r9, %r15, %r11
+# INTEL: cmovne r11, r15, r9
+0x62,0x54,0xa4,0x18,0x45,0xf9
+
+# ATT: cmovnew 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmovne ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x45,0x54,0x80,0x7b
+
+# ATT: cmovnel 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmovne edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x45,0x4c,0x80,0x7b
+
+# ATT: cmovneq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmovne r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x45,0x4c,0x80,0x7b
+
+# ATT: cmovow %dx, %ax, %r9w
+# INTEL: cmovo r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x40,0xc2
+
+# ATT: cmovol %ecx, %edx, %r10d
+# INTEL: cmovo r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x40,0xd1
+
+# ATT: cmovoq %r9, %r15, %r11
+# INTEL: cmovo r11, r15, r9
+0x62,0x54,0xa4,0x18,0x40,0xf9
+
+# ATT: cmovow 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmovo ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x40,0x54,0x80,0x7b
+
+# ATT: cmovol 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmovo edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x40,0x4c,0x80,0x7b
+
+# ATT: cmovoq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmovo r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x40,0x4c,0x80,0x7b
+
+# ATT: cmovpw %dx, %ax, %r9w
+# INTEL: cmovp r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x4a,0xc2
+
+# ATT: cmovpl %ecx, %edx, %r10d
+# INTEL: cmovp r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x4a,0xd1
+
+# ATT: cmovpq %r9, %r15, %r11
+# INTEL: cmovp r11, r15, r9
+0x62,0x54,0xa4,0x18,0x4a,0xf9
+
+# ATT: cmovpw 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmovp ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x4a,0x54,0x80,0x7b
+
+# ATT: cmovpl 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmovp edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x4a,0x4c,0x80,0x7b
+
+# ATT: cmovpq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmovp r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x4a,0x4c,0x80,0x7b
+
+# ATT: cmovsw %dx, %ax, %r9w
+# INTEL: cmovs r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x48,0xc2
+
+# ATT: cmovsl %ecx, %edx, %r10d
+# INTEL: cmovs r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x48,0xd1
+
+# ATT: cmovsq %r9, %r15, %r11
+# INTEL: cmovs r11, r15, r9
+0x62,0x54,0xa4,0x18,0x48,0xf9
+
+# ATT: cmovsw 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmovs ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x48,0x54,0x80,0x7b
+
+# ATT: cmovsl 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmovs edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x48,0x4c,0x80,0x7b
+
+# ATT: cmovsq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmovs r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x48,0x4c,0x80,0x7b
+
+# ATT: cmovew %dx, %ax, %r9w
+# INTEL: cmove r9w, ax, dx
+0x62,0xf4,0x35,0x18,0x44,0xc2
+
+# ATT: cmovel %ecx, %edx, %r10d
+# INTEL: cmove r10d, edx, ecx
+0x62,0xf4,0x2c,0x18,0x44,0xd1
+
+# ATT: cmoveq %r9, %r15, %r11
+# INTEL: cmove r11, r15, r9
+0x62,0x54,0xa4,0x18,0x44,0xf9
+
+# ATT: cmovew 123(%r8,%rax,4), %dx, %ax
+# INTEL: cmove ax, dx, word ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x7d,0x18,0x44,0x54,0x80,0x7b
+
+# ATT: cmovel 123(%r8,%rax,4), %ecx, %edx
+# INTEL: cmove edx, ecx, dword ptr [r8 + 4*rax + 123]
+0x62,0xd4,0x6c,0x18,0x44,0x4c,0x80,0x7b
+
+# ATT: cmoveq 123(%r8,%rax,4), %r9, %r15
+# INTEL: cmove r15, r9, qword ptr [r8 + 4*rax + 123]
+0x62,0x54,0x84,0x18,0x44,0x4c,0x80,0x7b
diff --git a/llvm/test/MC/Disassembler/X86/apx/evex-format.txt b/llvm/test/MC/Disassembler/X86/apx/evex-format.txt
index 1c1f70b..1156f5c 100644
--- a/llvm/test/MC/Disassembler/X86/apx/evex-format.txt
+++ b/llvm/test/MC/Disassembler/X86/apx/evex-format.txt
@@ -11,6 +11,12 @@
# INTEL: add r18, qword ptr [r17 + 123], r16
0x62,0xec,0xec,0x10,0x01,0x41,0x7b
+## MRMDestMemCC
+
+# ATT: cfcmovbq %r16, 123(%r17,%r18,4)
+# INTEL: cfcmovb qword ptr [r17 + 4*r18 + 123], r16
+0x62,0xec,0xf8,0x0c,0x42,0x44,0x91,0x7b
+
## MRMSrcMem
# ATT: vbroadcasti32x4 (%r16,%r17), %zmm0
@@ -21,6 +27,16 @@
# INTEL: sub r18, r17, qword ptr [r16 + 123]
0x62,0xec,0xec,0x10,0x2b,0x48,0x7b
+## MRMSrcMemCC
+
+# ATT: cfcmovbq 123(%r16,%r17,4), %r18
+# INTEL: cfcmovb r18, qword ptr [r16 + 4*r17 + 123]
+0x62,0xec,0xf8,0x08,0x42,0x54,0x88,0x7b
+
+# ATT: cfcmovbq 123(%r16,%r17,4), %r18, %r19
+# INTEL: cfcmovb r19, r18, qword ptr [r16 + 4*r17 + 123]
+0x62,0xec,0xe0,0x14,0x42,0x54,0x88,0x7b
+
## MRM0m
# ATT: vprorq $0, (%r16,%r17), %zmm0
@@ -123,12 +139,28 @@
# INTEL: {nf} add r17, r16
0x62,0xec,0xfc,0x0c,0x01,0xc1
+## MRMDestRegCC
+
+# ATT: cfcmovbq %r16, %r17
+# INTEL: cfcmovb r17, r16
+0x62,0xec,0xfc,0x0c,0x42,0xc1
+
## MRMSrcReg
# ATT: mulxq %r16, %r17, %r18
# INTEL: mulx r18, r17, r16
0x62,0xea,0xf7,0x00,0xf6,0xd0
+## MRMSrcRegCC
+
+# ATT: cfcmovbq %r16, %r17, %r18
+# INTEL: cfcmovb r18, r17, r16
+0x62,0xec,0xec,0x14,0x42,0xc8
+
+# ATT: cfcmovlq %r16, %r17, %r18
+# INTEL: cfcmovl r18, r17, r16
+0x62,0xec,0xec,0x14,0x4c,0xc8
+
## MRMSrcReg4VOp3
# ATT: bzhiq %r19, %r23, %r27
diff --git a/llvm/test/MC/Disassembler/X86/apx/imulzu.txt b/llvm/test/MC/Disassembler/X86/apx/imulzu.txt
new file mode 100644
index 0000000..86142e05
--- /dev/null
+++ b/llvm/test/MC/Disassembler/X86/apx/imulzu.txt
@@ -0,0 +1,50 @@
+# RUN: llvm-mc -triple x86_64 -disassemble %s | FileCheck %s --check-prefix=ATT
+# RUN: llvm-mc -triple x86_64 -disassemble -output-asm-variant=1 %s | FileCheck %s --check-prefix=INTEL
+
+# ATT: imulzuw $123, %dx, %dx
+# INTEL: imulzu dx, dx, 123
+0x62,0xf4,0x7d,0x18,0x6b,0xd2,0x7b
+
+# ATT: imulzul $123, %ecx, %ecx
+# INTEL: imulzu ecx, ecx, 123
+0x62,0xf4,0x7c,0x18,0x6b,0xc9,0x7b
+
+# ATT: imulzuq $123, %r9, %r9
+# INTEL: imulzu r9, r9, 123
+0x62,0x54,0xfc,0x18,0x6b,0xc9,0x7b
+
+# ATT: imulzuw $123, 291(%r8,%rax,4), %dx
+# INTEL: imulzu dx, word ptr [r8 + 4*rax + 291], 123
+0x62,0xd4,0x7d,0x18,0x6b,0x94,0x80,0x23,0x01,0x00,0x00,0x7b
+
+# ATT: imulzul $123, 291(%r8,%rax,4), %ecx
+# INTEL: imulzu ecx, dword ptr [r8 + 4*rax + 291], 123
+0x62,0xd4,0x7c,0x18,0x6b,0x8c,0x80,0x23,0x01,0x00,0x00,0x7b
+
+# ATT: imulzuq $123, 291(%r8,%rax,4), %r9
+# INTEL: imulzu r9, qword ptr [r8 + 4*rax + 291], 123
+0x62,0x54,0xfc,0x18,0x6b,0x8c,0x80,0x23,0x01,0x00,0x00,0x7b
+
+# ATT: imulzuw $1234, %dx, %dx
+# INTEL: imulzu dx, dx, 1234
+0x62,0xf4,0x7d,0x18,0x69,0xd2,0xd2,0x04
+
+# ATT: imulzuw $1234, 291(%r8,%rax,4), %dx
+# INTEL: imulzu dx, word ptr [r8 + 4*rax + 291], 1234
+0x62,0xd4,0x7d,0x18,0x69,0x94,0x80,0x23,0x01,0x00,0x00,0xd2,0x04
+
+# ATT: imulzul $123456, %ecx, %ecx
+# INTEL: imulzu ecx, ecx, 123456
+0x62,0xf4,0x7c,0x18,0x69,0xc9,0x40,0xe2,0x01,0x00
+
+# ATT: imulzuq $123456, %r9, %r9
+# INTEL: imulzu r9, r9, 123456
+0x62,0x54,0xfc,0x18,0x69,0xc9,0x40,0xe2,0x01,0x00
+
+# ATT: imulzul $123456, 291(%r8,%rax,4), %ecx
+# INTEL: imulzu ecx, dword ptr [r8 + 4*rax + 291], 123456
+0x62,0xd4,0x7c,0x18,0x69,0x8c,0x80,0x23,0x01,0x00,0x00,0x40,0xe2,0x01,0x00
+
+# ATT: imulzuq $123456, 291(%r8,%rax,4), %r9
+# INTEL: imulzu r9, qword ptr [r8 + 4*rax + 291], 123456
+0x62,0x54,0xfc,0x18,0x69,0x8c,0x80,0x23,0x01,0x00,0x00,0x40,0xe2,0x01,0x00
diff --git a/llvm/test/MC/Disassembler/X86/apx/reverse-encoding.txt b/llvm/test/MC/Disassembler/X86/apx/reverse-encoding.txt
index 9e812e3..fd12d90 100644
--- a/llvm/test/MC/Disassembler/X86/apx/reverse-encoding.txt
+++ b/llvm/test/MC/Disassembler/X86/apx/reverse-encoding.txt
@@ -430,3 +430,17 @@
# ATT: ccmpoq {dfv=} %r16, %r17
# INTEL: ccmpo {dfv=} r17, r16
0x62,0xec,0x84,0x00,0x3b,0xc8
+
+## cfcmov
+
+# ATT: cfcmovbew %r16w, %r17w
+# INTEL: cfcmovbe r17w, r16w
+0x62,0xec,0x7d,0x08,0x46,0xc8
+
+# ATT: cfcmovbel %r16d, %r17d
+# INTEL: cfcmovbe r17d, r16d
+0x62,0xec,0x7c,0x08,0x46,0xc8
+
+# ATT: cfcmovbeq %r16, %r17
+# INTEL: cfcmovbe r17, r16
+0x62,0xec,0xfc,0x08,0x46,0xc8
diff --git a/llvm/test/MC/GOFF/ppa1.ll b/llvm/test/MC/GOFF/ppa1.ll
index 40fc9e9..13971c7 100644
--- a/llvm/test/MC/GOFF/ppa1.ll
+++ b/llvm/test/MC/GOFF/ppa1.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple s390x-ibm-zos < %s | FileCheck %s
; REQUIRES: systemz-registered-target
-; CHECK: @@EPM_void_test_0: * @void_test
+; CHECK: L#EPM_void_test_0: * @void_test
; CHECK: * XPLINK Routine Layout Entry
; CHECK: .long 12779717 * Eyecatcher 0x00C300C500C500
; CHECK: .short 197
@@ -11,9 +11,9 @@
; CHECK: * Entry Flags
; CHECK: * Bit 1: 1 = Leaf function
; CHECK: * Bit 2: 0 = Does not use alloca
-; CHECK: @@func_end0:
+; CHECK: L#func_end0:
; CHECK: .section ".ppa1"
-; CHECK: @@PPA1_void_test_0: * PPA1
+; CHECK: L#PPA1_void_test_0: * PPA1
; CHECK: .byte 2 * Version
; CHECK: .byte 206 * LE Signature X'CE'
; CHECK: .short 0 * Saved GPR Mask
@@ -25,8 +25,8 @@
; CHECK: .byte 0 * PPA1 Flags 3
; CHECK: .byte 129 * PPA1 Flags 4
; CHECK: .short 0 * Length/4 of Parms
-; CHECK: .long @@func_end0-@@EPM_void_test_0 * Length of Code
-; CHECK: .long @@EPM_void_test_0-@@PPA1_void_test_0
+; CHECK: .long L#func_end0-L#EPM_void_test_0 * Length of Code
+; CHECK: .long L#EPM_void_test_0-L#PPA1_void_test_0
; CHECK: .section ".text"
; CHECK: * -- End function
define void @void_test() {
diff --git a/llvm/test/MC/Hexagon/directive-attribute-err.s b/llvm/test/MC/Hexagon/directive-attribute-err.s
new file mode 100644
index 0000000..52b145b
--- /dev/null
+++ b/llvm/test/MC/Hexagon/directive-attribute-err.s
@@ -0,0 +1,24 @@
+/// Check .attribute parsing error cases.
+
+// RUN: not llvm-mc -triple=hexagon -filetype=asm %s 2>&1 \
+// RUN: | FileCheck %s
+
+ .attribute Tag_unknown_name, 0
+// CHECK: [[#@LINE-1]]:14: error: attribute name not recognized: Tag_unknown_name
+// CHECK-NEXT: .attribute Tag_unknown_name
+
+ .attribute [non_constant_expression], 0
+// CHECK: [[#@LINE-1]]:14: error: expected numeric constant
+// CHECK-NEXT: .attribute [non_constant_expression], 0
+
+ .attribute 42, "forty two"
+// CHECK: [[#@LINE-1]]:18: error: expected numeric constant
+// CHECK-NEXT: .attribute 42, "forty two"
+
+ .attribute Tag_arch, "v75"
+// CHECK: [[#@LINE-1]]:24: error: expected numeric constant
+// CHECK-NEXT: .attribute Tag_arch, "v75"
+
+ .attribute 0
+// CHECK: :[[#@LINE-1]]:15: error: expected comma
+// CHECK-NEXT: .attribute 0
diff --git a/llvm/test/MC/Hexagon/directive-attribute.s b/llvm/test/MC/Hexagon/directive-attribute.s
new file mode 100644
index 0000000..d7c8930
--- /dev/null
+++ b/llvm/test/MC/Hexagon/directive-attribute.s
@@ -0,0 +1,41 @@
+/// Check .attribute parsing.
+
+// RUN: llvm-mc -triple=hexagon -filetype=obj %s | llvm-readelf -A - | \
+// RUN: FileCheck %s --match-full-lines --implicit-check-not={{.}}
+
+.attribute 4, 71 // Tag_arch
+.attribute Tag_cabac, 1
+.attribute Tag_hvx_arch, 68
+.attribute 7, 1 // Tag_hvx_qfloat
+
+// CHECK: BuildAttributes {
+// CHECK-NEXT: FormatVersion: 0x41
+// CHECK-NEXT: Section 1 {
+// CHECK-NEXT: SectionLength: 25
+// CHECK-NEXT: Vendor: hexagon
+// CHECK-NEXT: Tag: Tag_File (0x1)
+// CHECK-NEXT: Size: 13
+// CHECK-NEXT: FileAttributes {
+// CHECK-NEXT: Attribute {
+// CHECK-NEXT: Tag: 4
+// CHECK-NEXT: TagName: arch
+// CHECK-NEXT: Value: 71
+// CHECK-NEXT: }
+// CHECK-NEXT: Attribute {
+// CHECK-NEXT: Tag: 10
+// CHECK-NEXT: TagName: cabac
+// CHECK-NEXT: Value: 1
+// CHECK-NEXT: }
+// CHECK-NEXT: Attribute {
+// CHECK-NEXT: Tag: 5
+// CHECK-NEXT: TagName: hvx_arch
+// CHECK-NEXT: Value: 68
+// CHECK-NEXT: }
+// CHECK-NEXT: Attribute {
+// CHECK-NEXT: Tag: 7
+// CHECK-NEXT: TagName: hvx_qfloat
+// CHECK-NEXT: Value: 1
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: }
diff --git a/llvm/test/MC/Hexagon/hexagon_attributes.s b/llvm/test/MC/Hexagon/hexagon_attributes.s
new file mode 100644
index 0000000..e905360
--- /dev/null
+++ b/llvm/test/MC/Hexagon/hexagon_attributes.s
@@ -0,0 +1,94 @@
+/// Check that file attributes are recorded in a .hexagon.attributes section.
+
+q0&=vcmp.gt(v0.bf,v0.bf) // hvxv73, hvx-qfloat
+r3:2=cround(r1:0,#0x0) // v67, audio
+v3:0.w=vrmpyz(v0.b,r0.b) // hvxv73, zreg
+v1:0.sf=vadd(v0.bf,v0.bf) // hvxv73, hvx-ieee-fp
+
+// RUN: llvm-mc --mattr=+v67,+hvxv73,+hvx-qfloat,+hvx-ieee-fp,+zreg,+audio %s \
+// RUN: -triple=hexagon -filetype=obj --hexagon-add-build-attributes -o %t.o
+
+// RUN: llvm-readelf -A %t.o | \
+// RUN: FileCheck %s --match-full-lines --implicit-check-not={{.}} --check-prefix=READELF
+
+/// llvm-objdump should be able to determine subtarget features
+/// without manually passing in features when an attribute section is present.
+// RUN: llvm-objdump -d %t.o | FileCheck %s --check-prefix=OBJDUMP
+
+// RUN: llvm-mc --mattr=+v67,+hvxv73,+hvx-qfloat,+hvx-ieee-fp,+zreg,+audio %s \
+// RUN: -triple=hexagon -filetype=asm --hexagon-add-build-attributes | \
+// RUN: FileCheck %s --match-full-lines --implicit-check-not={{.}} --check-prefix=ASM
+
+// READELF: BuildAttributes {
+// READELF-NEXT: FormatVersion: 0x41
+// READELF-NEXT: Section 1 {
+// READELF-NEXT: SectionLength: 31
+// READELF-NEXT: Vendor: hexagon
+// READELF-NEXT: Tag: Tag_File (0x1)
+// READELF-NEXT: Size: 19
+// READELF-NEXT: FileAttributes {
+// READELF-NEXT: Attribute {
+// READELF-NEXT: Tag: 4
+// READELF-NEXT: TagName: arch
+// READELF-NEXT: Value: 67
+// READELF-NEXT: }
+// READELF-NEXT: Attribute {
+// READELF-NEXT: Tag: 5
+// READELF-NEXT: TagName: hvx_arch
+// READELF-NEXT: Value: 73
+// READELF-NEXT: }
+// READELF-NEXT: Attribute {
+// READELF-NEXT: Tag: 6
+// READELF-NEXT: TagName: hvx_ieeefp
+// READELF-NEXT: Value: 1
+// READELF-NEXT: }
+// READELF-NEXT: Attribute {
+// READELF-NEXT: Tag: 7
+// READELF-NEXT: TagName: hvx_qfloat
+// READELF-NEXT: Value: 1
+// READELF-NEXT: }
+// READELF-NEXT: Attribute {
+// READELF-NEXT: Tag: 8
+// READELF-NEXT: TagName: zreg
+// READELF-NEXT: Value: 1
+// READELF-NEXT: }
+// READELF-NEXT: Attribute {
+// READELF-NEXT: Tag: 9
+// READELF-NEXT: TagName: audio
+// READELF-NEXT: Value: 1
+// READELF-NEXT: }
+// READELF-NEXT: Attribute {
+// READELF-NEXT: Tag: 10
+// READELF-NEXT: TagName: cabac
+// READELF-NEXT: Value: 1
+// READELF-NEXT: }
+// READELF-NEXT: }
+// READELF-NEXT: }
+// READELF-NEXT: }
+
+// OBJDUMP: 1c80e0d0 { q0 &= vcmp.gt(v0.bf,v0.bf) }
+// OBJDUMP-NEXT: 8ce0c042 { r3:2 = cround(r1:0,#0x0) }
+// OBJDUMP-NEXT: 19e8c000 { v3:0.w = vrmpyz(v0.b,r0.b) }
+// OBJDUMP-NEXT: 1d40e0c0 { v1:0.sf = vadd(v0.bf,v0.bf) }
+
+// ASM: .attribute 4, 67 // Tag_arch
+// ASM-NEXT: .attribute 5, 73 // Tag_hvx_arch
+// ASM-NEXT: .attribute 6, 1 // Tag_hvx_ieeefp
+// ASM-NEXT: .attribute 7, 1 // Tag_hvx_qfloat
+// ASM-NEXT: .attribute 8, 1 // Tag_zreg
+// ASM-NEXT: .attribute 9, 1 // Tag_audio
+// ASM-NEXT: .attribute 10, 1 // Tag_cabac
+// ASM-NEXT: .text
+// ASM-EMPTY:
+// ASM-NEXT: {
+// ASM-NEXT: q0 &= vcmp.gt(v0.bf,v0.bf)
+// ASM-NEXT: }
+// ASM-NEXT: {
+// ASM-NEXT: r3:2 = cround(r1:0,#0)
+// ASM-NEXT: }
+// ASM-NEXT: {
+// ASM-NEXT: v3:0.w = vrmpyz(v0.b,r0.b)
+// ASM-NEXT: }
+// ASM-NEXT: {
+// ASM-NEXT: v1:0.sf = vadd(v0.bf,v0.bf)
+// ASM-NEXT: }
diff --git a/llvm/test/MC/LoongArch/Macros/macros-li-bad.s b/llvm/test/MC/LoongArch/Macros/macros-li-bad.s
index 194b86bf..c880a01 100644
--- a/llvm/test/MC/LoongArch/Macros/macros-li-bad.s
+++ b/llvm/test/MC/LoongArch/Macros/macros-li-bad.s
@@ -5,3 +5,9 @@ li.w $a0, 0x100000000
li.d $a0, 0x10000000000000000
# CHECK: :[[#@LINE-1]]:11: error: unknown operand
+
+li.w $a0, non_const_val
+# CHECK: :[[#@LINE-1]]:11: error: operand must be a 32 bit immediate
+
+li.d $a0, non_const_val
+# CHECK: :[[#@LINE-1]]:11: error: operand must be a 64 bit immediate
diff --git a/llvm/test/MC/RISCV/rv32-machine-csr-names.s b/llvm/test/MC/RISCV/rv32-machine-csr-names.s
index e7a6d9c..016f448 100644
--- a/llvm/test/MC/RISCV/rv32-machine-csr-names.s
+++ b/llvm/test/MC/RISCV/rv32-machine-csr-names.s
@@ -1149,3 +1149,59 @@ csrrs t2, 0x319, zero
csrrs t1, miph, zero
# uimm12
csrrs t2, 0x354, zero
+
+################################################
+# Resumable Non-Maskable Interrupts (Smrnmi) CSRs
+################################################
+
+# mnscratch
+# name
+# CHECK-INST: csrrs t1, mnscratch, zero
+# CHECK-ENC: encoding: [0x73,0x23,0x00,0x74]
+# CHECK-INST-ALIAS: csrr t1, mnscratch
+# uimm12
+# CHECK-INST: csrrs t2, mnscratch, zero
+# CHECK-ENC: encoding: [0xf3,0x23,0x00,0x74]
+# CHECK-INST-ALIAS: csrr t2, mnscratch
+csrrs t1, mnscratch, zero
+# uimm12
+csrrs t2, 0x740, zero
+
+# mnepc
+# name
+# CHECK-INST: csrrs t1, mnepc, zero
+# CHECK-ENC: encoding: [0x73,0x23,0x10,0x74]
+# CHECK-INST-ALIAS: csrr t1, mnepc
+# uimm12
+# CHECK-INST: csrrs t2, mnepc, zero
+# CHECK-ENC: encoding: [0xf3,0x23,0x10,0x74]
+# CHECK-INST-ALIAS: csrr t2, mnepc
+csrrs t1, mnepc, zero
+# uimm12
+csrrs t2, 0x741, zero
+
+# mncause
+# name
+# CHECK-INST: csrrs t1, mncause, zero
+# CHECK-ENC: encoding: [0x73,0x23,0x20,0x74]
+# CHECK-INST-ALIAS: csrr t1, mncause
+# uimm12
+# CHECK-INST: csrrs t2, mncause, zero
+# CHECK-ENC: encoding: [0xf3,0x23,0x20,0x74]
+# CHECK-INST-ALIAS: csrr t2, mncause
+csrrs t1, mncause, zero
+# uimm12
+csrrs t2, 0x742, zero
+
+# mnstatus
+# name
+# CHECK-INST: csrrs t1, mnstatus, zero
+# CHECK-ENC: encoding: [0x73,0x23,0x40,0x74]
+# CHECK-INST-ALIAS: csrr t1, mnstatus
+# uimm12
+# CHECK-INST: csrrs t2, mnstatus, zero
+# CHECK-ENC: encoding: [0xf3,0x23,0x40,0x74]
+# CHECK-INST-ALIAS: csrr t2, mnstatus
+csrrs t1, mnstatus, zero
+# uimm12
+csrrs t2, 0x744, zero
diff --git a/llvm/test/MC/RISCV/rv32zcmp-invalid.s b/llvm/test/MC/RISCV/rv32zcmp-invalid.s
index cb99bba..1acea18 100644
--- a/llvm/test/MC/RISCV/rv32zcmp-invalid.s
+++ b/llvm/test/MC/RISCV/rv32zcmp-invalid.s
@@ -15,3 +15,15 @@ cm.popretz {ra, s0-s10}, 112
# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
cm.popretz {ra, s0-s1}, 112
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.push {ra}, 16
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.pop {ra, s0-s1}, -32
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.push {ra}, -8
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.pop {ra, s0-s1}, -40
diff --git a/llvm/test/MC/RISCV/rv64zcmp-invalid.s b/llvm/test/MC/RISCV/rv64zcmp-invalid.s
index 1039345..bf34554 100644
--- a/llvm/test/MC/RISCV/rv64zcmp-invalid.s
+++ b/llvm/test/MC/RISCV/rv64zcmp-invalid.s
@@ -15,3 +15,15 @@ cm.popretz {ra, s0-s10}, 112
# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
cm.popretz {ra, s0-s1}, 112
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.push {ra}, 16
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.pop {ra, s0-s1}, -32
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.push {ra}, -15
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.pop {ra, s0-s1}, -33
diff --git a/llvm/test/MC/RISCV/rvv/zvkned-invalid.s b/llvm/test/MC/RISCV/rvv/zvkned-invalid.s
new file mode 100644
index 0000000..9230bc0
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/zvkned-invalid.s
@@ -0,0 +1,23 @@
+# RUN: not llvm-mc -triple=riscv64 --mattr=+zve64x --mattr=+zvkned %s 2>&1 \
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+
+vaesdf.vs v10, v10
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vaesdf.vs v10, v10
+
+vaesef.vs v11, v11
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vaesef.vs v11, v11
+
+vaesdm.vs v12, v12
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vaesdm.vs v12, v12
+
+vaesem.vs v13, v13
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vaesem.vs v13, v13
+
+vaesz.vs v14, v14
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vaesz.vs v14, v14
+
diff --git a/llvm/test/MC/RISCV/rvv/zvknh-invalid.s b/llvm/test/MC/RISCV/rvv/zvknh-invalid.s
new file mode 100644
index 0000000..d990251
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/zvknh-invalid.s
@@ -0,0 +1,26 @@
+# RUN: not llvm-mc -triple=riscv64 --mattr=+zve64x --mattr=+zvknha %s 2>&1 \
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+
+vsha2ms.vv v10, v10, v11
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsha2ms.vv v10, v10, v11
+
+vsha2ms.vv v11, v10, v11
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsha2ms.vv v11, v10, v11
+
+vsha2ch.vv v12, v12, v11
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsha2ch.vv v12, v12, v11
+
+vsha2ch.vv v11, v12, v11
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsha2ch.vv v11, v12, v11
+
+vsha2cl.vv v13, v13, v15
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsha2cl.vv v13, v13, v15
+
+vsha2cl.vv v15, v13, v15
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsha2cl.vv v15, v13, v15
diff --git a/llvm/test/MC/RISCV/rvv/zvksed-invalid.s b/llvm/test/MC/RISCV/rvv/zvksed-invalid.s
new file mode 100644
index 0000000..41df8d3
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/zvksed-invalid.s
@@ -0,0 +1,6 @@
+# RUN: not llvm-mc -triple=riscv64 --mattr=+zve64x --mattr=+zvksed %s 2>&1 \
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+
+vsm4r.vs v10, v10
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsm4r.vs v10, v10
diff --git a/llvm/test/MC/RISCV/rvv/zvksh-invalid.s b/llvm/test/MC/RISCV/rvv/zvksh-invalid.s
new file mode 100644
index 0000000..cccec44
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/zvksh-invalid.s
@@ -0,0 +1,10 @@
+# RUN: not llvm-mc -triple=riscv64 --mattr=+zve64x --mattr=+zvksh %s 2>&1 \
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+
+vsm3me.vv v10, v10, v8
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsm3me.vv v10, v10, v8
+
+vsm3c.vi v9, v9, 7
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsm3c.vi v9, v9, 7
diff --git a/llvm/test/MC/RISCV/rvv/zvksh.s b/llvm/test/MC/RISCV/rvv/zvksh.s
index ca6cb49..06251ff 100644
--- a/llvm/test/MC/RISCV/rvv/zvksh.s
+++ b/llvm/test/MC/RISCV/rvv/zvksh.s
@@ -19,3 +19,10 @@ vsm3me.vv v10, v9, v8
# CHECK-ENCODING: [0x77,0x25,0x94,0x82]
# CHECK-ERROR: instruction requires the following: 'Zvksh' (SM3 Hash Function Instructions){{$}}
# CHECK-UNKNOWN: 77 25 94 82 <unknown>
+
+# vs1 is allowed to overlap, but not vs2.
+vsm3me.vv v10, v9, v10
+# CHECK-INST: vsm3me.vv v10, v9, v10
+# CHECK-ENCODING: [0x77,0x25,0x95,0x82]
+# CHECK-ERROR: instruction requires the following: 'Zvksh' (SM3 Hash Function Instructions){{$}}
+# CHECK-UNKNOWN: 77 25 95 82 <unknown>
diff --git a/llvm/test/MC/RISCV/xsifive-invalid.s b/llvm/test/MC/RISCV/xsifive-invalid.s
new file mode 100644
index 0000000..5210d29
--- /dev/null
+++ b/llvm/test/MC/RISCV/xsifive-invalid.s
@@ -0,0 +1,20 @@
+# RUN: not llvm-mc -triple riscv32 < %s 2>&1 | FileCheck %s
+# RUN: not llvm-mc -triple riscv64 < %s 2>&1 | FileCheck %s
+
+sf.cflush.d.l1 0x10 # CHECK: :[[@LINE]]:16: error: invalid operand for instruction
+
+sf.cdiscard.d.l1 0x10 # CHECK: :[[@LINE]]:18: error: invalid operand for instruction
+
+sf.cflush.d.l1 x0 # CHECK: :[[@LINE]]:1: error: instruction requires the following: 'XSiFivecflushdlone' (SiFive sf.cflush.d.l1 Instruction){{$}}
+
+sf.cflush.d.l1 x7 # CHECK: :[[@LINE]]:1: error: instruction requires the following: 'XSiFivecflushdlone' (SiFive sf.cflush.d.l1 Instruction){{$}}
+
+sf.cdiscard.d.l1 x0 # CHECK: :[[@LINE]]:1: error: instruction requires the following: 'XSiFivecdiscarddlone' (SiFive sf.cdiscard.d.l1 Instruction){{$}}
+
+sf.cdiscard.d.l1 x7 # CHECK: :[[@LINE]]:1: error: instruction requires the following: 'XSiFivecdiscarddlone' (SiFive sf.cdiscard.d.l1 Instruction){{$}}
+
+sf.cease x1 # CHECK: :[[@LINE]]:10: error: invalid operand for instruction
+
+sf.cease 0x10 # CHECK: :[[@LINE]]:10: error: invalid operand for instruction
+
+sf.cease # CHECK: :[[@LINE]]:1: error: instruction requires the following: 'XSfcease' (SiFive sf.cease Instruction){{$}}
diff --git a/llvm/test/MC/RISCV/xsifive-valid.s b/llvm/test/MC/RISCV/xsifive-valid.s
new file mode 100644
index 0000000..8aa0ab1
--- /dev/null
+++ b/llvm/test/MC/RISCV/xsifive-valid.s
@@ -0,0 +1,36 @@
+# RUN: llvm-mc %s -triple=riscv32 -mattr=+xsifivecdiscarddlone,+xsifivecflushdlone,+xsfcease -riscv-no-aliases -show-encoding \
+# RUN: | FileCheck -check-prefixes=CHECK-ENC,CHECK-INST %s
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+xsifivecdiscarddlone,+xsifivecflushdlone,+xsfcease -riscv-no-aliases -show-encoding \
+# RUN: | FileCheck -check-prefixes=CHECK-ENC,CHECK-INST %s
+# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+xsifivecdiscarddlone,+xsifivecflushdlone,+xsfcease < %s \
+# RUN: | llvm-objdump --mattr=+xsifivecdiscarddlone,+xsifivecflushdlone,+xsfcease -M no-aliases -d - \
+# RUN: | FileCheck -check-prefix=CHECK-INST %s
+# RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+xsifivecdiscarddlone,+xsifivecflushdlone,+xsfcease < %s \
+# RUN: | llvm-objdump --mattr=+xsifivecdiscarddlone,+xsifivecflushdlone,+xsfcease -M no-aliases -d - \
+# RUN: | FileCheck -check-prefix=CHECK-INST %s
+
+# CHECK-INST: sf.cflush.d.l1 zero
+# CHECK-ENC: encoding: [0x73,0x00,0x00,0xfc]
+sf.cflush.d.l1 x0
+# CHECK-INST: sf.cflush.d.l1 zero
+# CHECK-ENC: encoding: [0x73,0x00,0x00,0xfc]
+sf.cflush.d.l1
+
+# CHECK-INST: sf.cflush.d.l1 t2
+# CHECK-ENC: encoding: [0x73,0x80,0x03,0xfc]
+sf.cflush.d.l1 x7
+
+# CHECK-INST: sf.cdiscard.d.l1 zero
+# CHECK-ENC: encoding: [0x73,0x00,0x20,0xfc]
+sf.cdiscard.d.l1 x0
+# CHECK-INST: sf.cdiscard.d.l1 zero
+# CHECK-ENC: encoding: [0x73,0x00,0x20,0xfc]
+sf.cdiscard.d.l1
+
+# CHECK-INST: sf.cdiscard.d.l1 t2
+# CHECK-ENC: encoding: [0x73,0x80,0x23,0xfc]
+sf.cdiscard.d.l1 x7
+
+# CHECK-INST: sf.cease
+# CHECK-ENC: encoding: [0x73,0x00,0x50,0x30]
+sf.cease
diff --git a/llvm/test/MC/WebAssembly/module-asm.ll b/llvm/test/MC/WebAssembly/module-asm.ll
new file mode 100644
index 0000000..d451bec
--- /dev/null
+++ b/llvm/test/MC/WebAssembly/module-asm.ll
@@ -0,0 +1,25 @@
+; Ensure that symbols from module ASM are properly exported.
+;
+; Regression test for https://github.com/llvm/llvm-project/issues/85578.
+
+; RUN: llc -mtriple=wasm32-unknown-unknown -filetype=obj %s -o - | obj2yaml | FileCheck %s
+
+module asm "test_func:"
+module asm " .globl test_func"
+module asm " .functype test_func (i32) -> (i32)"
+module asm " .export_name test_func, test_export"
+module asm " end_function"
+
+; CHECK: - Type: TYPE
+; CHECK-NEXT: Signatures:
+; CHECK-NEXT: - Index: 0
+; CHECK-NEXT: ParamTypes:
+; CHECK-NEXT: - I32
+; CHECK-NEXT: ReturnTypes:
+; CHECK-NEXT: - I32
+
+; CHECK: - Type: EXPORT
+; CHECK-NEXT: Exports:
+; CHECK-NEXT: - Name: test_export
+; CHECK-NEXT: Kind: FUNCTION
+; CHECK-NEXT: Index: 0
diff --git a/llvm/test/MC/X86/apx/cfcmov-att.s b/llvm/test/MC/X86/apx/cfcmov-att.s
new file mode 100644
index 0000000..a6947ca
--- /dev/null
+++ b/llvm/test/MC/X86/apx/cfcmov-att.s
@@ -0,0 +1,841 @@
+# RUN: llvm-mc -triple x86_64 --show-encoding %s | FileCheck %s
+
+# CHECK: cfcmovbw %r17w, %r21w, %r25w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x42,0xe9]
+ cfcmovbw %r17w, %r21w, %r25w
+
+# CHECK: cfcmovbw %r17w, %r21w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x42,0xcd]
+ cfcmovbw %r17w, %r21w
+
+# CHECK: cfcmovbw %r17w, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x42,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbw %r17w, 291(%r28,%r29,4)
+
+# CHECK: cfcmovbl %r18d, %r22d, %r26d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x42,0xf2]
+ cfcmovbl %r18d, %r22d, %r26d
+
+# CHECK: cfcmovbl %r18d, %r22d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x42,0xd6]
+ cfcmovbl %r18d, %r22d
+
+# CHECK: cfcmovbl %r18d, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x42,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbl %r18d, 291(%r28,%r29,4)
+
+# CHECK: cfcmovbq %r19, %r23, %r27
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x42,0xfb]
+ cfcmovbq %r19, %r23, %r27
+
+# CHECK: cfcmovbq %r19, %r23
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x42,0xdf]
+ cfcmovbq %r19, %r23
+
+# CHECK: cfcmovbq %r19, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x42,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbq %r19, 291(%r28,%r29,4)
+
+# CHECK: cfcmovbw 291(%r28,%r29,4), %r17w, %r21w
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x42,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbw 291(%r28,%r29,4), %r17w, %r21w
+
+# CHECK: cfcmovbw 291(%r28,%r29,4), %r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x42,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbw 291(%r28,%r29,4), %r17w
+
+# CHECK: cfcmovbl 291(%r28,%r29,4), %r18d, %r22d
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x42,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbl 291(%r28,%r29,4), %r18d, %r22d
+
+# CHECK: cfcmovbl 291(%r28,%r29,4), %r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x42,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbl 291(%r28,%r29,4), %r18d
+
+# CHECK: cfcmovbq 291(%r28,%r29,4), %r19, %r23
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x42,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbq 291(%r28,%r29,4), %r19, %r23
+
+# CHECK: cfcmovbq 291(%r28,%r29,4), %r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x42,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbq 291(%r28,%r29,4), %r19
+
+# CHECK: cfcmovbew %r17w, %r21w, %r25w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x46,0xe9]
+ cfcmovbew %r17w, %r21w, %r25w
+
+# CHECK: cfcmovbew %r17w, %r21w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x46,0xcd]
+ cfcmovbew %r17w, %r21w
+
+# CHECK: cfcmovbew %r17w, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x46,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbew %r17w, 291(%r28,%r29,4)
+
+# CHECK: cfcmovbel %r18d, %r22d, %r26d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x46,0xf2]
+ cfcmovbel %r18d, %r22d, %r26d
+
+# CHECK: cfcmovbel %r18d, %r22d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x46,0xd6]
+ cfcmovbel %r18d, %r22d
+
+# CHECK: cfcmovbel %r18d, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x46,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbel %r18d, 291(%r28,%r29,4)
+
+# CHECK: cfcmovbeq %r19, %r23, %r27
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x46,0xfb]
+ cfcmovbeq %r19, %r23, %r27
+
+# CHECK: cfcmovbeq %r19, %r23
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x46,0xdf]
+ cfcmovbeq %r19, %r23
+
+# CHECK: cfcmovbeq %r19, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x46,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbeq %r19, 291(%r28,%r29,4)
+
+# CHECK: cfcmovbew 291(%r28,%r29,4), %r17w, %r21w
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x46,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbew 291(%r28,%r29,4), %r17w, %r21w
+
+# CHECK: cfcmovbew 291(%r28,%r29,4), %r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x46,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbew 291(%r28,%r29,4), %r17w
+
+# CHECK: cfcmovbel 291(%r28,%r29,4), %r18d, %r22d
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x46,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbel 291(%r28,%r29,4), %r18d, %r22d
+
+# CHECK: cfcmovbel 291(%r28,%r29,4), %r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x46,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbel 291(%r28,%r29,4), %r18d
+
+# CHECK: cfcmovbeq 291(%r28,%r29,4), %r19, %r23
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x46,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbeq 291(%r28,%r29,4), %r19, %r23
+
+# CHECK: cfcmovbeq 291(%r28,%r29,4), %r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x46,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbeq 291(%r28,%r29,4), %r19
+
+# CHECK: cfcmovlw %r17w, %r21w, %r25w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x4c,0xe9]
+ cfcmovlw %r17w, %r21w, %r25w
+
+# CHECK: cfcmovlw %r17w, %r21w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x4c,0xcd]
+ cfcmovlw %r17w, %r21w
+
+# CHECK: cfcmovlw %r17w, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x4c,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovlw %r17w, 291(%r28,%r29,4)
+
+# CHECK: cfcmovll %r18d, %r22d, %r26d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x4c,0xf2]
+ cfcmovll %r18d, %r22d, %r26d
+
+# CHECK: cfcmovll %r18d, %r22d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x4c,0xd6]
+ cfcmovll %r18d, %r22d
+
+# CHECK: cfcmovll %r18d, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x4c,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovll %r18d, 291(%r28,%r29,4)
+
+# CHECK: cfcmovlq %r19, %r23, %r27
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x4c,0xfb]
+ cfcmovlq %r19, %r23, %r27
+
+# CHECK: cfcmovlq %r19, %r23
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x4c,0xdf]
+ cfcmovlq %r19, %r23
+
+# CHECK: cfcmovlq %r19, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x4c,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovlq %r19, 291(%r28,%r29,4)
+
+# CHECK: cfcmovlw 291(%r28,%r29,4), %r17w, %r21w
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x4c,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovlw 291(%r28,%r29,4), %r17w, %r21w
+
+# CHECK: cfcmovlw 291(%r28,%r29,4), %r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x4c,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovlw 291(%r28,%r29,4), %r17w
+
+# CHECK: cfcmovll 291(%r28,%r29,4), %r18d, %r22d
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x4c,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovll 291(%r28,%r29,4), %r18d, %r22d
+
+# CHECK: cfcmovll 291(%r28,%r29,4), %r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x4c,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovll 291(%r28,%r29,4), %r18d
+
+# CHECK: cfcmovlq 291(%r28,%r29,4), %r19, %r23
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x4c,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovlq 291(%r28,%r29,4), %r19, %r23
+
+# CHECK: cfcmovlq 291(%r28,%r29,4), %r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x4c,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovlq 291(%r28,%r29,4), %r19
+
+# CHECK: cfcmovlew %r17w, %r21w, %r25w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x4e,0xe9]
+ cfcmovlew %r17w, %r21w, %r25w
+
+# CHECK: cfcmovlew %r17w, %r21w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x4e,0xcd]
+ cfcmovlew %r17w, %r21w
+
+# CHECK: cfcmovlew %r17w, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x4e,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovlew %r17w, 291(%r28,%r29,4)
+
+# CHECK: cfcmovlel %r18d, %r22d, %r26d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x4e,0xf2]
+ cfcmovlel %r18d, %r22d, %r26d
+
+# CHECK: cfcmovlel %r18d, %r22d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x4e,0xd6]
+ cfcmovlel %r18d, %r22d
+
+# CHECK: cfcmovlel %r18d, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x4e,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovlel %r18d, 291(%r28,%r29,4)
+
+# CHECK: cfcmovleq %r19, %r23, %r27
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x4e,0xfb]
+ cfcmovleq %r19, %r23, %r27
+
+# CHECK: cfcmovleq %r19, %r23
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x4e,0xdf]
+ cfcmovleq %r19, %r23
+
+# CHECK: cfcmovleq %r19, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x4e,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovleq %r19, 291(%r28,%r29,4)
+
+# CHECK: cfcmovlew 291(%r28,%r29,4), %r17w, %r21w
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x4e,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovlew 291(%r28,%r29,4), %r17w, %r21w
+
+# CHECK: cfcmovlew 291(%r28,%r29,4), %r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x4e,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovlew 291(%r28,%r29,4), %r17w
+
+# CHECK: cfcmovlel 291(%r28,%r29,4), %r18d, %r22d
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x4e,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovlel 291(%r28,%r29,4), %r18d, %r22d
+
+# CHECK: cfcmovlel 291(%r28,%r29,4), %r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x4e,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovlel 291(%r28,%r29,4), %r18d
+
+# CHECK: cfcmovleq 291(%r28,%r29,4), %r19, %r23
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x4e,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovleq 291(%r28,%r29,4), %r19, %r23
+
+# CHECK: cfcmovleq 291(%r28,%r29,4), %r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x4e,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovleq 291(%r28,%r29,4), %r19
+
+# CHECK: cfcmovaew %r17w, %r21w, %r25w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x43,0xe9]
+ cfcmovaew %r17w, %r21w, %r25w
+
+# CHECK: cfcmovaew %r17w, %r21w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x43,0xcd]
+ cfcmovaew %r17w, %r21w
+
+# CHECK: cfcmovaew %r17w, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x43,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovaew %r17w, 291(%r28,%r29,4)
+
+# CHECK: cfcmovael %r18d, %r22d, %r26d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x43,0xf2]
+ cfcmovael %r18d, %r22d, %r26d
+
+# CHECK: cfcmovael %r18d, %r22d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x43,0xd6]
+ cfcmovael %r18d, %r22d
+
+# CHECK: cfcmovael %r18d, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x43,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovael %r18d, 291(%r28,%r29,4)
+
+# CHECK: cfcmovaeq %r19, %r23, %r27
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x43,0xfb]
+ cfcmovaeq %r19, %r23, %r27
+
+# CHECK: cfcmovaeq %r19, %r23
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x43,0xdf]
+ cfcmovaeq %r19, %r23
+
+# CHECK: cfcmovaeq %r19, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x43,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovaeq %r19, 291(%r28,%r29,4)
+
+# CHECK: cfcmovaew 291(%r28,%r29,4), %r17w, %r21w
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x43,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovaew 291(%r28,%r29,4), %r17w, %r21w
+
+# CHECK: cfcmovaew 291(%r28,%r29,4), %r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x43,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovaew 291(%r28,%r29,4), %r17w
+
+# CHECK: cfcmovael 291(%r28,%r29,4), %r18d, %r22d
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x43,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovael 291(%r28,%r29,4), %r18d, %r22d
+
+# CHECK: cfcmovael 291(%r28,%r29,4), %r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x43,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovael 291(%r28,%r29,4), %r18d
+
+# CHECK: cfcmovaeq 291(%r28,%r29,4), %r19, %r23
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x43,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovaeq 291(%r28,%r29,4), %r19, %r23
+
+# CHECK: cfcmovaeq 291(%r28,%r29,4), %r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x43,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovaeq 291(%r28,%r29,4), %r19
+
+# CHECK: cfcmovaw %r17w, %r21w, %r25w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x47,0xe9]
+ cfcmovaw %r17w, %r21w, %r25w
+
+# CHECK: cfcmovaw %r17w, %r21w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x47,0xcd]
+ cfcmovaw %r17w, %r21w
+
+# CHECK: cfcmovaw %r17w, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x47,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovaw %r17w, 291(%r28,%r29,4)
+
+# CHECK: cfcmoval %r18d, %r22d, %r26d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x47,0xf2]
+ cfcmoval %r18d, %r22d, %r26d
+
+# CHECK: cfcmoval %r18d, %r22d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x47,0xd6]
+ cfcmoval %r18d, %r22d
+
+# CHECK: cfcmoval %r18d, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x47,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmoval %r18d, 291(%r28,%r29,4)
+
+# CHECK: cfcmovaq %r19, %r23, %r27
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x47,0xfb]
+ cfcmovaq %r19, %r23, %r27
+
+# CHECK: cfcmovaq %r19, %r23
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x47,0xdf]
+ cfcmovaq %r19, %r23
+
+# CHECK: cfcmovaq %r19, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x47,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovaq %r19, 291(%r28,%r29,4)
+
+# CHECK: cfcmovaw 291(%r28,%r29,4), %r17w, %r21w
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x47,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovaw 291(%r28,%r29,4), %r17w, %r21w
+
+# CHECK: cfcmovaw 291(%r28,%r29,4), %r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x47,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovaw 291(%r28,%r29,4), %r17w
+
+# CHECK: cfcmoval 291(%r28,%r29,4), %r18d, %r22d
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x47,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmoval 291(%r28,%r29,4), %r18d, %r22d
+
+# CHECK: cfcmoval 291(%r28,%r29,4), %r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x47,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmoval 291(%r28,%r29,4), %r18d
+
+# CHECK: cfcmovaq 291(%r28,%r29,4), %r19, %r23
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x47,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovaq 291(%r28,%r29,4), %r19, %r23
+
+# CHECK: cfcmovaq 291(%r28,%r29,4), %r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x47,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovaq 291(%r28,%r29,4), %r19
+
+# CHECK: cfcmovgew %r17w, %r21w, %r25w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x4d,0xe9]
+ cfcmovgew %r17w, %r21w, %r25w
+
+# CHECK: cfcmovgew %r17w, %r21w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x4d,0xcd]
+ cfcmovgew %r17w, %r21w
+
+# CHECK: cfcmovgew %r17w, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x4d,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovgew %r17w, 291(%r28,%r29,4)
+
+# CHECK: cfcmovgel %r18d, %r22d, %r26d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x4d,0xf2]
+ cfcmovgel %r18d, %r22d, %r26d
+
+# CHECK: cfcmovgel %r18d, %r22d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x4d,0xd6]
+ cfcmovgel %r18d, %r22d
+
+# CHECK: cfcmovgel %r18d, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x4d,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovgel %r18d, 291(%r28,%r29,4)
+
+# CHECK: cfcmovgeq %r19, %r23, %r27
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x4d,0xfb]
+ cfcmovgeq %r19, %r23, %r27
+
+# CHECK: cfcmovgeq %r19, %r23
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x4d,0xdf]
+ cfcmovgeq %r19, %r23
+
+# CHECK: cfcmovgeq %r19, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x4d,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovgeq %r19, 291(%r28,%r29,4)
+
+# CHECK: cfcmovgew 291(%r28,%r29,4), %r17w, %r21w
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x4d,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovgew 291(%r28,%r29,4), %r17w, %r21w
+
+# CHECK: cfcmovgew 291(%r28,%r29,4), %r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x4d,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovgew 291(%r28,%r29,4), %r17w
+
+# CHECK: cfcmovgel 291(%r28,%r29,4), %r18d, %r22d
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x4d,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovgel 291(%r28,%r29,4), %r18d, %r22d
+
+# CHECK: cfcmovgel 291(%r28,%r29,4), %r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x4d,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovgel 291(%r28,%r29,4), %r18d
+
+# CHECK: cfcmovgeq 291(%r28,%r29,4), %r19, %r23
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x4d,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovgeq 291(%r28,%r29,4), %r19, %r23
+
+# CHECK: cfcmovgeq 291(%r28,%r29,4), %r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x4d,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovgeq 291(%r28,%r29,4), %r19
+
+# CHECK: cfcmovnow %r17w, %r21w, %r25w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x41,0xe9]
+ cfcmovnow %r17w, %r21w, %r25w
+
+# CHECK: cfcmovnow %r17w, %r21w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x41,0xcd]
+ cfcmovnow %r17w, %r21w
+
+# CHECK: cfcmovnow %r17w, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x41,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnow %r17w, 291(%r28,%r29,4)
+
+# CHECK: cfcmovnol %r18d, %r22d, %r26d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x41,0xf2]
+ cfcmovnol %r18d, %r22d, %r26d
+
+# CHECK: cfcmovnol %r18d, %r22d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x41,0xd6]
+ cfcmovnol %r18d, %r22d
+
+# CHECK: cfcmovnol %r18d, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x41,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnol %r18d, 291(%r28,%r29,4)
+
+# CHECK: cfcmovnoq %r19, %r23, %r27
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x41,0xfb]
+ cfcmovnoq %r19, %r23, %r27
+
+# CHECK: cfcmovnoq %r19, %r23
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x41,0xdf]
+ cfcmovnoq %r19, %r23
+
+# CHECK: cfcmovnoq %r19, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x41,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnoq %r19, 291(%r28,%r29,4)
+
+# CHECK: cfcmovnow 291(%r28,%r29,4), %r17w, %r21w
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x41,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnow 291(%r28,%r29,4), %r17w, %r21w
+
+# CHECK: cfcmovnow 291(%r28,%r29,4), %r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x41,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnow 291(%r28,%r29,4), %r17w
+
+# CHECK: cfcmovnol 291(%r28,%r29,4), %r18d, %r22d
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x41,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnol 291(%r28,%r29,4), %r18d, %r22d
+
+# CHECK: cfcmovnol 291(%r28,%r29,4), %r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x41,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnol 291(%r28,%r29,4), %r18d
+
+# CHECK: cfcmovnoq 291(%r28,%r29,4), %r19, %r23
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x41,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnoq 291(%r28,%r29,4), %r19, %r23
+
+# CHECK: cfcmovnoq 291(%r28,%r29,4), %r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x41,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnoq 291(%r28,%r29,4), %r19
+
+# CHECK: cfcmovnpw %r17w, %r21w, %r25w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x4b,0xe9]
+ cfcmovnpw %r17w, %r21w, %r25w
+
+# CHECK: cfcmovnpw %r17w, %r21w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x4b,0xcd]
+ cfcmovnpw %r17w, %r21w
+
+# CHECK: cfcmovnpw %r17w, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x4b,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnpw %r17w, 291(%r28,%r29,4)
+
+# CHECK: cfcmovnpl %r18d, %r22d, %r26d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x4b,0xf2]
+ cfcmovnpl %r18d, %r22d, %r26d
+
+# CHECK: cfcmovnpl %r18d, %r22d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x4b,0xd6]
+ cfcmovnpl %r18d, %r22d
+
+# CHECK: cfcmovnpl %r18d, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x4b,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnpl %r18d, 291(%r28,%r29,4)
+
+# CHECK: cfcmovnpq %r19, %r23, %r27
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x4b,0xfb]
+ cfcmovnpq %r19, %r23, %r27
+
+# CHECK: cfcmovnpq %r19, %r23
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x4b,0xdf]
+ cfcmovnpq %r19, %r23
+
+# CHECK: cfcmovnpq %r19, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x4b,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnpq %r19, 291(%r28,%r29,4)
+
+# CHECK: cfcmovnpw 291(%r28,%r29,4), %r17w, %r21w
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x4b,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnpw 291(%r28,%r29,4), %r17w, %r21w
+
+# CHECK: cfcmovnpw 291(%r28,%r29,4), %r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x4b,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnpw 291(%r28,%r29,4), %r17w
+
+# CHECK: cfcmovnpl 291(%r28,%r29,4), %r18d, %r22d
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x4b,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnpl 291(%r28,%r29,4), %r18d, %r22d
+
+# CHECK: cfcmovnpl 291(%r28,%r29,4), %r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x4b,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnpl 291(%r28,%r29,4), %r18d
+
+# CHECK: cfcmovnpq 291(%r28,%r29,4), %r19, %r23
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x4b,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnpq 291(%r28,%r29,4), %r19, %r23
+
+# CHECK: cfcmovnpq 291(%r28,%r29,4), %r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x4b,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnpq 291(%r28,%r29,4), %r19
+
+# CHECK: cfcmovnsw %r17w, %r21w, %r25w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x49,0xe9]
+ cfcmovnsw %r17w, %r21w, %r25w
+
+# CHECK: cfcmovnsw %r17w, %r21w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x49,0xcd]
+ cfcmovnsw %r17w, %r21w
+
+# CHECK: cfcmovnsw %r17w, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x49,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnsw %r17w, 291(%r28,%r29,4)
+
+# CHECK: cfcmovnsl %r18d, %r22d, %r26d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x49,0xf2]
+ cfcmovnsl %r18d, %r22d, %r26d
+
+# CHECK: cfcmovnsl %r18d, %r22d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x49,0xd6]
+ cfcmovnsl %r18d, %r22d
+
+# CHECK: cfcmovnsl %r18d, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x49,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnsl %r18d, 291(%r28,%r29,4)
+
+# CHECK: cfcmovnsq %r19, %r23, %r27
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x49,0xfb]
+ cfcmovnsq %r19, %r23, %r27
+
+# CHECK: cfcmovnsq %r19, %r23
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x49,0xdf]
+ cfcmovnsq %r19, %r23
+
+# CHECK: cfcmovnsq %r19, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x49,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnsq %r19, 291(%r28,%r29,4)
+
+# CHECK: cfcmovnsw 291(%r28,%r29,4), %r17w, %r21w
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x49,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnsw 291(%r28,%r29,4), %r17w, %r21w
+
+# CHECK: cfcmovnsw 291(%r28,%r29,4), %r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x49,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnsw 291(%r28,%r29,4), %r17w
+
+# CHECK: cfcmovnsl 291(%r28,%r29,4), %r18d, %r22d
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x49,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnsl 291(%r28,%r29,4), %r18d, %r22d
+
+# CHECK: cfcmovnsl 291(%r28,%r29,4), %r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x49,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnsl 291(%r28,%r29,4), %r18d
+
+# CHECK: cfcmovnsq 291(%r28,%r29,4), %r19, %r23
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x49,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnsq 291(%r28,%r29,4), %r19, %r23
+
+# CHECK: cfcmovnsq 291(%r28,%r29,4), %r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x49,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnsq 291(%r28,%r29,4), %r19
+
+# CHECK: cfcmovnew %r17w, %r21w, %r25w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x45,0xe9]
+ cfcmovnew %r17w, %r21w, %r25w
+
+# CHECK: cfcmovnew %r17w, %r21w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x45,0xcd]
+ cfcmovnew %r17w, %r21w
+
+# CHECK: cfcmovnew %r17w, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x45,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnew %r17w, 291(%r28,%r29,4)
+
+# CHECK: cfcmovnel %r18d, %r22d, %r26d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x45,0xf2]
+ cfcmovnel %r18d, %r22d, %r26d
+
+# CHECK: cfcmovnel %r18d, %r22d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x45,0xd6]
+ cfcmovnel %r18d, %r22d
+
+# CHECK: cfcmovnel %r18d, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x45,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnel %r18d, 291(%r28,%r29,4)
+
+# CHECK: cfcmovneq %r19, %r23, %r27
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x45,0xfb]
+ cfcmovneq %r19, %r23, %r27
+
+# CHECK: cfcmovneq %r19, %r23
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x45,0xdf]
+ cfcmovneq %r19, %r23
+
+# CHECK: cfcmovneq %r19, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x45,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovneq %r19, 291(%r28,%r29,4)
+
+# CHECK: cfcmovnew 291(%r28,%r29,4), %r17w, %r21w
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x45,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnew 291(%r28,%r29,4), %r17w, %r21w
+
+# CHECK: cfcmovnew 291(%r28,%r29,4), %r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x45,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnew 291(%r28,%r29,4), %r17w
+
+# CHECK: cfcmovnel 291(%r28,%r29,4), %r18d, %r22d
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x45,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnel 291(%r28,%r29,4), %r18d, %r22d
+
+# CHECK: cfcmovnel 291(%r28,%r29,4), %r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x45,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnel 291(%r28,%r29,4), %r18d
+
+# CHECK: cfcmovneq 291(%r28,%r29,4), %r19, %r23
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x45,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovneq 291(%r28,%r29,4), %r19, %r23
+
+# CHECK: cfcmovneq 291(%r28,%r29,4), %r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x45,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovneq 291(%r28,%r29,4), %r19
+
+# CHECK: cfcmovpw %r17w, %r21w, %r25w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x4a,0xe9]
+ cfcmovpw %r17w, %r21w, %r25w
+
+# CHECK: cfcmovpw %r17w, %r21w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x4a,0xcd]
+ cfcmovpw %r17w, %r21w
+
+# CHECK: cfcmovpw %r17w, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x4a,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovpw %r17w, 291(%r28,%r29,4)
+
+# CHECK: cfcmovpl %r18d, %r22d, %r26d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x4a,0xf2]
+ cfcmovpl %r18d, %r22d, %r26d
+
+# CHECK: cfcmovpl %r18d, %r22d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x4a,0xd6]
+ cfcmovpl %r18d, %r22d
+
+# CHECK: cfcmovpl %r18d, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x4a,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovpl %r18d, 291(%r28,%r29,4)
+
+# CHECK: cfcmovpq %r19, %r23, %r27
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x4a,0xfb]
+ cfcmovpq %r19, %r23, %r27
+
+# CHECK: cfcmovpq %r19, %r23
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x4a,0xdf]
+ cfcmovpq %r19, %r23
+
+# CHECK: cfcmovpq %r19, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x4a,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovpq %r19, 291(%r28,%r29,4)
+
+# CHECK: cfcmovpw 291(%r28,%r29,4), %r17w, %r21w
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x4a,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovpw 291(%r28,%r29,4), %r17w, %r21w
+
+# CHECK: cfcmovpw 291(%r28,%r29,4), %r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x4a,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovpw 291(%r28,%r29,4), %r17w
+
+# CHECK: cfcmovpl 291(%r28,%r29,4), %r18d, %r22d
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x4a,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovpl 291(%r28,%r29,4), %r18d, %r22d
+
+# CHECK: cfcmovpl 291(%r28,%r29,4), %r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x4a,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovpl 291(%r28,%r29,4), %r18d
+
+# CHECK: cfcmovpq 291(%r28,%r29,4), %r19, %r23
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x4a,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovpq 291(%r28,%r29,4), %r19, %r23
+
+# CHECK: cfcmovpq 291(%r28,%r29,4), %r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x4a,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovpq 291(%r28,%r29,4), %r19
+
+# CHECK: cfcmovsw %r17w, %r21w, %r25w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x48,0xe9]
+ cfcmovsw %r17w, %r21w, %r25w
+
+# CHECK: cfcmovsw %r17w, %r21w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x48,0xcd]
+ cfcmovsw %r17w, %r21w
+
+# CHECK: cfcmovsw %r17w, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x48,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovsw %r17w, 291(%r28,%r29,4)
+
+# CHECK: cfcmovsl %r18d, %r22d, %r26d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x48,0xf2]
+ cfcmovsl %r18d, %r22d, %r26d
+
+# CHECK: cfcmovsl %r18d, %r22d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x48,0xd6]
+ cfcmovsl %r18d, %r22d
+
+# CHECK: cfcmovsl %r18d, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x48,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovsl %r18d, 291(%r28,%r29,4)
+
+# CHECK: cfcmovsq %r19, %r23, %r27
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x48,0xfb]
+ cfcmovsq %r19, %r23, %r27
+
+# CHECK: cfcmovsq %r19, %r23
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x48,0xdf]
+ cfcmovsq %r19, %r23
+
+# CHECK: cfcmovsq %r19, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x48,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovsq %r19, 291(%r28,%r29,4)
+
+# CHECK: cfcmovsw 291(%r28,%r29,4), %r17w, %r21w
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x48,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovsw 291(%r28,%r29,4), %r17w, %r21w
+
+# CHECK: cfcmovsw 291(%r28,%r29,4), %r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x48,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovsw 291(%r28,%r29,4), %r17w
+
+# CHECK: cfcmovsl 291(%r28,%r29,4), %r18d, %r22d
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x48,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovsl 291(%r28,%r29,4), %r18d, %r22d
+
+# CHECK: cfcmovsl 291(%r28,%r29,4), %r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x48,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovsl 291(%r28,%r29,4), %r18d
+
+# CHECK: cfcmovsq 291(%r28,%r29,4), %r19, %r23
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x48,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovsq 291(%r28,%r29,4), %r19, %r23
+
+# CHECK: cfcmovsq 291(%r28,%r29,4), %r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x48,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovsq 291(%r28,%r29,4), %r19
+
+# CHECK: cfcmovew %r17w, %r21w, %r25w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x44,0xe9]
+ cfcmovew %r17w, %r21w, %r25w
+
+# CHECK: cfcmovew %r17w, %r21w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x44,0xcd]
+ cfcmovew %r17w, %r21w
+
+# CHECK: cfcmovew %r17w, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x44,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovew %r17w, 291(%r28,%r29,4)
+
+# CHECK: cfcmovel %r18d, %r22d, %r26d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x44,0xf2]
+ cfcmovel %r18d, %r22d, %r26d
+
+# CHECK: cfcmovel %r18d, %r22d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x44,0xd6]
+ cfcmovel %r18d, %r22d
+
+# CHECK: cfcmovel %r18d, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x44,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovel %r18d, 291(%r28,%r29,4)
+
+# CHECK: cfcmoveq %r19, %r23, %r27
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x44,0xfb]
+ cfcmoveq %r19, %r23, %r27
+
+# CHECK: cfcmoveq %r19, %r23
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x44,0xdf]
+ cfcmoveq %r19, %r23
+
+# CHECK: cfcmoveq %r19, 291(%r28,%r29,4)
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x44,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmoveq %r19, 291(%r28,%r29,4)
+
+# CHECK: cfcmovew 291(%r28,%r29,4), %r17w, %r21w
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x44,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovew 291(%r28,%r29,4), %r17w, %r21w
+
+# CHECK: cfcmovew 291(%r28,%r29,4), %r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x44,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovew 291(%r28,%r29,4), %r17w
+
+# CHECK: cfcmovel 291(%r28,%r29,4), %r18d, %r22d
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x44,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovel 291(%r28,%r29,4), %r18d, %r22d
+
+# CHECK: cfcmovel 291(%r28,%r29,4), %r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x44,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovel 291(%r28,%r29,4), %r18d
+
+# CHECK: cfcmoveq 291(%r28,%r29,4), %r19, %r23
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x44,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmoveq 291(%r28,%r29,4), %r19, %r23
+
+# CHECK: cfcmoveq 291(%r28,%r29,4), %r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x44,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmoveq 291(%r28,%r29,4), %r19
diff --git a/llvm/test/MC/X86/apx/cfcmov-intel.s b/llvm/test/MC/X86/apx/cfcmov-intel.s
new file mode 100644
index 0000000..58b145d
--- /dev/null
+++ b/llvm/test/MC/X86/apx/cfcmov-intel.s
@@ -0,0 +1,841 @@
+# RUN: llvm-mc -triple x86_64 -x86-asm-syntax=intel -output-asm-variant=1 --show-encoding %s | FileCheck %s
+
+# CHECK: cfcmovb r25w, r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x42,0xe9]
+ cfcmovb r25w, r21w, r17w
+
+# CHECK: cfcmovb r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x42,0xcd]
+ cfcmovb r21w, r17w
+
+# CHECK: cfcmovb word ptr [r28 + 4*r29 + 291], r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x42,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovb word ptr [r28 + 4*r29 + 291], r17w
+
+# CHECK: cfcmovb r26d, r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x42,0xf2]
+ cfcmovb r26d, r22d, r18d
+
+# CHECK: cfcmovb r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x42,0xd6]
+ cfcmovb r22d, r18d
+
+# CHECK: cfcmovb dword ptr [r28 + 4*r29 + 291], r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x42,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovb dword ptr [r28 + 4*r29 + 291], r18d
+
+# CHECK: cfcmovb r27, r23, r19
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x42,0xfb]
+ cfcmovb r27, r23, r19
+
+# CHECK: cfcmovb r23, r19
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x42,0xdf]
+ cfcmovb r23, r19
+
+# CHECK: cfcmovb qword ptr [r28 + 4*r29 + 291], r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x42,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovb qword ptr [r28 + 4*r29 + 291], r19
+
+# CHECK: cfcmovb r21w, r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x42,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovb r21w, r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovb r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x42,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovb r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovb r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x42,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovb r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovb r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x42,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovb r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovb r23, r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x42,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovb r23, r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovb r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x42,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovb r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovbe r25w, r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x46,0xe9]
+ cfcmovbe r25w, r21w, r17w
+
+# CHECK: cfcmovbe r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x46,0xcd]
+ cfcmovbe r21w, r17w
+
+# CHECK: cfcmovbe word ptr [r28 + 4*r29 + 291], r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x46,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbe word ptr [r28 + 4*r29 + 291], r17w
+
+# CHECK: cfcmovbe r26d, r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x46,0xf2]
+ cfcmovbe r26d, r22d, r18d
+
+# CHECK: cfcmovbe r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x46,0xd6]
+ cfcmovbe r22d, r18d
+
+# CHECK: cfcmovbe dword ptr [r28 + 4*r29 + 291], r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x46,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbe dword ptr [r28 + 4*r29 + 291], r18d
+
+# CHECK: cfcmovbe r27, r23, r19
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x46,0xfb]
+ cfcmovbe r27, r23, r19
+
+# CHECK: cfcmovbe r23, r19
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x46,0xdf]
+ cfcmovbe r23, r19
+
+# CHECK: cfcmovbe qword ptr [r28 + 4*r29 + 291], r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x46,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbe qword ptr [r28 + 4*r29 + 291], r19
+
+# CHECK: cfcmovbe r21w, r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x46,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbe r21w, r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovbe r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x46,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbe r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovbe r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x46,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbe r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovbe r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x46,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbe r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovbe r23, r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x46,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbe r23, r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovbe r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x46,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovbe r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovl r25w, r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x4c,0xe9]
+ cfcmovl r25w, r21w, r17w
+
+# CHECK: cfcmovl r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x4c,0xcd]
+ cfcmovl r21w, r17w
+
+# CHECK: cfcmovl word ptr [r28 + 4*r29 + 291], r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x4c,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovl word ptr [r28 + 4*r29 + 291], r17w
+
+# CHECK: cfcmovl r26d, r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x4c,0xf2]
+ cfcmovl r26d, r22d, r18d
+
+# CHECK: cfcmovl r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x4c,0xd6]
+ cfcmovl r22d, r18d
+
+# CHECK: cfcmovl dword ptr [r28 + 4*r29 + 291], r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x4c,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovl dword ptr [r28 + 4*r29 + 291], r18d
+
+# CHECK: cfcmovl r27, r23, r19
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x4c,0xfb]
+ cfcmovl r27, r23, r19
+
+# CHECK: cfcmovl r23, r19
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x4c,0xdf]
+ cfcmovl r23, r19
+
+# CHECK: cfcmovl qword ptr [r28 + 4*r29 + 291], r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x4c,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovl qword ptr [r28 + 4*r29 + 291], r19
+
+# CHECK: cfcmovl r21w, r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x4c,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovl r21w, r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovl r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x4c,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovl r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovl r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x4c,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovl r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovl r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x4c,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovl r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovl r23, r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x4c,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovl r23, r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovl r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x4c,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovl r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovle r25w, r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x4e,0xe9]
+ cfcmovle r25w, r21w, r17w
+
+# CHECK: cfcmovle r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x4e,0xcd]
+ cfcmovle r21w, r17w
+
+# CHECK: cfcmovle word ptr [r28 + 4*r29 + 291], r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x4e,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovle word ptr [r28 + 4*r29 + 291], r17w
+
+# CHECK: cfcmovle r26d, r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x4e,0xf2]
+ cfcmovle r26d, r22d, r18d
+
+# CHECK: cfcmovle r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x4e,0xd6]
+ cfcmovle r22d, r18d
+
+# CHECK: cfcmovle dword ptr [r28 + 4*r29 + 291], r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x4e,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovle dword ptr [r28 + 4*r29 + 291], r18d
+
+# CHECK: cfcmovle r27, r23, r19
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x4e,0xfb]
+ cfcmovle r27, r23, r19
+
+# CHECK: cfcmovle r23, r19
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x4e,0xdf]
+ cfcmovle r23, r19
+
+# CHECK: cfcmovle qword ptr [r28 + 4*r29 + 291], r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x4e,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovle qword ptr [r28 + 4*r29 + 291], r19
+
+# CHECK: cfcmovle r21w, r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x4e,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovle r21w, r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovle r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x4e,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovle r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovle r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x4e,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovle r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovle r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x4e,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovle r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovle r23, r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x4e,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovle r23, r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovle r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x4e,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovle r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovae r25w, r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x43,0xe9]
+ cfcmovae r25w, r21w, r17w
+
+# CHECK: cfcmovae r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x43,0xcd]
+ cfcmovae r21w, r17w
+
+# CHECK: cfcmovae word ptr [r28 + 4*r29 + 291], r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x43,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovae word ptr [r28 + 4*r29 + 291], r17w
+
+# CHECK: cfcmovae r26d, r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x43,0xf2]
+ cfcmovae r26d, r22d, r18d
+
+# CHECK: cfcmovae r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x43,0xd6]
+ cfcmovae r22d, r18d
+
+# CHECK: cfcmovae dword ptr [r28 + 4*r29 + 291], r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x43,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovae dword ptr [r28 + 4*r29 + 291], r18d
+
+# CHECK: cfcmovae r27, r23, r19
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x43,0xfb]
+ cfcmovae r27, r23, r19
+
+# CHECK: cfcmovae r23, r19
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x43,0xdf]
+ cfcmovae r23, r19
+
+# CHECK: cfcmovae qword ptr [r28 + 4*r29 + 291], r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x43,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovae qword ptr [r28 + 4*r29 + 291], r19
+
+# CHECK: cfcmovae r21w, r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x43,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovae r21w, r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovae r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x43,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovae r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovae r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x43,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovae r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovae r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x43,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovae r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovae r23, r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x43,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovae r23, r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovae r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x43,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovae r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmova r25w, r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x47,0xe9]
+ cfcmova r25w, r21w, r17w
+
+# CHECK: cfcmova r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x47,0xcd]
+ cfcmova r21w, r17w
+
+# CHECK: cfcmova word ptr [r28 + 4*r29 + 291], r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x47,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmova word ptr [r28 + 4*r29 + 291], r17w
+
+# CHECK: cfcmova r26d, r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x47,0xf2]
+ cfcmova r26d, r22d, r18d
+
+# CHECK: cfcmova r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x47,0xd6]
+ cfcmova r22d, r18d
+
+# CHECK: cfcmova dword ptr [r28 + 4*r29 + 291], r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x47,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmova dword ptr [r28 + 4*r29 + 291], r18d
+
+# CHECK: cfcmova r27, r23, r19
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x47,0xfb]
+ cfcmova r27, r23, r19
+
+# CHECK: cfcmova r23, r19
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x47,0xdf]
+ cfcmova r23, r19
+
+# CHECK: cfcmova qword ptr [r28 + 4*r29 + 291], r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x47,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmova qword ptr [r28 + 4*r29 + 291], r19
+
+# CHECK: cfcmova r21w, r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x47,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmova r21w, r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmova r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x47,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmova r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmova r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x47,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmova r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmova r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x47,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmova r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmova r23, r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x47,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmova r23, r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmova r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x47,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmova r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovge r25w, r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x4d,0xe9]
+ cfcmovge r25w, r21w, r17w
+
+# CHECK: cfcmovge r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x4d,0xcd]
+ cfcmovge r21w, r17w
+
+# CHECK: cfcmovge word ptr [r28 + 4*r29 + 291], r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x4d,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovge word ptr [r28 + 4*r29 + 291], r17w
+
+# CHECK: cfcmovge r26d, r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x4d,0xf2]
+ cfcmovge r26d, r22d, r18d
+
+# CHECK: cfcmovge r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x4d,0xd6]
+ cfcmovge r22d, r18d
+
+# CHECK: cfcmovge dword ptr [r28 + 4*r29 + 291], r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x4d,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovge dword ptr [r28 + 4*r29 + 291], r18d
+
+# CHECK: cfcmovge r27, r23, r19
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x4d,0xfb]
+ cfcmovge r27, r23, r19
+
+# CHECK: cfcmovge r23, r19
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x4d,0xdf]
+ cfcmovge r23, r19
+
+# CHECK: cfcmovge qword ptr [r28 + 4*r29 + 291], r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x4d,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovge qword ptr [r28 + 4*r29 + 291], r19
+
+# CHECK: cfcmovge r21w, r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x4d,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovge r21w, r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovge r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x4d,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovge r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovge r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x4d,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovge r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovge r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x4d,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovge r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovge r23, r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x4d,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovge r23, r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovge r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x4d,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovge r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovno r25w, r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x41,0xe9]
+ cfcmovno r25w, r21w, r17w
+
+# CHECK: cfcmovno r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x41,0xcd]
+ cfcmovno r21w, r17w
+
+# CHECK: cfcmovno word ptr [r28 + 4*r29 + 291], r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x41,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovno word ptr [r28 + 4*r29 + 291], r17w
+
+# CHECK: cfcmovno r26d, r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x41,0xf2]
+ cfcmovno r26d, r22d, r18d
+
+# CHECK: cfcmovno r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x41,0xd6]
+ cfcmovno r22d, r18d
+
+# CHECK: cfcmovno dword ptr [r28 + 4*r29 + 291], r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x41,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovno dword ptr [r28 + 4*r29 + 291], r18d
+
+# CHECK: cfcmovno r27, r23, r19
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x41,0xfb]
+ cfcmovno r27, r23, r19
+
+# CHECK: cfcmovno r23, r19
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x41,0xdf]
+ cfcmovno r23, r19
+
+# CHECK: cfcmovno qword ptr [r28 + 4*r29 + 291], r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x41,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovno qword ptr [r28 + 4*r29 + 291], r19
+
+# CHECK: cfcmovno r21w, r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x41,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovno r21w, r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovno r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x41,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovno r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovno r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x41,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovno r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovno r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x41,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovno r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovno r23, r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x41,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovno r23, r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovno r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x41,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovno r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovnp r25w, r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x4b,0xe9]
+ cfcmovnp r25w, r21w, r17w
+
+# CHECK: cfcmovnp r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x4b,0xcd]
+ cfcmovnp r21w, r17w
+
+# CHECK: cfcmovnp word ptr [r28 + 4*r29 + 291], r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x4b,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnp word ptr [r28 + 4*r29 + 291], r17w
+
+# CHECK: cfcmovnp r26d, r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x4b,0xf2]
+ cfcmovnp r26d, r22d, r18d
+
+# CHECK: cfcmovnp r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x4b,0xd6]
+ cfcmovnp r22d, r18d
+
+# CHECK: cfcmovnp dword ptr [r28 + 4*r29 + 291], r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x4b,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnp dword ptr [r28 + 4*r29 + 291], r18d
+
+# CHECK: cfcmovnp r27, r23, r19
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x4b,0xfb]
+ cfcmovnp r27, r23, r19
+
+# CHECK: cfcmovnp r23, r19
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x4b,0xdf]
+ cfcmovnp r23, r19
+
+# CHECK: cfcmovnp qword ptr [r28 + 4*r29 + 291], r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x4b,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnp qword ptr [r28 + 4*r29 + 291], r19
+
+# CHECK: cfcmovnp r21w, r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x4b,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnp r21w, r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovnp r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x4b,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnp r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovnp r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x4b,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnp r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovnp r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x4b,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnp r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovnp r23, r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x4b,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnp r23, r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovnp r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x4b,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovnp r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovns r25w, r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x49,0xe9]
+ cfcmovns r25w, r21w, r17w
+
+# CHECK: cfcmovns r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x49,0xcd]
+ cfcmovns r21w, r17w
+
+# CHECK: cfcmovns word ptr [r28 + 4*r29 + 291], r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x49,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovns word ptr [r28 + 4*r29 + 291], r17w
+
+# CHECK: cfcmovns r26d, r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x49,0xf2]
+ cfcmovns r26d, r22d, r18d
+
+# CHECK: cfcmovns r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x49,0xd6]
+ cfcmovns r22d, r18d
+
+# CHECK: cfcmovns dword ptr [r28 + 4*r29 + 291], r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x49,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovns dword ptr [r28 + 4*r29 + 291], r18d
+
+# CHECK: cfcmovns r27, r23, r19
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x49,0xfb]
+ cfcmovns r27, r23, r19
+
+# CHECK: cfcmovns r23, r19
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x49,0xdf]
+ cfcmovns r23, r19
+
+# CHECK: cfcmovns qword ptr [r28 + 4*r29 + 291], r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x49,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovns qword ptr [r28 + 4*r29 + 291], r19
+
+# CHECK: cfcmovns r21w, r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x49,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovns r21w, r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovns r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x49,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovns r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovns r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x49,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovns r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovns r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x49,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovns r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovns r23, r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x49,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovns r23, r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovns r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x49,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovns r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovne r25w, r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x45,0xe9]
+ cfcmovne r25w, r21w, r17w
+
+# CHECK: cfcmovne r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x45,0xcd]
+ cfcmovne r21w, r17w
+
+# CHECK: cfcmovne word ptr [r28 + 4*r29 + 291], r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x45,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovne word ptr [r28 + 4*r29 + 291], r17w
+
+# CHECK: cfcmovne r26d, r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x45,0xf2]
+ cfcmovne r26d, r22d, r18d
+
+# CHECK: cfcmovne r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x45,0xd6]
+ cfcmovne r22d, r18d
+
+# CHECK: cfcmovne dword ptr [r28 + 4*r29 + 291], r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x45,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovne dword ptr [r28 + 4*r29 + 291], r18d
+
+# CHECK: cfcmovne r27, r23, r19
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x45,0xfb]
+ cfcmovne r27, r23, r19
+
+# CHECK: cfcmovne r23, r19
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x45,0xdf]
+ cfcmovne r23, r19
+
+# CHECK: cfcmovne qword ptr [r28 + 4*r29 + 291], r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x45,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovne qword ptr [r28 + 4*r29 + 291], r19
+
+# CHECK: cfcmovne r21w, r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x45,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovne r21w, r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovne r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x45,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovne r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovne r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x45,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovne r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovne r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x45,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovne r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovne r23, r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x45,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovne r23, r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovne r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x45,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovne r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovp r25w, r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x4a,0xe9]
+ cfcmovp r25w, r21w, r17w
+
+# CHECK: cfcmovp r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x4a,0xcd]
+ cfcmovp r21w, r17w
+
+# CHECK: cfcmovp word ptr [r28 + 4*r29 + 291], r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x4a,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovp word ptr [r28 + 4*r29 + 291], r17w
+
+# CHECK: cfcmovp r26d, r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x4a,0xf2]
+ cfcmovp r26d, r22d, r18d
+
+# CHECK: cfcmovp r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x4a,0xd6]
+ cfcmovp r22d, r18d
+
+# CHECK: cfcmovp dword ptr [r28 + 4*r29 + 291], r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x4a,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovp dword ptr [r28 + 4*r29 + 291], r18d
+
+# CHECK: cfcmovp r27, r23, r19
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x4a,0xfb]
+ cfcmovp r27, r23, r19
+
+# CHECK: cfcmovp r23, r19
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x4a,0xdf]
+ cfcmovp r23, r19
+
+# CHECK: cfcmovp qword ptr [r28 + 4*r29 + 291], r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x4a,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovp qword ptr [r28 + 4*r29 + 291], r19
+
+# CHECK: cfcmovp r21w, r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x4a,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovp r21w, r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovp r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x4a,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovp r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovp r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x4a,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovp r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovp r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x4a,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovp r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovp r23, r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x4a,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovp r23, r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovp r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x4a,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovp r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovs r25w, r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x48,0xe9]
+ cfcmovs r25w, r21w, r17w
+
+# CHECK: cfcmovs r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x48,0xcd]
+ cfcmovs r21w, r17w
+
+# CHECK: cfcmovs word ptr [r28 + 4*r29 + 291], r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x48,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovs word ptr [r28 + 4*r29 + 291], r17w
+
+# CHECK: cfcmovs r26d, r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x48,0xf2]
+ cfcmovs r26d, r22d, r18d
+
+# CHECK: cfcmovs r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x48,0xd6]
+ cfcmovs r22d, r18d
+
+# CHECK: cfcmovs dword ptr [r28 + 4*r29 + 291], r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x48,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovs dword ptr [r28 + 4*r29 + 291], r18d
+
+# CHECK: cfcmovs r27, r23, r19
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x48,0xfb]
+ cfcmovs r27, r23, r19
+
+# CHECK: cfcmovs r23, r19
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x48,0xdf]
+ cfcmovs r23, r19
+
+# CHECK: cfcmovs qword ptr [r28 + 4*r29 + 291], r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x48,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovs qword ptr [r28 + 4*r29 + 291], r19
+
+# CHECK: cfcmovs r21w, r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x48,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovs r21w, r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovs r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x48,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovs r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovs r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x48,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovs r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovs r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x48,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmovs r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovs r23, r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x48,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovs r23, r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmovs r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x48,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmovs r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmove r25w, r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x35,0x14,0x44,0xe9]
+ cfcmove r25w, r21w, r17w
+
+# CHECK: cfcmove r21w, r17w
+# CHECK: encoding: [0x62,0xec,0x7d,0x0c,0x44,0xcd]
+ cfcmove r21w, r17w
+
+# CHECK: cfcmove word ptr [r28 + 4*r29 + 291], r17w
+# CHECK: encoding: [0x62,0x8c,0x79,0x0c,0x44,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmove word ptr [r28 + 4*r29 + 291], r17w
+
+# CHECK: cfcmove r26d, r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x2c,0x14,0x44,0xf2]
+ cfcmove r26d, r22d, r18d
+
+# CHECK: cfcmove r22d, r18d
+# CHECK: encoding: [0x62,0xec,0x7c,0x0c,0x44,0xd6]
+ cfcmove r22d, r18d
+
+# CHECK: cfcmove dword ptr [r28 + 4*r29 + 291], r18d
+# CHECK: encoding: [0x62,0x8c,0x78,0x0c,0x44,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmove dword ptr [r28 + 4*r29 + 291], r18d
+
+# CHECK: cfcmove r27, r23, r19
+# CHECK: encoding: [0x62,0xec,0xa4,0x14,0x44,0xfb]
+ cfcmove r27, r23, r19
+
+# CHECK: cfcmove r23, r19
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x44,0xdf]
+ cfcmove r23, r19
+
+# CHECK: cfcmove qword ptr [r28 + 4*r29 + 291], r19
+# CHECK: encoding: [0x62,0x8c,0xf8,0x0c,0x44,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmove qword ptr [r28 + 4*r29 + 291], r19
+
+# CHECK: cfcmove r21w, r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x51,0x14,0x44,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmove r21w, r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmove r17w, word ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x79,0x08,0x44,0x8c,0xac,0x23,0x01,0x00,0x00]
+ cfcmove r17w, word ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmove r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x48,0x14,0x44,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmove r22d, r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmove r18d, dword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0x78,0x08,0x44,0x94,0xac,0x23,0x01,0x00,0x00]
+ cfcmove r18d, dword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmove r23, r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xc0,0x14,0x44,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmove r23, r19, qword ptr [r28 + 4*r29 + 291]
+
+# CHECK: cfcmove r19, qword ptr [r28 + 4*r29 + 291]
+# CHECK: encoding: [0x62,0x8c,0xf8,0x08,0x44,0x9c,0xac,0x23,0x01,0x00,0x00]
+ cfcmove r19, qword ptr [r28 + 4*r29 + 291]
diff --git a/llvm/test/MC/X86/apx/cmov-att.s b/llvm/test/MC/X86/apx/cmov-att.s
new file mode 100644
index 0000000..4b8e678
--- /dev/null
+++ b/llvm/test/MC/X86/apx/cmov-att.s
@@ -0,0 +1,293 @@
+# RUN: llvm-mc -triple x86_64 -show-encoding %s | FileCheck %s
+# RUN: not llvm-mc -triple i386 -show-encoding %s 2>&1 | FileCheck %s --check-prefix=ERROR
+
+# ERROR-COUNT-96: error:
+# ERROR-NOT: error:
+# CHECK: cmovbw %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x42,0xc2]
+ cmovbw %dx, %ax, %r9w
+# CHECK: cmovbl %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x42,0xd1]
+ cmovbl %ecx, %edx, %r10d
+# CHECK: cmovbq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x42,0xf9]
+ cmovbq %r9, %r15, %r11
+# CHECK: cmovbw 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x42,0x54,0x80,0x7b]
+ cmovbw 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmovbl 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x42,0x4c,0x80,0x7b]
+ cmovbl 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmovbq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x42,0x4c,0x80,0x7b]
+ cmovbq 123(%r8,%rax,4), %r9, %r15
+# CHECK: cmovbew %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x46,0xc2]
+ cmovbew %dx, %ax, %r9w
+# CHECK: cmovbel %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x46,0xd1]
+ cmovbel %ecx, %edx, %r10d
+# CHECK: cmovbeq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x46,0xf9]
+ cmovbeq %r9, %r15, %r11
+# CHECK: cmovbew 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x46,0x54,0x80,0x7b]
+ cmovbew 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmovbel 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x46,0x4c,0x80,0x7b]
+ cmovbel 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmovbeq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x46,0x4c,0x80,0x7b]
+ cmovbeq 123(%r8,%rax,4), %r9, %r15
+# CHECK: cmovlw %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x4c,0xc2]
+ cmovlw %dx, %ax, %r9w
+# CHECK: cmovll %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x4c,0xd1]
+ cmovll %ecx, %edx, %r10d
+# CHECK: cmovlq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x4c,0xf9]
+ cmovlq %r9, %r15, %r11
+# CHECK: cmovlw 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x4c,0x54,0x80,0x7b]
+ cmovlw 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmovll 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x4c,0x4c,0x80,0x7b]
+ cmovll 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmovlq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x4c,0x4c,0x80,0x7b]
+ cmovlq 123(%r8,%rax,4), %r9, %r15
+# CHECK: cmovlew %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x4e,0xc2]
+ cmovlew %dx, %ax, %r9w
+# CHECK: cmovlel %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x4e,0xd1]
+ cmovlel %ecx, %edx, %r10d
+# CHECK: cmovleq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x4e,0xf9]
+ cmovleq %r9, %r15, %r11
+# CHECK: cmovlew 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x4e,0x54,0x80,0x7b]
+ cmovlew 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmovlel 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x4e,0x4c,0x80,0x7b]
+ cmovlel 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmovleq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x4e,0x4c,0x80,0x7b]
+ cmovleq 123(%r8,%rax,4), %r9, %r15
+# CHECK: cmovaew %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x43,0xc2]
+ cmovaew %dx, %ax, %r9w
+# CHECK: cmovael %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x43,0xd1]
+ cmovael %ecx, %edx, %r10d
+# CHECK: cmovaeq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x43,0xf9]
+ cmovaeq %r9, %r15, %r11
+# CHECK: cmovaew 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x43,0x54,0x80,0x7b]
+ cmovaew 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmovael 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x43,0x4c,0x80,0x7b]
+ cmovael 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmovaeq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x43,0x4c,0x80,0x7b]
+ cmovaeq 123(%r8,%rax,4), %r9, %r15
+# CHECK: cmovaw %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x47,0xc2]
+ cmovaw %dx, %ax, %r9w
+# CHECK: cmoval %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x47,0xd1]
+ cmoval %ecx, %edx, %r10d
+# CHECK: cmovaq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x47,0xf9]
+ cmovaq %r9, %r15, %r11
+# CHECK: cmovaw 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x47,0x54,0x80,0x7b]
+ cmovaw 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmoval 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x47,0x4c,0x80,0x7b]
+ cmoval 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmovaq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x47,0x4c,0x80,0x7b]
+ cmovaq 123(%r8,%rax,4), %r9, %r15
+# CHECK: cmovgew %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x4d,0xc2]
+ cmovgew %dx, %ax, %r9w
+# CHECK: cmovgel %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x4d,0xd1]
+ cmovgel %ecx, %edx, %r10d
+# CHECK: cmovgeq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x4d,0xf9]
+ cmovgeq %r9, %r15, %r11
+# CHECK: cmovgew 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x4d,0x54,0x80,0x7b]
+ cmovgew 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmovgel 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x4d,0x4c,0x80,0x7b]
+ cmovgel 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmovgeq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x4d,0x4c,0x80,0x7b]
+ cmovgeq 123(%r8,%rax,4), %r9, %r15
+# CHECK: cmovgw %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x4f,0xc2]
+ cmovgw %dx, %ax, %r9w
+# CHECK: cmovgl %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x4f,0xd1]
+ cmovgl %ecx, %edx, %r10d
+# CHECK: cmovgq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x4f,0xf9]
+ cmovgq %r9, %r15, %r11
+# CHECK: cmovgw 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x4f,0x54,0x80,0x7b]
+ cmovgw 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmovgl 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x4f,0x4c,0x80,0x7b]
+ cmovgl 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmovgq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x4f,0x4c,0x80,0x7b]
+ cmovgq 123(%r8,%rax,4), %r9, %r15
+# CHECK: cmovnow %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x41,0xc2]
+ cmovnow %dx, %ax, %r9w
+# CHECK: cmovnol %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x41,0xd1]
+ cmovnol %ecx, %edx, %r10d
+# CHECK: cmovnoq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x41,0xf9]
+ cmovnoq %r9, %r15, %r11
+# CHECK: cmovnow 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x41,0x54,0x80,0x7b]
+ cmovnow 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmovnol 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x41,0x4c,0x80,0x7b]
+ cmovnol 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmovnoq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x41,0x4c,0x80,0x7b]
+ cmovnoq 123(%r8,%rax,4), %r9, %r15
+# CHECK: cmovnpw %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x4b,0xc2]
+ cmovnpw %dx, %ax, %r9w
+# CHECK: cmovnpl %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x4b,0xd1]
+ cmovnpl %ecx, %edx, %r10d
+# CHECK: cmovnpq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x4b,0xf9]
+ cmovnpq %r9, %r15, %r11
+# CHECK: cmovnpw 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x4b,0x54,0x80,0x7b]
+ cmovnpw 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmovnpl 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x4b,0x4c,0x80,0x7b]
+ cmovnpl 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmovnpq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x4b,0x4c,0x80,0x7b]
+ cmovnpq 123(%r8,%rax,4), %r9, %r15
+# CHECK: cmovnsw %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x49,0xc2]
+ cmovnsw %dx, %ax, %r9w
+# CHECK: cmovnsl %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x49,0xd1]
+ cmovnsl %ecx, %edx, %r10d
+# CHECK: cmovnsq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x49,0xf9]
+ cmovnsq %r9, %r15, %r11
+# CHECK: cmovnsw 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x49,0x54,0x80,0x7b]
+ cmovnsw 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmovnsl 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x49,0x4c,0x80,0x7b]
+ cmovnsl 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmovnsq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x49,0x4c,0x80,0x7b]
+ cmovnsq 123(%r8,%rax,4), %r9, %r15
+# CHECK: cmovnew %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x45,0xc2]
+ cmovnew %dx, %ax, %r9w
+# CHECK: cmovnel %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x45,0xd1]
+ cmovnel %ecx, %edx, %r10d
+# CHECK: cmovneq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x45,0xf9]
+ cmovneq %r9, %r15, %r11
+# CHECK: cmovnew 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x45,0x54,0x80,0x7b]
+ cmovnew 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmovnel 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x45,0x4c,0x80,0x7b]
+ cmovnel 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmovneq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x45,0x4c,0x80,0x7b]
+ cmovneq 123(%r8,%rax,4), %r9, %r15
+# CHECK: cmovow %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x40,0xc2]
+ cmovow %dx, %ax, %r9w
+# CHECK: cmovol %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x40,0xd1]
+ cmovol %ecx, %edx, %r10d
+# CHECK: cmovoq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x40,0xf9]
+ cmovoq %r9, %r15, %r11
+# CHECK: cmovow 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x40,0x54,0x80,0x7b]
+ cmovow 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmovol 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x40,0x4c,0x80,0x7b]
+ cmovol 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmovoq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x40,0x4c,0x80,0x7b]
+ cmovoq 123(%r8,%rax,4), %r9, %r15
+# CHECK: cmovpw %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x4a,0xc2]
+ cmovpw %dx, %ax, %r9w
+# CHECK: cmovpl %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x4a,0xd1]
+ cmovpl %ecx, %edx, %r10d
+# CHECK: cmovpq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x4a,0xf9]
+ cmovpq %r9, %r15, %r11
+# CHECK: cmovpw 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x4a,0x54,0x80,0x7b]
+ cmovpw 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmovpl 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x4a,0x4c,0x80,0x7b]
+ cmovpl 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmovpq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x4a,0x4c,0x80,0x7b]
+ cmovpq 123(%r8,%rax,4), %r9, %r15
+# CHECK: cmovsw %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x48,0xc2]
+ cmovsw %dx, %ax, %r9w
+# CHECK: cmovsl %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x48,0xd1]
+ cmovsl %ecx, %edx, %r10d
+# CHECK: cmovsq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x48,0xf9]
+ cmovsq %r9, %r15, %r11
+# CHECK: cmovsw 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x48,0x54,0x80,0x7b]
+ cmovsw 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmovsl 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x48,0x4c,0x80,0x7b]
+ cmovsl 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmovsq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x48,0x4c,0x80,0x7b]
+ cmovsq 123(%r8,%rax,4), %r9, %r15
+# CHECK: cmovew %dx, %ax, %r9w
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x44,0xc2]
+ cmovew %dx, %ax, %r9w
+# CHECK: cmovel %ecx, %edx, %r10d
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x44,0xd1]
+ cmovel %ecx, %edx, %r10d
+# CHECK: cmoveq %r9, %r15, %r11
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x44,0xf9]
+ cmoveq %r9, %r15, %r11
+# CHECK: cmovew 123(%r8,%rax,4), %dx, %ax
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x44,0x54,0x80,0x7b]
+ cmovew 123(%r8,%rax,4), %dx, %ax
+# CHECK: cmovel 123(%r8,%rax,4), %ecx, %edx
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x44,0x4c,0x80,0x7b]
+ cmovel 123(%r8,%rax,4), %ecx, %edx
+# CHECK: cmoveq 123(%r8,%rax,4), %r9, %r15
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x44,0x4c,0x80,0x7b]
+ cmoveq 123(%r8,%rax,4), %r9, %r15
diff --git a/llvm/test/MC/X86/apx/cmov-intel.s b/llvm/test/MC/X86/apx/cmov-intel.s
new file mode 100644
index 0000000..f481346
--- /dev/null
+++ b/llvm/test/MC/X86/apx/cmov-intel.s
@@ -0,0 +1,290 @@
+# RUN: llvm-mc -triple x86_64 -show-encoding -x86-asm-syntax=intel -output-asm-variant=1 %s | FileCheck %s
+
+# CHECK: cmovb r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x42,0xc2]
+ cmovb r9w, ax, dx
+# CHECK: cmovb r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x42,0xd1]
+ cmovb r10d, edx, ecx
+# CHECK: cmovb r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x42,0xf9]
+ cmovb r11, r15, r9
+# CHECK: cmovb ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x42,0x54,0x80,0x7b]
+ cmovb ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmovb edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x42,0x4c,0x80,0x7b]
+ cmovb edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmovb r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x42,0x4c,0x80,0x7b]
+ cmovb r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: cmovbe r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x46,0xc2]
+ cmovbe r9w, ax, dx
+# CHECK: cmovbe r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x46,0xd1]
+ cmovbe r10d, edx, ecx
+# CHECK: cmovbe r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x46,0xf9]
+ cmovbe r11, r15, r9
+# CHECK: cmovbe ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x46,0x54,0x80,0x7b]
+ cmovbe ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmovbe edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x46,0x4c,0x80,0x7b]
+ cmovbe edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmovbe r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x46,0x4c,0x80,0x7b]
+ cmovbe r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: cmovl r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x4c,0xc2]
+ cmovl r9w, ax, dx
+# CHECK: cmovl r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x4c,0xd1]
+ cmovl r10d, edx, ecx
+# CHECK: cmovl r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x4c,0xf9]
+ cmovl r11, r15, r9
+# CHECK: cmovl ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x4c,0x54,0x80,0x7b]
+ cmovl ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmovl edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x4c,0x4c,0x80,0x7b]
+ cmovl edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmovl r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x4c,0x4c,0x80,0x7b]
+ cmovl r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: cmovle r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x4e,0xc2]
+ cmovle r9w, ax, dx
+# CHECK: cmovle r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x4e,0xd1]
+ cmovle r10d, edx, ecx
+# CHECK: cmovle r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x4e,0xf9]
+ cmovle r11, r15, r9
+# CHECK: cmovle ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x4e,0x54,0x80,0x7b]
+ cmovle ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmovle edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x4e,0x4c,0x80,0x7b]
+ cmovle edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmovle r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x4e,0x4c,0x80,0x7b]
+ cmovle r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: cmovae r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x43,0xc2]
+ cmovae r9w, ax, dx
+# CHECK: cmovae r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x43,0xd1]
+ cmovae r10d, edx, ecx
+# CHECK: cmovae r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x43,0xf9]
+ cmovae r11, r15, r9
+# CHECK: cmovae ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x43,0x54,0x80,0x7b]
+ cmovae ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmovae edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x43,0x4c,0x80,0x7b]
+ cmovae edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmovae r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x43,0x4c,0x80,0x7b]
+ cmovae r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: cmova r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x47,0xc2]
+ cmova r9w, ax, dx
+# CHECK: cmova r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x47,0xd1]
+ cmova r10d, edx, ecx
+# CHECK: cmova r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x47,0xf9]
+ cmova r11, r15, r9
+# CHECK: cmova ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x47,0x54,0x80,0x7b]
+ cmova ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmova edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x47,0x4c,0x80,0x7b]
+ cmova edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmova r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x47,0x4c,0x80,0x7b]
+ cmova r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: cmovge r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x4d,0xc2]
+ cmovge r9w, ax, dx
+# CHECK: cmovge r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x4d,0xd1]
+ cmovge r10d, edx, ecx
+# CHECK: cmovge r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x4d,0xf9]
+ cmovge r11, r15, r9
+# CHECK: cmovge ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x4d,0x54,0x80,0x7b]
+ cmovge ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmovge edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x4d,0x4c,0x80,0x7b]
+ cmovge edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmovge r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x4d,0x4c,0x80,0x7b]
+ cmovge r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: cmovg r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x4f,0xc2]
+ cmovg r9w, ax, dx
+# CHECK: cmovg r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x4f,0xd1]
+ cmovg r10d, edx, ecx
+# CHECK: cmovg r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x4f,0xf9]
+ cmovg r11, r15, r9
+# CHECK: cmovg ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x4f,0x54,0x80,0x7b]
+ cmovg ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmovg edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x4f,0x4c,0x80,0x7b]
+ cmovg edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmovg r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x4f,0x4c,0x80,0x7b]
+ cmovg r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: cmovno r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x41,0xc2]
+ cmovno r9w, ax, dx
+# CHECK: cmovno r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x41,0xd1]
+ cmovno r10d, edx, ecx
+# CHECK: cmovno r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x41,0xf9]
+ cmovno r11, r15, r9
+# CHECK: cmovno ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x41,0x54,0x80,0x7b]
+ cmovno ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmovno edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x41,0x4c,0x80,0x7b]
+ cmovno edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmovno r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x41,0x4c,0x80,0x7b]
+ cmovno r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: cmovnp r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x4b,0xc2]
+ cmovnp r9w, ax, dx
+# CHECK: cmovnp r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x4b,0xd1]
+ cmovnp r10d, edx, ecx
+# CHECK: cmovnp r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x4b,0xf9]
+ cmovnp r11, r15, r9
+# CHECK: cmovnp ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x4b,0x54,0x80,0x7b]
+ cmovnp ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmovnp edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x4b,0x4c,0x80,0x7b]
+ cmovnp edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmovnp r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x4b,0x4c,0x80,0x7b]
+ cmovnp r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: cmovns r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x49,0xc2]
+ cmovns r9w, ax, dx
+# CHECK: cmovns r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x49,0xd1]
+ cmovns r10d, edx, ecx
+# CHECK: cmovns r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x49,0xf9]
+ cmovns r11, r15, r9
+# CHECK: cmovns ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x49,0x54,0x80,0x7b]
+ cmovns ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmovns edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x49,0x4c,0x80,0x7b]
+ cmovns edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmovns r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x49,0x4c,0x80,0x7b]
+ cmovns r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: cmovne r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x45,0xc2]
+ cmovne r9w, ax, dx
+# CHECK: cmovne r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x45,0xd1]
+ cmovne r10d, edx, ecx
+# CHECK: cmovne r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x45,0xf9]
+ cmovne r11, r15, r9
+# CHECK: cmovne ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x45,0x54,0x80,0x7b]
+ cmovne ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmovne edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x45,0x4c,0x80,0x7b]
+ cmovne edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmovne r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x45,0x4c,0x80,0x7b]
+ cmovne r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: cmovo r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x40,0xc2]
+ cmovo r9w, ax, dx
+# CHECK: cmovo r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x40,0xd1]
+ cmovo r10d, edx, ecx
+# CHECK: cmovo r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x40,0xf9]
+ cmovo r11, r15, r9
+# CHECK: cmovo ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x40,0x54,0x80,0x7b]
+ cmovo ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmovo edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x40,0x4c,0x80,0x7b]
+ cmovo edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmovo r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x40,0x4c,0x80,0x7b]
+ cmovo r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: cmovp r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x4a,0xc2]
+ cmovp r9w, ax, dx
+# CHECK: cmovp r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x4a,0xd1]
+ cmovp r10d, edx, ecx
+# CHECK: cmovp r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x4a,0xf9]
+ cmovp r11, r15, r9
+# CHECK: cmovp ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x4a,0x54,0x80,0x7b]
+ cmovp ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmovp edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x4a,0x4c,0x80,0x7b]
+ cmovp edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmovp r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x4a,0x4c,0x80,0x7b]
+ cmovp r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: cmovs r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x48,0xc2]
+ cmovs r9w, ax, dx
+# CHECK: cmovs r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x48,0xd1]
+ cmovs r10d, edx, ecx
+# CHECK: cmovs r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x48,0xf9]
+ cmovs r11, r15, r9
+# CHECK: cmovs ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x48,0x54,0x80,0x7b]
+ cmovs ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmovs edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x48,0x4c,0x80,0x7b]
+ cmovs edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmovs r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x48,0x4c,0x80,0x7b]
+ cmovs r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: cmove r9w, ax, dx
+# CHECK: encoding: [0x62,0xf4,0x35,0x18,0x44,0xc2]
+ cmove r9w, ax, dx
+# CHECK: cmove r10d, edx, ecx
+# CHECK: encoding: [0x62,0xf4,0x2c,0x18,0x44,0xd1]
+ cmove r10d, edx, ecx
+# CHECK: cmove r11, r15, r9
+# CHECK: encoding: [0x62,0x54,0xa4,0x18,0x44,0xf9]
+ cmove r11, r15, r9
+# CHECK: cmove ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x44,0x54,0x80,0x7b]
+ cmove ax, dx, word ptr [r8 + 4*rax + 123]
+# CHECK: cmove edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0xd4,0x6c,0x18,0x44,0x4c,0x80,0x7b]
+ cmove edx, ecx, dword ptr [r8 + 4*rax + 123]
+# CHECK: cmove r15, r9, qword ptr [r8 + 4*rax + 123]
+# CHECK: encoding: [0x62,0x54,0x84,0x18,0x44,0x4c,0x80,0x7b]
+ cmove r15, r9, qword ptr [r8 + 4*rax + 123]
diff --git a/llvm/test/MC/X86/apx/evex-format-att.s b/llvm/test/MC/X86/apx/evex-format-att.s
index 055a29f..36df3f3 100644
--- a/llvm/test/MC/X86/apx/evex-format-att.s
+++ b/llvm/test/MC/X86/apx/evex-format-att.s
@@ -10,6 +10,12 @@
# CHECK: encoding: [0x62,0xec,0xec,0x10,0x01,0x41,0x7b]
addq %r16, 123(%r17), %r18
+## MRMDestMemCC
+
+# CHECK: cfcmovbq %r16, 123(%r17,%r18,4)
+# CHECK: encoding: [0x62,0xec,0xf8,0x0c,0x42,0x44,0x91,0x7b]
+ cfcmovbq %r16, 123(%r17,%r18,4)
+
## MRMSrcMem
# CHECK: vbroadcasti32x4 (%r16,%r17), %zmm0
@@ -20,6 +26,16 @@
# CHECK: encoding: [0x62,0xec,0xec,0x10,0x2b,0x48,0x7b]
subq 123(%r16), %r17, %r18
+## MRMSrcMemCC
+
+# CHECK: cfcmovbq 123(%r16,%r17,4), %r18
+# CHECK: encoding: [0x62,0xec,0xf8,0x08,0x42,0x54,0x88,0x7b]
+ cfcmovbq 123(%r16,%r17,4), %r18
+
+# CHECK: cfcmovbq 123(%r16,%r17,4), %r18, %r19
+# CHECK: encoding: [0x62,0xec,0xe0,0x14,0x42,0x54,0x88,0x7b]
+ cfcmovbq 123(%r16,%r17,4), %r18, %r19
+
## MRM0m
# CHECK: vprorq $0, (%r16,%r17), %zmm0
@@ -122,12 +138,24 @@
# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x01,0xc1]
{nf} addq %r16, %r17
+## MRMDestRegCC
+
+# CHECK: cfcmovbq %r16, %r17
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x42,0xc1]
+ cfcmovbq %r16, %r17
+
## MRMSrcReg
# CHECK: mulxq %r16, %r17, %r18
# CHECK: encoding: [0x62,0xea,0xf7,0x00,0xf6,0xd0]
mulxq %r16, %r17, %r18
+## MRMSrcRegCC
+
+# CHECK: cfcmovbq %r16, %r17, %r18
+# CHECK: encoding: [0x62,0xec,0xec,0x14,0x42,0xc8]
+ cfcmovbq %r16, %r17, %r18
+
## MRMSrcReg4VOp3
# CHECK: bzhiq %r19, %r23, %r27
diff --git a/llvm/test/MC/X86/apx/evex-format-intel.s b/llvm/test/MC/X86/apx/evex-format-intel.s
index 06b5607..2b346e0 100644
--- a/llvm/test/MC/X86/apx/evex-format-intel.s
+++ b/llvm/test/MC/X86/apx/evex-format-intel.s
@@ -10,6 +10,12 @@
# CHECK: encoding: [0x62,0xec,0xec,0x10,0x01,0x41,0x7b]
add r18, qword ptr [r17 + 123], r16
+## MRMDestMemCC
+
+# CHECK: cfcmovb qword ptr [r17 + 4*r18 + 123], r16
+# CHECK: encoding: [0x62,0xec,0xf8,0x0c,0x42,0x44,0x91,0x7b]
+ cfcmovb qword ptr [r17 + 4*r18 + 123], r16
+
## MRMSrcMem
# CHECK: vbroadcasti32x4 zmm0, xmmword ptr [r16 + r17]
@@ -20,6 +26,16 @@
# CHECK: encoding: [0x62,0xec,0xec,0x10,0x2b,0x48,0x7b]
sub r18, r17, qword ptr [r16 + 123]
+## MRMSrcMemCC
+
+# CHECK: cfcmovb r18, qword ptr [r16 + 4*r17 + 123]
+# CHECK: encoding: [0x62,0xec,0xf8,0x08,0x42,0x54,0x88,0x7b]
+ cfcmovb r18, qword ptr [r16 + 4*r17 + 123]
+
+# CHECK: cfcmovb r19, r18, qword ptr [r16 + 4*r17 + 123]
+# CHECK: encoding: [0x62,0xec,0xe0,0x14,0x42,0x54,0x88,0x7b]
+ cfcmovb r19, r18, qword ptr [r16 + 4*r17 + 123]
+
## MRM0m
# CHECK: vprorq zmm0, zmmword ptr [r16 + r17], 0
@@ -122,12 +138,24 @@
# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x01,0xc1]
{nf} add r17, r16
+## MRMDestRegCC
+
+# CHECK: cfcmovb r17, r16
+# CHECK: encoding: [0x62,0xec,0xfc,0x0c,0x42,0xc1]
+ cfcmovb r17, r16
+
## MRMSrcReg
# CHECK: mulx r18, r17, r16
# CHECK: encoding: [0x62,0xea,0xf7,0x00,0xf6,0xd0]
mulx r18, r17, r16
+## MRMSrcRegCC
+
+# CHECK: cfcmovb r18, r17, r16
+# CHECK: encoding: [0x62,0xec,0xec,0x14,0x42,0xc8]
+ cfcmovb r18, r17, r16
+
## MRMSrcReg4VOp3
# CHECK: bzhi r27, r23, r19
diff --git a/llvm/test/MC/X86/apx/imulzu-att.s b/llvm/test/MC/X86/apx/imulzu-att.s
new file mode 100644
index 0000000..f56bfa7
--- /dev/null
+++ b/llvm/test/MC/X86/apx/imulzu-att.s
@@ -0,0 +1,41 @@
+# RUN: llvm-mc -triple x86_64 -show-encoding %s | FileCheck %s
+# RUN: not llvm-mc -triple i386 -show-encoding %s 2>&1 | FileCheck %s --check-prefix=ERROR
+
+# ERROR-COUNT-12: error:
+# ERROR-NOT: error:
+# CHECK: imulzuw $123, %dx, %dx
+# CHECK: encoding: [0x62,0xf4,0x7d,0x18,0x6b,0xd2,0x7b]
+ imulzuw $123, %dx, %dx
+# CHECK: imulzul $123, %ecx, %ecx
+# CHECK: encoding: [0x62,0xf4,0x7c,0x18,0x6b,0xc9,0x7b]
+ imulzul $123, %ecx, %ecx
+# CHECK: imulzuq $123, %r9, %r9
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x6b,0xc9,0x7b]
+ imulzuq $123, %r9, %r9
+# CHECK: imulzuw $123, 291(%r8,%rax,4), %dx
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x6b,0x94,0x80,0x23,0x01,0x00,0x00,0x7b]
+ imulzuw $123, 291(%r8,%rax,4), %dx
+# CHECK: imulzul $123, 291(%r8,%rax,4), %ecx
+# CHECK: encoding: [0x62,0xd4,0x7c,0x18,0x6b,0x8c,0x80,0x23,0x01,0x00,0x00,0x7b]
+ imulzul $123, 291(%r8,%rax,4), %ecx
+# CHECK: imulzuq $123, 291(%r8,%rax,4), %r9
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x6b,0x8c,0x80,0x23,0x01,0x00,0x00,0x7b]
+ imulzuq $123, 291(%r8,%rax,4), %r9
+# CHECK: imulzuw $1234, %dx, %dx
+# CHECK: encoding: [0x62,0xf4,0x7d,0x18,0x69,0xd2,0xd2,0x04]
+ imulzuw $1234, %dx, %dx
+# CHECK: imulzuw $1234, 291(%r8,%rax,4), %dx
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x69,0x94,0x80,0x23,0x01,0x00,0x00,0xd2,0x04]
+ imulzuw $1234, 291(%r8,%rax,4), %dx
+# CHECK: imulzul $123456, %ecx, %ecx
+# CHECK: encoding: [0x62,0xf4,0x7c,0x18,0x69,0xc9,0x40,0xe2,0x01,0x00]
+ imulzul $123456, %ecx, %ecx
+# CHECK: imulzuq $123456, %r9, %r9
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x69,0xc9,0x40,0xe2,0x01,0x00]
+ imulzuq $123456, %r9, %r9
+# CHECK: imulzul $123456, 291(%r8,%rax,4), %ecx
+# CHECK: encoding: [0x62,0xd4,0x7c,0x18,0x69,0x8c,0x80,0x23,0x01,0x00,0x00,0x40,0xe2,0x01,0x00]
+ imulzul $123456, 291(%r8,%rax,4), %ecx
+# CHECK: imulzuq $123456, 291(%r8,%rax,4), %r9
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x69,0x8c,0x80,0x23,0x01,0x00,0x00,0x40,0xe2,0x01,0x00]
+ imulzuq $123456, 291(%r8,%rax,4), %r9
diff --git a/llvm/test/MC/X86/apx/imulzu-intel.s b/llvm/test/MC/X86/apx/imulzu-intel.s
new file mode 100644
index 0000000..3a01fdc
--- /dev/null
+++ b/llvm/test/MC/X86/apx/imulzu-intel.s
@@ -0,0 +1,38 @@
+# RUN: llvm-mc -triple x86_64 -show-encoding -x86-asm-syntax=intel -output-asm-variant=1 %s | FileCheck %s
+
+# CHECK: imulzu dx, dx, 123
+# CHECK: encoding: [0x62,0xf4,0x7d,0x18,0x6b,0xd2,0x7b]
+ imulzu dx, dx, 123
+# CHECK: imulzu ecx, ecx, 123
+# CHECK: encoding: [0x62,0xf4,0x7c,0x18,0x6b,0xc9,0x7b]
+ imulzu ecx, ecx, 123
+# CHECK: imulzu r9, r9, 123
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x6b,0xc9,0x7b]
+ imulzu r9, r9, 123
+# CHECK: imulzu dx, word ptr [r8 + 4*rax + 291], 123
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x6b,0x94,0x80,0x23,0x01,0x00,0x00,0x7b]
+ imulzu dx, word ptr [r8 + 4*rax + 291], 123
+# CHECK: imulzu ecx, dword ptr [r8 + 4*rax + 291], 123
+# CHECK: encoding: [0x62,0xd4,0x7c,0x18,0x6b,0x8c,0x80,0x23,0x01,0x00,0x00,0x7b]
+ imulzu ecx, dword ptr [r8 + 4*rax + 291], 123
+# CHECK: imulzu r9, qword ptr [r8 + 4*rax + 291], 123
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x6b,0x8c,0x80,0x23,0x01,0x00,0x00,0x7b]
+ imulzu r9, qword ptr [r8 + 4*rax + 291], 123
+# CHECK: imulzu dx, dx, 1234
+# CHECK: encoding: [0x62,0xf4,0x7d,0x18,0x69,0xd2,0xd2,0x04]
+ imulzu dx, dx, 1234
+# CHECK: imulzu dx, word ptr [r8 + 4*rax + 291], 1234
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x69,0x94,0x80,0x23,0x01,0x00,0x00,0xd2,0x04]
+ imulzu dx, word ptr [r8 + 4*rax + 291], 1234
+# CHECK: imulzu ecx, ecx, 123456
+# CHECK: encoding: [0x62,0xf4,0x7c,0x18,0x69,0xc9,0x40,0xe2,0x01,0x00]
+ imulzu ecx, ecx, 123456
+# CHECK: imulzu r9, r9, 123456
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x69,0xc9,0x40,0xe2,0x01,0x00]
+ imulzu r9, r9, 123456
+# CHECK: imulzu ecx, dword ptr [r8 + 4*rax + 291], 123456
+# CHECK: encoding: [0x62,0xd4,0x7c,0x18,0x69,0x8c,0x80,0x23,0x01,0x00,0x00,0x40,0xe2,0x01,0x00]
+ imulzu ecx, dword ptr [r8 + 4*rax + 291], 123456
+# CHECK: imulzu r9, qword ptr [r8 + 4*rax + 291], 123456
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x69,0x8c,0x80,0x23,0x01,0x00,0x00,0x40,0xe2,0x01,0x00]
+ imulzu r9, qword ptr [r8 + 4*rax + 291], 123456
diff --git a/llvm/test/MachineVerifier/test_adjustsstack.mir b/llvm/test/MachineVerifier/test_adjustsstack.mir
new file mode 100644
index 0000000..d333737
--- /dev/null
+++ b/llvm/test/MachineVerifier/test_adjustsstack.mir
@@ -0,0 +1,26 @@
+# RUN: not --crash llc -o - -start-before=twoaddressinstruction -verify-machineinstrs %s 2>&1 \
+# RUN: | FileCheck %s
+# REQUIRES: aarch64-registered-target
+--- |
+ target triple = "aarch64-unknown-linux"
+ declare i32 @bar(i32) nounwind
+ define i32 @foo() nounwind {
+ call i32 @bar(i32 0)
+ ret i32 0
+ }
+...
+---
+name: foo
+registers:
+ - { id: 0, class: gpr32 }
+body: |
+ bb.0 (%ir-block.0):
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ %0 = COPY $wzr
+ $w0 = COPY %0
+ BL @bar, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $w0, implicit-def $sp, implicit-def $w0
+ ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ $w0 = COPY killed %0
+ RET_ReallyLR implicit $w0
+...
+# CHECK-LABEL: Bad machine code: AdjustsStack not set in presence of a frame pseudo instruction.
diff --git a/llvm/test/MachineVerifier/test_g_splat_vector.mir b/llvm/test/MachineVerifier/test_g_splat_vector.mir
index 0d1d8a3..0007434 100644
--- a/llvm/test/MachineVerifier/test_g_splat_vector.mir
+++ b/llvm/test/MachineVerifier/test_g_splat_vector.mir
@@ -22,6 +22,6 @@ body: |
; CHECK: Source type must be a scalar
%6:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %2
- ; CHECK: Element type of the destination must be the same type as the source type
- %7:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR %0
+ ; CHECK: Element type of the destination must be the same size or smaller than the source type
+ %7:_(<vscale x 2 x s128>) = G_SPLAT_VECTOR %0
...
diff --git a/llvm/test/MachineVerifier/test_g_ubsantrap.mir b/llvm/test/MachineVerifier/test_g_ubsantrap.mir
new file mode 100644
index 0000000..d2b219d
--- /dev/null
+++ b/llvm/test/MachineVerifier/test_g_ubsantrap.mir
@@ -0,0 +1,18 @@
+# RUN: not --crash llc -o - -mtriple=arm64 -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s
+# REQUIRES: aarch64-registered-target
+
+---
+name: test_ubsantrap
+tracksRegLiveness: true
+liveins:
+body: |
+ bb.0:
+
+ ; CHECK: Crash kind must be 8 bit wide
+ G_UBSANTRAP 4096
+
+ ; CHECK: Crash kind must be an immediate
+ %5:_(s32) = IMPLICIT_DEF
+ G_UBSANTRAP %5
+
+...
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-amplification.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-amplification.yaml
new file mode 100644
index 0000000..09885bd
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-amplification.yaml
@@ -0,0 +1,97 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 14
+ PayloadSizeInBytes: 4092
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ SigInputVectors: 0
+ SigOutputVectors: [ 8, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: ASEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 14
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 14
+# CHECK-NEXT: PayloadSizeInBytes: 4092
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 8, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: ASEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-compute.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-compute.yaml
new file mode 100644
index 0000000..ee6fb11
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-compute.yaml
@@ -0,0 +1,95 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 5
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ SigInputVectors: 0
+ SigOutputVectors: [ 8, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: CSEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 5
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 5
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 8, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: CSEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-domain.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-domain.yaml
new file mode 100644
index 0000000..dd367de
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-domain.yaml
@@ -0,0 +1,105 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 4
+ InputControlPointCount: 1024
+ OutputPositionPresent: 1
+ TessellatorDomain: 2056
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ SigPatchConstOrPrimVectors: 0
+ SigInputVectors: 0
+ SigOutputVectors: [ 0, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: DSEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ PatchOutputMap: []
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 4
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 4
+# CHECK-NEXT: InputControlPointCount: 1024
+# CHECK-NEXT: OutputPositionPresent: 1
+# CHECK-NEXT: TessellatorDomain: 2056
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: SigPatchConstOrPrimVectors: 0
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 0, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: DSEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: PatchOutputMap: [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-geometry.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-geometry.yaml
new file mode 100644
index 0000000..4c7680b
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-geometry.yaml
@@ -0,0 +1,105 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 2
+ InputPrimitive: 1024
+ OutputTopology: 4096
+ OutputStreamMask: 2056
+ OutputPositionPresent: 1
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ MaxVertexCount: 4096
+ SigInputVectors: 0
+ SigOutputVectors: [ 8, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: GSEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 2
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 2
+# CHECK-NEXT: InputPrimitive: 1024
+# CHECK-NEXT: OutputTopology: 4096
+# CHECK-NEXT: OutputStreamMask: 2056
+# CHECK-NEXT: OutputPositionPresent: 1
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: MaxVertexCount: 4096
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 8, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: GSEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-hull.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-hull.yaml
new file mode 100644
index 0000000..3bbad8a
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-hull.yaml
@@ -0,0 +1,107 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 3
+ InputControlPointCount: 1024
+ OutputControlPointCount: 4096
+ TessellatorDomain: 2056
+ TessellatorOutputPrimitive: 8192
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ SigPatchConstOrPrimVectors: 0
+ SigInputVectors: 0
+ SigOutputVectors: [ 0, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: HSEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ InputPatchMap: []
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 3
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 3
+# CHECK-NEXT: InputControlPointCount: 1024
+# CHECK-NEXT: OutputControlPointCount: 4096
+# CHECK-NEXT: TessellatorDomain: 2056
+# CHECK-NEXT: TessellatorOutputPrimitive: 8192
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: SigPatchConstOrPrimVectors: 0
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 0, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: HSEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: InputPatchMap: [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-mesh.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-mesh.yaml
new file mode 100644
index 0000000..c5ea1fc
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-mesh.yaml
@@ -0,0 +1,109 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 13
+ GroupSharedBytesUsed: 1024
+ GroupSharedBytesDependentOnViewID: 2056
+ PayloadSizeInBytes: 4092
+ MaxOutputVertices: 8196
+ MaxOutputPrimitives: 4092
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ SigPrimVectors: 128
+ MeshOutputTopology: 16
+ SigInputVectors: 0
+ SigOutputVectors: [ 8, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: MSEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 13
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 13
+# CHECK-NEXT: GroupSharedBytesUsed: 1024
+# CHECK-NEXT: GroupSharedBytesDependentOnViewID: 2056
+# CHECK-NEXT: PayloadSizeInBytes: 4092
+# CHECK-NEXT: MaxOutputVertices: 8196
+# CHECK-NEXT: MaxOutputPrimitives: 4092
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: SigPrimVectors: 128
+# CHECK-NEXT: MeshOutputTopology: 16
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 8, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: MSEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-pixel.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-pixel.yaml
new file mode 100644
index 0000000..b28d5ec
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-pixel.yaml
@@ -0,0 +1,99 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 0
+ DepthOutput: 7
+ SampleFrequency: 96
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ SigInputVectors: 0
+ SigOutputVectors: [ 8, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: PSEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 0
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 0
+# CHECK-NEXT: DepthOutput: 7
+# CHECK-NEXT: SampleFrequency: 96
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 8, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: PSEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-vertex.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-vertex.yaml
new file mode 100644
index 0000000..d1fb558
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-vertex.yaml
@@ -0,0 +1,97 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 1
+ OutputPositionPresent: 1
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ SigInputVectors: 0
+ SigOutputVectors: [ 8, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: VSEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 1
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 1
+# CHECK-NEXT: OutputPositionPresent: 1
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 8, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: VSEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/Other/optimize-inrange-gep.ll b/llvm/test/Other/optimize-inrange-gep.ll
index cc2bd15..2eae34b 100644
--- a/llvm/test/Other/optimize-inrange-gep.ll
+++ b/llvm/test/Other/optimize-inrange-gep.ll
@@ -1,4 +1,5 @@
-; RUN: opt -O0 -S < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -O0 -S < %s | FileCheck %s --check-prefix=O0
; RUN: opt -O1 -S < %s | FileCheck %s
; RUN: opt -O2 -S < %s | FileCheck %s
; RUN: opt -O3 -S < %s | FileCheck %s
@@ -7,12 +8,21 @@
target datalayout = "e-p:64:64"
-; Make sure that optimizations do not optimize inrange GEP.
+; Make sure that optimizations do not lose inrange information.
@vtable = constant { [3 x ptr] } { [3 x ptr] [ptr null, ptr null, ptr null] }
define void @foo(ptr %p) {
- ;CHECK: store ptr getelementptr {{.*}} ({ [3 x ptr] }, ptr @vtable, i{{.*}} 0, inrange i32 0, i{{.*}} 3), ptr %p
- store ptr getelementptr ({ [3 x ptr] }, ptr @vtable, i32 0, inrange i32 0, i32 3), ptr %p
+; O0-LABEL: define void @foo(
+; O0-SAME: ptr [[P:%.*]]) {
+; O0-NEXT: store ptr getelementptr inrange(-24, 0) ({ [3 x ptr], [3 x ptr] }, ptr @vtable, i32 0, i32 0, i32 3), ptr [[P]], align 8
+; O0-NEXT: ret void
+;
+; CHECK-LABEL: define void @foo(
+; CHECK-SAME: ptr nocapture writeonly [[P:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: store ptr getelementptr inbounds inrange(-24, 0) ({ [3 x ptr] }, ptr @vtable, i64 1, i32 0, i64 0), ptr [[P]], align 8
+; CHECK-NEXT: ret void
+;
+ store ptr getelementptr inrange(-24, 0) ({ [3 x ptr], [3 x ptr] }, ptr @vtable, i32 0, i32 0, i32 3), ptr %p
ret void
}
diff --git a/llvm/test/TableGen/ConcatenatedSubregs.td b/llvm/test/TableGen/ConcatenatedSubregs.td
index 5b354c9..ea4e7f0 100644
--- a/llvm/test/TableGen/ConcatenatedSubregs.td
+++ b/llvm/test/TableGen/ConcatenatedSubregs.td
@@ -90,16 +90,19 @@ def TestTarget : Target;
// CHECK-LABEL: RegisterClass DRegs:
// CHECK-LABEL: SubRegIndex ssub1:
-// CHECK: Offset, Size: 16, 16
+// CHECK: Offset: { Default:16 }
+// CHECK: Size: { Default:16 }
// CHECK-LABEL: SubRegIndex sub0:
// CHECK-LABEL: SubRegIndex sub1:
// CHECK-LABEL: SubRegIndex sub2:
// Check inferred indexes:
// CHECK-LABEL: SubRegIndex ssub1_ssub2:
-// CHECK: Offset, Size: 16, 65535
+// CHECK: Offset: { Default:16 }
+// CHECK: Size: { Default:65535 }
// CHECK-LABEL: SubRegIndex ssub3_ssub4:
// CHECK-LABEL: SubRegIndex ssub0_ssub1_ssub2_ssub3:
-// CHECK: Offset, Size: 65535, 65535
+// CHECK: Offset: { Default:65535 }
+// CHECK: Size: { Default:65535 }
// CHECK-LABEL: SubRegIndex ssub1_ssub2_ssub3_ssub4:
// Check that all subregs are generated on some examples
diff --git a/llvm/test/TableGen/ConstraintChecking3.td b/llvm/test/TableGen/ConstraintChecking3.td
index 2d5fe6b..886e6d5 100644
--- a/llvm/test/TableGen/ConstraintChecking3.td
+++ b/llvm/test/TableGen/ConstraintChecking3.td
@@ -4,5 +4,5 @@ include "ConstraintChecking.inc"
// (This is illegal because the '=' has to be surrounded by whitespace)
-// CHECK: [[FILE]]:[[@LINE+1]]:5: error: Illegal format for tied-to constraint in 'Foo'
+// CHECK: [[FILE]]:[[@LINE+1]]:5: error: Unrecognized constraint '$dest1=$dest2' in 'Foo'
def Foo : TestInstructionWithConstraints<"$dest1=$dest2">;
diff --git a/llvm/test/TableGen/ConstraintChecking8.td b/llvm/test/TableGen/ConstraintChecking8.td
new file mode 100644
index 0000000..37d3515
--- /dev/null
+++ b/llvm/test/TableGen/ConstraintChecking8.td
@@ -0,0 +1,34 @@
+// RUN: not llvm-tblgen -gen-asm-writer -DT0 -I %p -I %p/../../include %s 2>&1 | FileCheck %s -DFILE=%s
+// RUN: not llvm-tblgen -gen-asm-writer -DT1 -I %p -I %p/../../include %s 2>&1 | FileCheck %s -DFILE=%s --check-prefix=CHECK1
+// RUN: not llvm-tblgen -gen-asm-writer -DT2 -I %p -I %p/../../include %s 2>&1 | FileCheck %s -DFILE=%s --check-prefix=CHECK2
+// RUN: not llvm-tblgen -gen-asm-writer -DT3 -I %p -I %p/../../include %s 2>&1 | FileCheck %s -DFILE=%s --check-prefix=CHECK3
+// RUN: not llvm-tblgen -gen-asm-writer -DT4 -I %p -I %p/../../include %s 2>&1 | FileCheck %s -DFILE=%s --check-prefix=CHECK4
+
+include "ConstraintChecking.inc"
+
+// Make sure the constraint uses exactly the token "=" (not "==" or "!=") and has an operand on each side.
+
+#ifdef T0
+// CHECK: [[FILE]]:[[@LINE+1]]:5: error: Unrecognized constraint '$dest1 != $src2' in 'Foo'
+def Foo : TestInstructionWithConstraints<"$dest1 != $src2">;
+#endif
+
+#ifdef T1
+// CHECK1: [[FILE]]:[[@LINE+1]]:5: error: Unrecognized constraint '$dest1 == $src2' in 'Foo'
+def Foo : TestInstructionWithConstraints<"$dest1 == $src2">;
+#endif
+
+#ifdef T2
+// CHECK2: [[FILE]]:[[@LINE+1]]:5: error: Unrecognized constraint '= $rhs' in 'Foo'
+def Foo : TestInstructionWithConstraints<"= $rhs">;
+#endif
+
+#ifdef T3
+// CHECK3: [[FILE]]:[[@LINE+1]]:5: error: Unrecognized constraint '$lhs =' in 'Foo'
+def Foo : TestInstructionWithConstraints<"$lhs =">;
+#endif
+
+#ifdef T4
+// CHECK4: [[FILE]]:[[@LINE+1]]:5: error: Unrecognized constraint '=' in 'Foo'
+def Foo : TestInstructionWithConstraints<"=">;
+#endif
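For contrast, a constraint string the parser does accept (a sketch, not a test added by this patch) keeps the '=' as a standalone, whitespace-separated token with an operand on each side:

  def Legal : TestInstructionWithConstraints<"$dest1 = $dest2">;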
diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td
index 5cf4e04..0189d3d 100644
--- a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td
+++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td
@@ -114,14 +114,17 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: void GenMyCombiner::runCustomAction(unsigned ApplyID, const MatcherState &State, NewMIVector &OutMIs) const {
// CHECK-NEXT: switch(ApplyID) {
// CHECK-NEXT: case GICXXCustomAction_CombineApplyGICombiner0:{
+// CHECK-NEXT: Helper.getBuilder().setInstrAndDebugLoc(*State.MIs[0]);
// CHECK-NEXT: APPLY
// CHECK-NEXT: return;
// CHECK-NEXT: }
// CHECK-NEXT: case GICXXCustomAction_CombineApplyGICombiner1:{
+// CHECK-NEXT: Helper.getBuilder().setInstrAndDebugLoc(*State.MIs[0]);
// CHECK-NEXT: APPLY MatchInfos.MDInfo0, MatchInfos.MDInfo1
// CHECK-NEXT: return;
// CHECK-NEXT: }
// CHECK-NEXT: case GICXXCustomAction_CombineApplyGICombiner2:{
+// CHECK-NEXT: Helper.getBuilder().setInstrAndDebugLoc(*State.MIs[0]);
// CHECK-NEXT: APPLY State.MIs[1]->getOperand(1) State.MIs[0]->getOperand(1) OutMIs[0]
// CHECK-NEXT: return;
// CHECK-NEXT: }
diff --git a/llvm/test/TableGen/HwModeEncodeDecode2.td b/llvm/test/TableGen/HwModeEncodeDecode2.td
index 5159501..cf96dda 100644
--- a/llvm/test/TableGen/HwModeEncodeDecode2.td
+++ b/llvm/test/TableGen/HwModeEncodeDecode2.td
@@ -1,6 +1,6 @@
// RUN: llvm-tblgen -gen-disassembler -I %p/../../include %s | \
// RUN: FileCheck %s --check-prefix=DECODER
-// RUN: llvm-tblgen -gen-disassembler --suppress-per-hwmode-duplicates -I \
+// RUN: llvm-tblgen -gen-disassembler --suppress-per-hwmode-duplicates=O2 -I \
// RUN: %p/../../include %s | FileCheck %s --check-prefix=DECODER-SUPPRESS
// Test duplicate table suppression for per-HwMode decoders.
@@ -105,11 +105,10 @@ let OutOperandList = (outs) in {
// DECODER-DAG: Opcode: fooTypeEncA:baz
// DECODER-DAG: Opcode: bar
-
-// DECODER-SUPPRESS-LABEL: DecoderTableAlt_AllModes32[] =
-// DECODER-SUPPRESS-DAG: Opcode: unrelated
-// DECODER-SUPPRESS-LABEL: DecoderTable_AllModes32[] =
+// DECODER-SUPPRESS-LABEL: DecoderTable32[] =
// DECODER-SUPPRESS-DAG: Opcode: bar
+// DECODER-SUPPRESS-LABEL: DecoderTableAlt32[] =
+// DECODER-SUPPRESS-DAG: Opcode: unrelated
// DECODER-SUPPRESS-LABEL: DecoderTable_ModeA32[] =
// DECODER-SUPPRESS-DAG: Opcode: fooTypeEncA:foo
// DECODER-SUPPRESS-NOT: Opcode: bar
diff --git a/llvm/test/TableGen/HwModeEncodeDecode3.td b/llvm/test/TableGen/HwModeEncodeDecode3.td
index 406e52d..8e0266b 100644
--- a/llvm/test/TableGen/HwModeEncodeDecode3.td
+++ b/llvm/test/TableGen/HwModeEncodeDecode3.td
@@ -2,8 +2,10 @@
// RUN: FileCheck %s --check-prefix=ENCODER
// RUN: llvm-tblgen -gen-disassembler -I %p/../../include %s | \
// RUN: FileCheck %s --check-prefix=DECODER
-// RUN: llvm-tblgen -gen-disassembler --suppress-per-hwmode-duplicates -I \
-// RUN: %p/../../include %s | FileCheck %s --check-prefix=DECODER-SUPPRESS
+// RUN: llvm-tblgen -gen-disassembler --suppress-per-hwmode-duplicates=O1 -I \
+// RUN: %p/../../include %s | FileCheck %s --check-prefix=DECODER-SUPPRESS-O1
+// RUN: llvm-tblgen -gen-disassembler --suppress-per-hwmode-duplicates=O2 -I \
+// RUN: %p/../../include %s | FileCheck %s --check-prefix=DECODER-SUPPRESS-O2
include "llvm/Target/Target.td"
@@ -99,16 +101,20 @@ def unrelated: Instruction {
}
-// DECODER-LABEL: DecoderTableAlt_DefaultMode32[] =
+// Under default settings, using 'HwMode' to dictate instruction encodings results in
+// significant duplication of DecoderTables. The three tables 'DecoderTableAlt32',
+// 'DecoderTableAlt_ModeA32', and 'DecoderTableAlt_ModeB32' are exact duplicates and
+// could effectively be merged into one.
+// DECODER-LABEL: DecoderTable32[] =
+// DECODER-DAG: Opcode: bar
+// DECODER-LABEL: DecoderTable64[] =
+// DECODER-DAG: Opcode: fooTypeEncDefault:foo
+// DECODER-LABEL: DecoderTableAlt32[] =
// DECODER-DAG: Opcode: unrelated
// DECODER-LABEL: DecoderTableAlt_ModeA32[] =
// DECODER-DAG: Opcode: unrelated
// DECODER-LABEL: DecoderTableAlt_ModeB32[] =
// DECODER-DAG: Opcode: unrelated
-// DECODER-LABEL: DecoderTable_DefaultMode32[] =
-// DECODER-DAG: Opcode: bar
-// DECODER-LABEL: DecoderTable_DefaultMode64[] =
-// DECODER-DAG: Opcode: fooTypeEncDefault:foo
// DECODER-LABEL: DecoderTable_ModeA32[] =
// DECODER-DAG: Opcode: fooTypeEncA:foo
// DECODER-DAG: Opcode: bar
@@ -117,21 +123,42 @@ def unrelated: Instruction {
// DECODER-DAG: Opcode: fooTypeEncA:baz
// DECODER-DAG: Opcode: bar
-
-// DECODER-SUPPRESS-LABEL: DecoderTableAlt_AllModes32[] =
-// DECODER-SUPPRESS-DAG: Opcode: unrelated
-// DECODER-SUPPRESS-LABEL: DecoderTable_AllModes32[] =
-// DECODER-SUPPRESS-DAG: Opcode: bar
-// DECODER-SUPPRESS-LABEL: DecoderTable_DefaultMode64[] =
-// DECODER-SUPPRESS-NOT: Opcode: bar
-// DECODER-SUPPRESS-DAG: Opcode: fooTypeEncDefault:foo
-// DECODER-SUPPRESS-LABEL: DecoderTable_ModeA32[] =
-// DECODER-SUPPRESS-DAG: Opcode: fooTypeEncA:foo
-// DECODER-SUPPRESS-NOT: Opcode: bar
-// DECODER-SUPPRESS-LABEL: DecoderTable_ModeB32[] =
-// DECODER-SUPPRESS-DAG: Opcode: fooTypeEncB:foo
-// DECODER-SUPPRESS-DAG: Opcode: fooTypeEncA:baz
-// DECODER-SUPPRESS-NOT: Opcode: bar
+// Under the 'O1' optimization level, unnecessary duplicate tables will be eliminated,
+// reducing the three 'Alt' tables down to just one.
+// DECODER-SUPPRESS-O1-LABEL: DecoderTable32[] =
+// DECODER-SUPPRESS-O1-DAG: Opcode: bar
+// DECODER-SUPPRESS-O1-LABEL: DecoderTable64[] =
+// DECODER-SUPPRESS-O1-DAG: Opcode: fooTypeEncDefault:foo
+// DECODER-SUPPRESS-O1-LABEL: DecoderTableAlt32[] =
+// DECODER-SUPPRESS-O1-DAG: Opcode: unrelated
+// DECODER-SUPPRESS-O1-LABEL: DecoderTable_ModeA32[] =
+// DECODER-SUPPRESS-O1-DAG: Opcode: fooTypeEncA:foo
+// DECODER-SUPPRESS-O1-DAG: Opcode: bar
+// DECODER-SUPPRESS-O1-LABEL: DecoderTable_ModeB32[] =
+// DECODER-SUPPRESS-O1-DAG: Opcode: fooTypeEncB:foo
+// DECODER-SUPPRESS-O1-DAG: Opcode: fooTypeEncA:baz
+// DECODER-SUPPRESS-O1-DAG: Opcode: bar
+
+// Under the 'O2' optimization level, instructions with the 'EncodingByHwMode'
+// attribute are extracted from their original DecoderNamespace and placed into their
+// respective HwMode tables, while instructions in the same DecoderNamespace that lack
+// 'EncodingByHwMode' are kept in the 'Default' table. This significantly reduces
+// instruction redundancy, but it requires users to consider carefully how HwMode and
+// DecoderNamespace interact for their instructions.
+// DECODER-SUPPRESS-O2-LABEL: DecoderTable32[] =
+// DECODER-SUPPRESS-O2-DAG: Opcode: bar
+// DECODER-SUPPRESS-O2-LABEL: DecoderTable64[] =
+// DECODER-SUPPRESS-O2-NOT: Opcode: bar
+// DECODER-SUPPRESS-O2-DAG: Opcode: fooTypeEncDefault:foo
+// DECODER-SUPPRESS-O2-LABEL: DecoderTableAlt32[] =
+// DECODER-SUPPRESS-O2-DAG: Opcode: unrelated
+// DECODER-SUPPRESS-O2-LABEL: DecoderTable_ModeA32[] =
+// DECODER-SUPPRESS-O2-DAG: Opcode: fooTypeEncA:foo
+// DECODER-SUPPRESS-O2-NOT: Opcode: bar
+// DECODER-SUPPRESS-O2-LABEL: DecoderTable_ModeB32[] =
+// DECODER-SUPPRESS-O2-DAG: Opcode: fooTypeEncB:foo
+// DECODER-SUPPRESS-O2-DAG: Opcode: fooTypeEncA:baz
+// DECODER-SUPPRESS-O2-NOT: Opcode: bar
// ENCODER-LABEL: static const uint64_t InstBits_DefaultMode[] = {
// ENCODER: UINT64_C(2), // bar
diff --git a/llvm/test/TableGen/HwModeSubRegs.td b/llvm/test/TableGen/HwModeSubRegs.td
new file mode 100644
index 0000000..2bf7a917
--- /dev/null
+++ b/llvm/test/TableGen/HwModeSubRegs.td
@@ -0,0 +1,75 @@
+// RUN: llvm-tblgen -gen-register-info -register-info-debug -I %p/../../include %s -o /dev/null 2>&1 | FileCheck %s
+include "llvm/Target/Target.td"
+
+def HasFeat : Predicate<"Subtarget->hasFeat()">;
+
+def TestMode : HwMode<"+feat1", [HasFeat]>;
+
+class MyReg<string n>
+ : Register<n> {
+ let Namespace = "Test";
+}
+class MyClass<int size, list<ValueType> types, dag registers>
+ : RegisterClass<"Test", types, size, registers> {
+ let Size = size;
+}
+
+def X0 : MyReg<"x0">;
+def X1 : MyReg<"x1">;
+def X2 : MyReg<"x2">;
+def X3 : MyReg<"x3">;
+def X4 : MyReg<"x4">;
+def X5 : MyReg<"x5">;
+def X6 : MyReg<"x6">;
+def X7 : MyReg<"x7">;
+def X8 : MyReg<"x8">;
+def X9 : MyReg<"x9">;
+def X10 : MyReg<"x10">;
+def X11 : MyReg<"x11">;
+def X12 : MyReg<"x12">;
+def X13 : MyReg<"x13">;
+def X14 : MyReg<"x14">;
+def X15 : MyReg<"x15">;
+
+def ModeVT : ValueTypeByHwMode<[DefaultMode, TestMode],
+ [i32, i64]>;
+let RegInfos = RegInfoByHwMode<[DefaultMode, TestMode],
+ [RegInfo<32,32,32>, RegInfo<64,64,64>]> in
+def XRegs : MyClass<32, [ModeVT], (sequence "X%u", 0, 15)>;
+
+def sub_even : SubRegIndex<32> {
+ let SubRegRanges = SubRegRangeByHwMode<[DefaultMode, TestMode],
+ [SubRegRange<32>, SubRegRange<64>]>;
+}
+def sub_odd : SubRegIndex<32, 32> {
+ let SubRegRanges = SubRegRangeByHwMode<[DefaultMode, TestMode],
+ [SubRegRange<32, 32>, SubRegRange<64, 64>]>;
+}
+
+def XPairs : RegisterTuples<[sub_even, sub_odd],
+ [(decimate (rotl XRegs, 0), 2),
+ (decimate (rotl XRegs, 1), 2)]>;
+
+let RegInfos = RegInfoByHwMode<[DefaultMode, TestMode],
+ [RegInfo<64,64,32>, RegInfo<128,128,64>]> in
+def XPairsClass : MyClass<64, [untyped], (add XPairs)>;
+
+def TestTarget : Target;
+
+// CHECK-LABEL: RegisterClass XRegs:
+// CHECK: SpillSize: { Default:32 TestMode:64 }
+// CHECK: SpillAlignment: { Default:32 TestMode:64 }
+// CHECK: Regs: X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+
+// CHECK-LABEL: RegisterClass XPairsClass:
+// CHECK: SpillSize: { Default:64 TestMode:128 }
+// CHECK: SpillAlignment: { Default:32 TestMode:64 }
+// CHECK: CoveredBySubRegs: 1
+// CHECK: Regs: X0_X1 X2_X3 X4_X5 X6_X7 X8_X9 X10_X11 X12_X13 X14_X15
+
+// CHECK-LABEL: SubRegIndex sub_even:
+// CHECK: Offset: { Default:0 TestMode:0 }
+// CHECK: Size: { Default:32 TestMode:64 }
+// CHECK-LABEL: SubRegIndex sub_odd:
+// CHECK: Offset: { Default:32 TestMode:64 }
+// CHECK: Size: { Default:32 TestMode:64 }
diff --git a/llvm/test/TableGen/MacroFusion.td b/llvm/test/TableGen/MacroFusion.td
index ce76e7f..6cf22f5 100644
--- a/llvm/test/TableGen/MacroFusion.td
+++ b/llvm/test/TableGen/MacroFusion.td
@@ -33,6 +33,8 @@ let Namespace = "Test" in {
def Inst0 : TestInst<0>;
def Inst1 : TestInst<1>;
+let isCommutable = true in
+def Inst2 : TestInst<2>;
def BothFusionPredicate: BothFusionPredicateWithMCInstPredicate<CheckRegOperand<0, X0>>;
def TestBothFusionPredicate: Fusion<"test-both-fusion-predicate", "HasBothFusionPredicate",
@@ -42,16 +44,32 @@ def TestBothFusionPredicate: Fusion<"test-both-fusion-predicate", "HasBothFusion
def TestFusion: SimpleFusion<"test-fusion", "HasTestFusion", "Test Fusion",
CheckOpcode<[Inst0]>,
CheckAll<[
- CheckOpcode<[Inst1]>,
- CheckRegOperand<0, X0>
+ CheckOpcode<[Inst1]>,
+ CheckRegOperand<0, X0>
]>>;
+let IsCommutable = 1 in
+def TestCommutableFusion: SimpleFusion<"test-commutable-fusion", "HasTestCommutableFusion",
+ "Test Commutable Fusion",
+ CheckOpcode<[Inst0]>,
+ CheckAll<[
+ CheckOpcode<[Inst1]>,
+ CheckRegOperand<0, X0>
+ ]>>;
+
+def TestSingleFusion: SingleFusion<"test-single-fusion", "HasTestSingleFusion",
+ "Test SingleFusion",
+ Inst0, Inst2,
+ secondInstPred=CheckRegOperand<0, X0>>;
+
// CHECK-PREDICATOR: #ifdef GET_Test_MACRO_FUSION_PRED_DECL
// CHECK-PREDICATOR-NEXT: #undef GET_Test_MACRO_FUSION_PRED_DECL
// CHECK-PREDICATOR-EMPTY:
// CHECK-PREDICATOR-NEXT: namespace llvm {
// CHECK-PREDICATOR-NEXT: bool isTestBothFusionPredicate(const TargetInstrInfo &, const TargetSubtargetInfo &, const MachineInstr *, const MachineInstr &);
+// CHECK-PREDICATOR-NEXT: bool isTestCommutableFusion(const TargetInstrInfo &, const TargetSubtargetInfo &, const MachineInstr *, const MachineInstr &);
// CHECK-PREDICATOR-NEXT: bool isTestFusion(const TargetInstrInfo &, const TargetSubtargetInfo &, const MachineInstr *, const MachineInstr &);
+// CHECK-PREDICATOR-NEXT: bool isTestSingleFusion(const TargetInstrInfo &, const TargetSubtargetInfo &, const MachineInstr *, const MachineInstr &);
// CHECK-PREDICATOR-NEXT: } // end namespace llvm
// CHECK-PREDICATOR-EMPTY:
// CHECK-PREDICATOR-NEXT: #endif
@@ -65,7 +83,7 @@ def TestFusion: SimpleFusion<"test-fusion", "HasTestFusion", "Test Fusion",
// CHECK-PREDICATOR-NEXT: const TargetSubtargetInfo &STI,
// CHECK-PREDICATOR-NEXT: const MachineInstr *FirstMI,
// CHECK-PREDICATOR-NEXT: const MachineInstr &SecondMI) {
-// CHECK-PREDICATOR-NEXT: auto &MRI = SecondMI.getMF()->getRegInfo();
+// CHECK-PREDICATOR-NEXT: {{[[]}}{{[[]}}maybe_unused{{[]]}}{{[]]}} auto &MRI = SecondMI.getMF()->getRegInfo();
// CHECK-PREDICATOR-NEXT: {
// CHECK-PREDICATOR-NEXT: const MachineInstr *MI = FirstMI;
// CHECK-PREDICATOR-NEXT: if (MI->getOperand(0).getReg() != Test::X0)
@@ -78,12 +96,12 @@ def TestFusion: SimpleFusion<"test-fusion", "HasTestFusion", "Test Fusion",
// CHECK-PREDICATOR-NEXT: }
// CHECK-PREDICATOR-NEXT: return true;
// CHECK-PREDICATOR-NEXT: }
-// CHECK-PREDICATOR-NEXT: bool isTestFusion(
+// CHECK-PREDICATOR-NEXT: bool isTestCommutableFusion(
// CHECK-PREDICATOR-NEXT: const TargetInstrInfo &TII,
// CHECK-PREDICATOR-NEXT: const TargetSubtargetInfo &STI,
// CHECK-PREDICATOR-NEXT: const MachineInstr *FirstMI,
// CHECK-PREDICATOR-NEXT: const MachineInstr &SecondMI) {
-// CHECK-PREDICATOR-NEXT: auto &MRI = SecondMI.getMF()->getRegInfo();
+// CHECK-PREDICATOR-NEXT: {{[[]}}{{[[]}}maybe_unused{{[]]}}{{[]]}} auto &MRI = SecondMI.getMF()->getRegInfo();
// CHECK-PREDICATOR-NEXT: {
// CHECK-PREDICATOR-NEXT: const MachineInstr *MI = &SecondMI;
// CHECK-PREDICATOR-NEXT: if (!(
@@ -99,14 +117,58 @@ def TestFusion: SimpleFusion<"test-fusion", "HasTestFusion", "Test Fusion",
// CHECK-PREDICATOR-NEXT: if (( MI->getOpcode() != Test::Inst0 ))
// CHECK-PREDICATOR-NEXT: return false;
// CHECK-PREDICATOR-NEXT: }
+// CHECK-PREDICATOR-NEXT: if (!SecondMI.getOperand(0).getReg().isVirtual()) {
+// CHECK-PREDICATOR-NEXT: if (SecondMI.getOperand(0).getReg() != SecondMI.getOperand(1).getReg()) {
+// CHECK-PREDICATOR-NEXT: if (!SecondMI.getDesc().isCommutable())
+// CHECK-PREDICATOR-NEXT: return false;
+// CHECK-PREDICATOR-NEXT: unsigned SrcOpIdx1 = 1, SrcOpIdx2 = TargetInstrInfo::CommuteAnyOperandIndex;
+// CHECK-PREDICATOR-NEXT: if (TII.findCommutedOpIndices(SecondMI, SrcOpIdx1, SrcOpIdx2))
+// CHECK-PREDICATOR-NEXT: if (SecondMI.getOperand(0).getReg() != SecondMI.getOperand(SrcOpIdx2).getReg())
+// CHECK-PREDICATOR-NEXT: return false;
+// CHECK-PREDICATOR-NEXT: }
+// CHECK-PREDICATOR-NEXT: }
+// CHECK-PREDICATOR-NEXT: {
+// CHECK-PREDICATOR-NEXT: Register FirstDest = FirstMI->getOperand(0).getReg();
+// CHECK-PREDICATOR-NEXT: if (FirstDest.isVirtual() && !MRI.hasOneNonDBGUse(FirstDest))
+// CHECK-PREDICATOR-NEXT: return false;
+// CHECK-PREDICATOR-NEXT: }
+// CHECK-PREDICATOR-NEXT: if (!(FirstMI->getOperand(0).isReg() &&
+// CHECK-PREDICATOR-NEXT: SecondMI.getOperand(1).isReg() &&
+// CHECK-PREDICATOR-NEXT: FirstMI->getOperand(0).getReg() == SecondMI.getOperand(1).getReg())) {
+// CHECK-PREDICATOR-NEXT: if (!SecondMI.getDesc().isCommutable())
+// CHECK-PREDICATOR-NEXT: return false;
+// CHECK-PREDICATOR-NEXT: unsigned SrcOpIdx1 = 1, SrcOpIdx2 = TargetInstrInfo::CommuteAnyOperandIndex;
+// CHECK-PREDICATOR-NEXT: if (TII.findCommutedOpIndices(SecondMI, SrcOpIdx1, SrcOpIdx2))
+// CHECK-PREDICATOR-NEXT: if (FirstMI->getOperand(0).getReg() != SecondMI.getOperand(SrcOpIdx2).getReg())
+// CHECK-PREDICATOR-NEXT: return false;
+// CHECK-PREDICATOR-NEXT: }
+// CHECK-PREDICATOR-NEXT: return true;
+// CHECK-PREDICATOR-NEXT: }
+// CHECK-PREDICATOR-NEXT: bool isTestFusion(
+// CHECK-PREDICATOR-NEXT: const TargetInstrInfo &TII,
+// CHECK-PREDICATOR-NEXT: const TargetSubtargetInfo &STI,
+// CHECK-PREDICATOR-NEXT: const MachineInstr *FirstMI,
+// CHECK-PREDICATOR-NEXT: const MachineInstr &SecondMI) {
+// CHECK-PREDICATOR-NEXT: {{[[]}}{{[[]}}maybe_unused{{[]]}}{{[]]}} auto &MRI = SecondMI.getMF()->getRegInfo();
// CHECK-PREDICATOR-NEXT: {
// CHECK-PREDICATOR-NEXT: const MachineInstr *MI = &SecondMI;
// CHECK-PREDICATOR-NEXT: if (!(
-// CHECK-PREDICATOR-NEXT: MI->getOperand(0).getReg().isVirtual()
-// CHECK-PREDICATOR-NEXT: || MI->getOperand(0).getReg() == MI->getOperand(1).getReg()
+// CHECK-PREDICATOR-NEXT: ( MI->getOpcode() == Test::Inst1 )
+// CHECK-PREDICATOR-NEXT: && MI->getOperand(0).getReg() == Test::X0
// CHECK-PREDICATOR-NEXT: ))
// CHECK-PREDICATOR-NEXT: return false;
// CHECK-PREDICATOR-NEXT: }
+// CHECK-PREDICATOR-NEXT: if (!FirstMI)
+// CHECK-PREDICATOR-NEXT: return true;
+// CHECK-PREDICATOR-NEXT: {
+// CHECK-PREDICATOR-NEXT: const MachineInstr *MI = FirstMI;
+// CHECK-PREDICATOR-NEXT: if (( MI->getOpcode() != Test::Inst0 ))
+// CHECK-PREDICATOR-NEXT: return false;
+// CHECK-PREDICATOR-NEXT: }
+// CHECK-PREDICATOR-NEXT: if (!SecondMI.getOperand(0).getReg().isVirtual()) {
+// CHECK-PREDICATOR-NEXT: if (SecondMI.getOperand(0).getReg() != SecondMI.getOperand(1).getReg())
+// CHECK-PREDICATOR-NEXT: return false;
+// CHECK-PREDICATOR-NEXT: }
// CHECK-PREDICATOR-NEXT: {
// CHECK-PREDICATOR-NEXT: Register FirstDest = FirstMI->getOperand(0).getReg();
// CHECK-PREDICATOR-NEXT: if (FirstDest.isVirtual() && !MRI.hasOneNonDBGUse(FirstDest))
@@ -118,12 +180,66 @@ def TestFusion: SimpleFusion<"test-fusion", "HasTestFusion", "Test Fusion",
// CHECK-PREDICATOR-NEXT: return false;
// CHECK-PREDICATOR-NEXT: return true;
// CHECK-PREDICATOR-NEXT: }
+// CHECK-PREDICATOR-NEXT: bool isTestSingleFusion(
+// CHECK-PREDICATOR-NEXT: const TargetInstrInfo &TII,
+// CHECK-PREDICATOR-NEXT: const TargetSubtargetInfo &STI,
+// CHECK-PREDICATOR-NEXT: const MachineInstr *FirstMI,
+// CHECK-PREDICATOR-NEXT: const MachineInstr &SecondMI) {
+// CHECK-PREDICATOR-NEXT: {{[[]}}{{[[]}}maybe_unused{{[]]}}{{[]]}} auto &MRI = SecondMI.getMF()->getRegInfo();
+// CHECK-PREDICATOR-NEXT: {
+// CHECK-PREDICATOR-NEXT: const MachineInstr *MI = &SecondMI;
+// CHECK-PREDICATOR-NEXT: if (!(
+// CHECK-PREDICATOR-NEXT: ( MI->getOpcode() == Test::Inst2 )
+// CHECK-PREDICATOR-NEXT: && MI->getOperand(0).getReg() == Test::X0
+// CHECK-PREDICATOR-NEXT: ))
+// CHECK-PREDICATOR-NEXT: return false;
+// CHECK-PREDICATOR-NEXT: }
+// CHECK-PREDICATOR-NEXT: if (!FirstMI)
+// CHECK-PREDICATOR-NEXT: return true;
+// CHECK-PREDICATOR-NEXT: {
+// CHECK-PREDICATOR-NEXT: const MachineInstr *MI = FirstMI;
+// CHECK-PREDICATOR-NEXT: if (!(
+// CHECK-PREDICATOR-NEXT: ( MI->getOpcode() == Test::Inst0 )
+// CHECK-PREDICATOR-NEXT: && true
+// CHECK-PREDICATOR-NEXT: ))
+// CHECK-PREDICATOR-NEXT: return false;
+// CHECK-PREDICATOR-NEXT: }
+// CHECK-PREDICATOR-NEXT: if (!SecondMI.getOperand(0).getReg().isVirtual()) {
+// CHECK-PREDICATOR-NEXT: if (SecondMI.getOperand(0).getReg() != SecondMI.getOperand(1).getReg()) {
+// CHECK-PREDICATOR-NEXT: if (!SecondMI.getDesc().isCommutable())
+// CHECK-PREDICATOR-NEXT: return false;
+// CHECK-PREDICATOR-NEXT: unsigned SrcOpIdx1 = 1, SrcOpIdx2 = TargetInstrInfo::CommuteAnyOperandIndex;
+// CHECK-PREDICATOR-NEXT: if (TII.findCommutedOpIndices(SecondMI, SrcOpIdx1, SrcOpIdx2))
+// CHECK-PREDICATOR-NEXT: if (SecondMI.getOperand(0).getReg() != SecondMI.getOperand(SrcOpIdx2).getReg())
+// CHECK-PREDICATOR-NEXT: return false;
+// CHECK-PREDICATOR-NEXT: }
+// CHECK-PREDICATOR-NEXT: }
+// CHECK-PREDICATOR-NEXT: {
+// CHECK-PREDICATOR-NEXT: Register FirstDest = FirstMI->getOperand(0).getReg();
+// CHECK-PREDICATOR-NEXT: if (FirstDest.isVirtual() && !MRI.hasOneNonDBGUse(FirstDest))
+// CHECK-PREDICATOR-NEXT: return false;
+// CHECK-PREDICATOR-NEXT: }
+// CHECK-PREDICATOR-NEXT: if (!(FirstMI->getOperand(0).isReg() &&
+// CHECK-PREDICATOR-NEXT: SecondMI.getOperand(1).isReg() &&
+// CHECK-PREDICATOR-NEXT: FirstMI->getOperand(0).getReg() == SecondMI.getOperand(1).getReg())) {
+// CHECK-PREDICATOR-NEXT: if (!SecondMI.getDesc().isCommutable())
+// CHECK-PREDICATOR-NEXT: return false;
+// CHECK-PREDICATOR-NEXT: unsigned SrcOpIdx1 = 1, SrcOpIdx2 = TargetInstrInfo::CommuteAnyOperandIndex;
+// CHECK-PREDICATOR-NEXT: if (TII.findCommutedOpIndices(SecondMI, SrcOpIdx1, SrcOpIdx2))
+// CHECK-PREDICATOR-NEXT: if (FirstMI->getOperand(0).getReg() != SecondMI.getOperand(SrcOpIdx2).getReg())
+// CHECK-PREDICATOR-NEXT: return false;
+// CHECK-PREDICATOR-NEXT: }
+// CHECK-PREDICATOR-NEXT: return true;
+// CHECK-PREDICATOR-NEXT: }
// CHECK-PREDICATOR-NEXT: } // end namespace llvm
// CHECK-PREDICATOR-EMPTY:
// CHECK-PREDICATOR-NEXT: #endif
// Check that we have generated target subfeature.
+// CHECK-SUBTARGET: { "test-both-fusion-predicate", "Test BothFusionPredicate", Test::TestBothFusionPredicate
+// CHECK-SUBTARGET: { "test-commutable-fusion", "Test Commutable Fusion", Test::TestCommutableFusion
// CHECK-SUBTARGET: { "test-fusion", "Test Fusion", Test::TestFusion
+// CHECK-SUBTARGET: { "test-single-fusion", "Test SingleFusion", Test::TestSingleFusion
// Check that we have generated `getMacroFusions()` function.
// CHECK-SUBTARGET: std::vector<MacroFusionPredTy> getMacroFusions() const override;
@@ -131,6 +247,8 @@ def TestFusion: SimpleFusion<"test-fusion", "HasTestFusion", "Test Fusion",
// CHECK-SUBTARGET: std::vector<MacroFusionPredTy> TestGenSubtargetInfo::getMacroFusions() const {
// CHECK-SUBTARGET-NEXT: std::vector<MacroFusionPredTy> Fusions;
// CHECK-SUBTARGET-NEXT: if (hasFeature(Test::TestBothFusionPredicate)) Fusions.push_back(llvm::isTestBothFusionPredicate);
+// CHECK-SUBTARGET-NEXT: if (hasFeature(Test::TestCommutableFusion)) Fusions.push_back(llvm::isTestCommutableFusion);
// CHECK-SUBTARGET-NEXT: if (hasFeature(Test::TestFusion)) Fusions.push_back(llvm::isTestFusion);
+// CHECK-SUBTARGET-NEXT: if (hasFeature(Test::TestSingleFusion)) Fusions.push_back(llvm::isTestSingleFusion);
// CHECK-SUBTARGET-NEXT: return Fusions;
// CHECK-SUBTARGET-NEXT: }
diff --git a/llvm/test/TableGen/x86-fold-tables.inc b/llvm/test/TableGen/x86-fold-tables.inc
index eea4f87..4ab5567 100644
--- a/llvm/test/TableGen/x86-fold-tables.inc
+++ b/llvm/test/TableGen/x86-fold-tables.inc
@@ -756,6 +756,12 @@ static const X86FoldTableEntry Table1[] = {
{X86::IMUL64rri32_NF, X86::IMUL64rmi32_NF, 0},
{X86::IMUL64rri8, X86::IMUL64rmi8, 0},
{X86::IMUL64rri8_NF, X86::IMUL64rmi8_NF, 0},
+ {X86::IMULZU16rri, X86::IMULZU16rmi, 0},
+ {X86::IMULZU16rri8, X86::IMULZU16rmi8, 0},
+ {X86::IMULZU32rri, X86::IMULZU32rmi, 0},
+ {X86::IMULZU32rri8, X86::IMULZU32rmi8, 0},
+ {X86::IMULZU64rri32, X86::IMULZU64rmi32, 0},
+ {X86::IMULZU64rri8, X86::IMULZU64rmi8, 0},
{X86::INC16r_ND, X86::INC16m_ND, 0},
{X86::INC16r_NF_ND, X86::INC16m_NF_ND, 0},
{X86::INC32r_ND, X86::INC32m_ND, 0},
@@ -1937,8 +1943,11 @@ static const X86FoldTableEntry Table2[] = {
{X86::BLENDVPDrr0, X86::BLENDVPDrm0, TB_ALIGN_16},
{X86::BLENDVPSrr0, X86::BLENDVPSrm0, TB_ALIGN_16},
{X86::CMOV16rr, X86::CMOV16rm, 0},
+ {X86::CMOV16rr_ND, X86::CMOV16rm_ND, 0},
{X86::CMOV32rr, X86::CMOV32rm, 0},
+ {X86::CMOV32rr_ND, X86::CMOV32rm_ND, 0},
{X86::CMOV64rr, X86::CMOV64rm, 0},
+ {X86::CMOV64rr_ND, X86::CMOV64rm_ND, 0},
{X86::CMPPDrri, X86::CMPPDrmi, TB_ALIGN_16},
{X86::CMPPSrri, X86::CMPPSrmi, TB_ALIGN_16},
{X86::CMPSDrri, X86::CMPSDrmi, 0},
diff --git a/llvm/test/ThinLTO/X86/Inputs/devirt_single_hybrid_bar.ll b/llvm/test/ThinLTO/X86/Inputs/devirt_single_hybrid_bar.ll
index 721d6efb..d8c6525 100644
--- a/llvm/test/ThinLTO/X86/Inputs/devirt_single_hybrid_bar.ll
+++ b/llvm/test/ThinLTO/X86/Inputs/devirt_single_hybrid_bar.ll
@@ -23,7 +23,7 @@ define hidden i32 @_Z3barv() local_unnamed_addr #0 {
entry:
%b = alloca %struct.A, align 8
call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %b)
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1A, i64 0, inrange i32 0, i64 2), ptr %b, align 8, !tbaa !4
+ store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1A, i64 0, i32 0, i64 2), ptr %b, align 8, !tbaa !4
%call = call i32 @_Z3fooP1A(ptr nonnull %b)
%add = add nsw i32 %call, 10
call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %b) #4
diff --git a/llvm/test/ThinLTO/X86/devirt_after_filtering_unreachable.ll b/llvm/test/ThinLTO/X86/devirt_after_filtering_unreachable.ll
index 68b83de..39f42da 100644
--- a/llvm/test/ThinLTO/X86/devirt_after_filtering_unreachable.ll
+++ b/llvm/test/ThinLTO/X86/devirt_after_filtering_unreachable.ll
@@ -71,7 +71,7 @@ target triple = "x86_64-unknown-linux-gnu"
define hidden i32 @main() {
entry:
%call = tail call ptr @_Znwm(i64 8)
- store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV7Derived, i64 0, inrange i32 0, i64 2), ptr %call
+ store ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV7Derived, i64 0, i32 0, i64 2), ptr %call
tail call void @_Z3fooP4Base(ptr nonnull %call)
ret i32 0
}
diff --git a/llvm/test/ThinLTO/X86/devirt_external_comdat_same_guid.ll b/llvm/test/ThinLTO/X86/devirt_external_comdat_same_guid.ll
index 2417532..1f0737b 100644
--- a/llvm/test/ThinLTO/X86/devirt_external_comdat_same_guid.ll
+++ b/llvm/test/ThinLTO/X86/devirt_external_comdat_same_guid.ll
@@ -51,7 +51,7 @@ define i32 @_ZN1B1nEi(ptr %this, i32 %a) #0 comdat($_ZTV1B) {
; Ensures that vtable of B is live so that we will attempt devirt.
define dso_local i32 @use_B(ptr %a) {
entry:
- store ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV1B, i64 0, inrange i32 0, i64 2), ptr %a, align 8
+ store ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV1B, i64 0, i32 0, i64 2), ptr %a, align 8
ret i32 0
}
diff --git a/llvm/test/ThinLTO/X86/devirt_local_same_guid.ll b/llvm/test/ThinLTO/X86/devirt_local_same_guid.ll
index 3efea8d..2205545 100644
--- a/llvm/test/ThinLTO/X86/devirt_local_same_guid.ll
+++ b/llvm/test/ThinLTO/X86/devirt_local_same_guid.ll
@@ -37,7 +37,7 @@ define internal i32 @_ZN1B1nEi(ptr %this, i32 %a) #0 {
; Ensures that vtable of B is live so that we will attempt devirt.
define dso_local i32 @use_B(ptr %a) {
entry:
- store ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV1B, i64 0, inrange i32 0, i64 2), ptr %a, align 8
+ store ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV1B, i64 0, i32 0, i64 2), ptr %a, align 8
ret i32 0
}
diff --git a/llvm/test/ThinLTO/X86/lower_type_test_phi.ll b/llvm/test/ThinLTO/X86/lower_type_test_phi.ll
index 722ffe3..81d85f6 100644
--- a/llvm/test/ThinLTO/X86/lower_type_test_phi.ll
+++ b/llvm/test/ThinLTO/X86/lower_type_test_phi.ll
@@ -117,7 +117,7 @@ $_ZTV2D2 = comdat any
define ptr @_Z2b1v() {
entry:
%call = tail call ptr @_Znwm(i64 8)
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV2D1, i64 0, inrange i32 0, i64 2), ptr %call, align 8
+ store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV2D1, i64 0, i32 0, i64 2), ptr %call, align 8
ret ptr %call
}
@@ -126,7 +126,7 @@ declare ptr @_Znwm(i64)
define ptr @_Z2b2v() {
entry:
%call = tail call ptr @_Znwm(i64 8)
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV2D2, i64 0, inrange i32 0, i64 2), ptr %call, align 8
+ store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV2D2, i64 0, i32 0, i64 2), ptr %call, align 8
ret ptr %call
}
diff --git a/llvm/test/ThinLTO/X86/nodevirt-nonpromoted-typeid.ll b/llvm/test/ThinLTO/X86/nodevirt-nonpromoted-typeid.ll
index c6e61ed..7d71c59 100644
--- a/llvm/test/ThinLTO/X86/nodevirt-nonpromoted-typeid.ll
+++ b/llvm/test/ThinLTO/X86/nodevirt-nonpromoted-typeid.ll
@@ -55,7 +55,7 @@ entry:
%this.addr = alloca ptr, align 8
store ptr %this, ptr %this.addr, align 8
%this1 = load ptr, ptr %this.addr
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1D, i64 0, inrange i32 0, i64 2), ptr %this1, align 8
+ store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1D, i64 0, i32 0, i64 2), ptr %this1, align 8
ret void
}
diff --git a/llvm/test/ThinLTO/X86/pseudo-probe-desc-import.ll b/llvm/test/ThinLTO/X86/pseudo-probe-desc-import.ll
index 21dd8c0..f915aac 100644
--- a/llvm/test/ThinLTO/X86/pseudo-probe-desc-import.ll
+++ b/llvm/test/ThinLTO/X86/pseudo-probe-desc-import.ll
@@ -12,8 +12,8 @@
; RUN: llvm-lto -thinlto-action=import %t3.bc -thinlto-index=%t3.index.bc -o /dev/null 2>&1 | FileCheck %s --check-prefix=WARN
-; CHECK-NOT: {i64 6699318081062747564, i64 4294967295, !"foo"
-; CHECK: !{i64 -2624081020897602054, i64 281479271677951, !"main"
+; CHECK-NOT: {i64 6699318081062747564, i64 [[#]], !"foo"
+; CHECK: !{i64 -2624081020897602054, i64 [[#]], !"main"
; WARN: warning: Pseudo-probe ignored: source module '{{.*}}' is compiled with -fpseudo-probe-for-profiling while destination module '{{.*}}' is not
diff --git a/llvm/test/ThinLTO/X86/type_test_noindircall.ll b/llvm/test/ThinLTO/X86/type_test_noindircall.ll
index 2d0faaa..cc85e44 100644
--- a/llvm/test/ThinLTO/X86/type_test_noindircall.ll
+++ b/llvm/test/ThinLTO/X86/type_test_noindircall.ll
@@ -38,8 +38,8 @@ target triple = "x86_64-grtev4-linux-gnu"
define internal void @_ZN12_GLOBAL__N_18RealFileD2Ev(ptr %this) unnamed_addr #0 align 2 {
entry:
; CHECK-IR: store
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTVN12_GLOBAL__N_18RealFileE, i64 0, inrange i32 0, i64 2), ptr %this, align 8
- %0 = tail call i1 @llvm.type.test(ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTVN12_GLOBAL__N_18RealFileE, i64 0, inrange i32 0, i64 2), metadata !"4$09c6cc733fc6accb91e5d7b87cb48f2d")
+ store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTVN12_GLOBAL__N_18RealFileE, i64 0, i32 0, i64 2), ptr %this, align 8
+ %0 = tail call i1 @llvm.type.test(ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTVN12_GLOBAL__N_18RealFileE, i64 0, i32 0, i64 2), metadata !"4$09c6cc733fc6accb91e5d7b87cb48f2d")
tail call void @llvm.assume(i1 %0)
; CHECK-IR-NEXT: ret void
ret void
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
index b6e6b26..a5d4c32 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
@@ -3028,7 +3028,7 @@ define bfloat @test_atomicrmw_fadd_bf16_global_system_align4(ptr addrspace(1) %p
define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bfloat %value) #2 {
; CI-LABEL: @test_atomicrmw_fadd_bf16_local_strictfp(
-; CI-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; CI-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4) #[[ATTR6]]
; CI-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; CI-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
; CI-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
@@ -3041,7 +3041,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; CI-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
; CI-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; CI-NEXT: [[TMP4:%.*]] = bitcast i16 [[EXTRACTED]] to bfloat
-; CI-NEXT: [[NEW:%.*]] = fadd bfloat [[TMP4]], [[VALUE:%.*]]
+; CI-NEXT: [[NEW:%.*]] = call bfloat @llvm.experimental.constrained.fadd.bf16(bfloat [[TMP4]], bfloat [[VALUE:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR6]]
; CI-NEXT: [[TMP5:%.*]] = bitcast bfloat [[NEW]] to i16
; CI-NEXT: [[EXTENDED:%.*]] = zext i16 [[TMP5]] to i32
; CI-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
@@ -3058,7 +3058,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; CI-NEXT: ret bfloat [[TMP7]]
;
; GFX9-LABEL: @test_atomicrmw_fadd_bf16_local_strictfp(
-; GFX9-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; GFX9-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4) #[[ATTR6]]
; GFX9-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; GFX9-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
; GFX9-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
@@ -3071,7 +3071,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX9-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
; GFX9-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; GFX9-NEXT: [[TMP4:%.*]] = bitcast i16 [[EXTRACTED]] to bfloat
-; GFX9-NEXT: [[NEW:%.*]] = fadd bfloat [[TMP4]], [[VALUE:%.*]]
+; GFX9-NEXT: [[NEW:%.*]] = call bfloat @llvm.experimental.constrained.fadd.bf16(bfloat [[TMP4]], bfloat [[VALUE:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR6]]
; GFX9-NEXT: [[TMP5:%.*]] = bitcast bfloat [[NEW]] to i16
; GFX9-NEXT: [[EXTENDED:%.*]] = zext i16 [[TMP5]] to i32
; GFX9-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
@@ -3088,7 +3088,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX9-NEXT: ret bfloat [[TMP7]]
;
; GFX908-LABEL: @test_atomicrmw_fadd_bf16_local_strictfp(
-; GFX908-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; GFX908-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4) #[[ATTR6]]
; GFX908-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; GFX908-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
; GFX908-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
@@ -3101,7 +3101,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX908-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
; GFX908-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; GFX908-NEXT: [[TMP4:%.*]] = bitcast i16 [[EXTRACTED]] to bfloat
-; GFX908-NEXT: [[NEW:%.*]] = fadd bfloat [[TMP4]], [[VALUE:%.*]]
+; GFX908-NEXT: [[NEW:%.*]] = call bfloat @llvm.experimental.constrained.fadd.bf16(bfloat [[TMP4]], bfloat [[VALUE:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR6]]
; GFX908-NEXT: [[TMP5:%.*]] = bitcast bfloat [[NEW]] to i16
; GFX908-NEXT: [[EXTENDED:%.*]] = zext i16 [[TMP5]] to i32
; GFX908-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
@@ -3118,7 +3118,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX908-NEXT: ret bfloat [[TMP7]]
;
; GFX90A-LABEL: @test_atomicrmw_fadd_bf16_local_strictfp(
-; GFX90A-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; GFX90A-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4) #[[ATTR6:[0-9]+]]
; GFX90A-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; GFX90A-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
; GFX90A-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
@@ -3131,7 +3131,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX90A-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
; GFX90A-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; GFX90A-NEXT: [[TMP4:%.*]] = bitcast i16 [[EXTRACTED]] to bfloat
-; GFX90A-NEXT: [[NEW:%.*]] = fadd bfloat [[TMP4]], [[VALUE:%.*]]
+; GFX90A-NEXT: [[NEW:%.*]] = call bfloat @llvm.experimental.constrained.fadd.bf16(bfloat [[TMP4]], bfloat [[VALUE:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR6]]
; GFX90A-NEXT: [[TMP5:%.*]] = bitcast bfloat [[NEW]] to i16
; GFX90A-NEXT: [[EXTENDED:%.*]] = zext i16 [[TMP5]] to i32
; GFX90A-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
@@ -3148,7 +3148,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX90A-NEXT: ret bfloat [[TMP7]]
;
; GFX940-LABEL: @test_atomicrmw_fadd_bf16_local_strictfp(
-; GFX940-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; GFX940-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4) #[[ATTR6:[0-9]+]]
; GFX940-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; GFX940-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
; GFX940-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
@@ -3161,7 +3161,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX940-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
; GFX940-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; GFX940-NEXT: [[TMP4:%.*]] = bitcast i16 [[EXTRACTED]] to bfloat
-; GFX940-NEXT: [[NEW:%.*]] = fadd bfloat [[TMP4]], [[VALUE:%.*]]
+; GFX940-NEXT: [[NEW:%.*]] = call bfloat @llvm.experimental.constrained.fadd.bf16(bfloat [[TMP4]], bfloat [[VALUE:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR6]]
; GFX940-NEXT: [[TMP5:%.*]] = bitcast bfloat [[NEW]] to i16
; GFX940-NEXT: [[EXTENDED:%.*]] = zext i16 [[TMP5]] to i32
; GFX940-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
@@ -3178,7 +3178,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX940-NEXT: ret bfloat [[TMP7]]
;
; GFX11-LABEL: @test_atomicrmw_fadd_bf16_local_strictfp(
-; GFX11-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; GFX11-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4) #[[ATTR6]]
; GFX11-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; GFX11-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
; GFX11-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
@@ -3191,7 +3191,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX11-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
; GFX11-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; GFX11-NEXT: [[TMP4:%.*]] = bitcast i16 [[EXTRACTED]] to bfloat
-; GFX11-NEXT: [[NEW:%.*]] = fadd bfloat [[TMP4]], [[VALUE:%.*]]
+; GFX11-NEXT: [[NEW:%.*]] = call bfloat @llvm.experimental.constrained.fadd.bf16(bfloat [[TMP4]], bfloat [[VALUE:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR6]]
; GFX11-NEXT: [[TMP5:%.*]] = bitcast bfloat [[NEW]] to i16
; GFX11-NEXT: [[EXTENDED:%.*]] = zext i16 [[TMP5]] to i32
; GFX11-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
diff --git a/llvm/test/Transforms/Attributor/align.ll b/llvm/test/Transforms/Attributor/align.ll
index 5103b6f..9880e53 100644
--- a/llvm/test/Transforms/Attributor/align.ll
+++ b/llvm/test/Transforms/Attributor/align.ll
@@ -11,10 +11,10 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; TEST 1
;;
;.
-; CHECK: @[[A1:[a-zA-Z0-9_$"\\.-]+]] = common global i8 0, align 8
-; CHECK: @[[A2:[a-zA-Z0-9_$"\\.-]+]] = common global i8 0, align 16
-; CHECK: @[[CND:[a-zA-Z0-9_$"\\.-]+]] = external global i1
-; CHECK: @[[G:[a-zA-Z0-9_$"\\.-]+]] = global i8 0, align 32
+; CHECK: @a1 = common global i8 0, align 8
+; CHECK: @a2 = common global i8 0, align 16
+; CHECK: @cnd = external global i1
+; CHECK: @G = global i8 0, align 32
;.
define ptr @test1(ptr align 8 %0) #0 {
; CHECK: Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
@@ -158,18 +158,31 @@ define internal ptr @f1(ptr readnone %0) local_unnamed_addr #0 {
; Function Attrs: nounwind readnone ssp uwtable
define ptr @f2(ptr readnone %0) local_unnamed_addr #0 {
-; CHECK: Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
-; CHECK-LABEL: define {{[^@]+}}@f2
-; CHECK-SAME: (ptr nofree readnone [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr [[TMP0]], null
-; CHECK-NEXT: br i1 [[TMP2]], label [[TMP4:%.*]], label [[TMP3:%.*]]
-; CHECK: 3:
-; CHECK-NEXT: br label [[TMP5:%.*]]
-; CHECK: 4:
-; CHECK-NEXT: br label [[TMP5]]
-; CHECK: 5:
-; CHECK-NEXT: [[TMP6:%.*]] = phi ptr [ [[TMP0]], [[TMP3]] ], [ @a1, [[TMP4]] ]
-; CHECK-NEXT: ret ptr [[TMP6]]
+; TUNIT: Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
+; TUNIT-LABEL: define {{[^@]+}}@f2
+; TUNIT-SAME: (ptr nofree readnone [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; TUNIT-NEXT: [[TMP2:%.*]] = icmp eq ptr [[TMP0]], null
+; TUNIT-NEXT: br i1 [[TMP2]], label [[TMP4:%.*]], label [[TMP3:%.*]]
+; TUNIT: 3:
+; TUNIT-NEXT: br label [[TMP5:%.*]]
+; TUNIT: 4:
+; TUNIT-NEXT: br label [[TMP5]]
+; TUNIT: 5:
+; TUNIT-NEXT: [[TMP6:%.*]] = phi ptr [ [[TMP0]], [[TMP3]] ], [ @a1, [[TMP4]] ]
+; TUNIT-NEXT: ret ptr [[TMP6]]
+;
+; CGSCC: Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
+; CGSCC-LABEL: define {{[^@]+}}@f2
+; CGSCC-SAME: (ptr nofree readnone [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CGSCC-NEXT: [[TMP2:%.*]] = icmp eq ptr [[TMP0]], null
+; CGSCC-NEXT: br i1 [[TMP2]], label [[TMP4:%.*]], label [[TMP3:%.*]]
+; CGSCC: 3:
+; CGSCC-NEXT: br label [[TMP5:%.*]]
+; CGSCC: 4:
+; CGSCC-NEXT: br label [[TMP5]]
+; CGSCC: 5:
+; CGSCC-NEXT: [[TMP6:%.*]] = phi ptr [ [[TMP0]], [[TMP3]] ], [ @a1, [[TMP4]] ]
+; CGSCC-NEXT: ret ptr [[TMP6]]
;
%2 = icmp eq ptr %0, null
br i1 %2, label %5, label %3
@@ -222,7 +235,7 @@ define align 4 ptr @test7() #0 {
; CGSCC: Function Attrs: mustprogress nofree noinline nosync nounwind willreturn memory(none) uwtable
; CGSCC-LABEL: define {{[^@]+}}@test7
; CGSCC-SAME: () #[[ATTR1:[0-9]+]] {
-; CGSCC-NEXT: [[C:%.*]] = tail call noundef nonnull align 8 dereferenceable(1) ptr @f1() #[[ATTR14:[0-9]+]]
+; CGSCC-NEXT: [[C:%.*]] = tail call noundef nonnull align 8 dereferenceable(1) ptr @f1() #[[ATTR15:[0-9]+]]
; CGSCC-NEXT: ret ptr [[C]]
;
%c = tail call ptr @f1(ptr align 8 dereferenceable(1) @a1)
@@ -933,7 +946,7 @@ define i32 @musttail_caller_1(ptr %p) {
; TUNIT-NEXT: [[C:%.*]] = load i1, ptr @cnd, align 1
; TUNIT-NEXT: br i1 [[C]], label [[MT:%.*]], label [[EXIT:%.*]]
; TUNIT: mt:
-; TUNIT-NEXT: [[V:%.*]] = musttail call i32 @musttail_callee_1(ptr nocapture nofree noundef readonly [[P]]) #[[ATTR12:[0-9]+]]
+; TUNIT-NEXT: [[V:%.*]] = musttail call i32 @musttail_callee_1(ptr nocapture nofree noundef readonly [[P]]) #[[ATTR13:[0-9]+]]
; TUNIT-NEXT: ret i32 [[V]]
; TUNIT: exit:
; TUNIT-NEXT: ret i32 0
@@ -944,7 +957,7 @@ define i32 @musttail_caller_1(ptr %p) {
; CGSCC-NEXT: [[C:%.*]] = load i1, ptr @cnd, align 1
; CGSCC-NEXT: br i1 [[C]], label [[MT:%.*]], label [[EXIT:%.*]]
; CGSCC: mt:
-; CGSCC-NEXT: [[V:%.*]] = musttail call i32 @musttail_callee_1(ptr nocapture nofree noundef nonnull readonly dereferenceable(4) [[P]]) #[[ATTR15:[0-9]+]]
+; CGSCC-NEXT: [[V:%.*]] = musttail call i32 @musttail_callee_1(ptr nocapture nofree noundef nonnull readonly dereferenceable(4) [[P]]) #[[ATTR16:[0-9]+]]
; CGSCC-NEXT: ret i32 [[V]]
; CGSCC: exit:
; CGSCC-NEXT: ret i32 0
@@ -1076,13 +1089,13 @@ define ptr @aligned_8_return_caller(ptr align(16) %a, i1 %c1, i1 %c2) {
; TUNIT: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
; TUNIT-LABEL: define {{[^@]+}}@aligned_8_return_caller
; TUNIT-SAME: (ptr nofree readnone align 16 "no-capture-maybe-returned" [[A:%.*]], i1 [[C1:%.*]], i1 [[C2:%.*]]) #[[ATTR10]] {
-; TUNIT-NEXT: [[R:%.*]] = call align 8 ptr @aligned_8_return(ptr noalias nofree readnone align 16 "no-capture-maybe-returned" [[A]], i1 noundef [[C1]], i1 [[C2]]) #[[ATTR13:[0-9]+]]
+; TUNIT-NEXT: [[R:%.*]] = call align 8 ptr @aligned_8_return(ptr noalias nofree readnone align 16 "no-capture-maybe-returned" [[A]], i1 noundef [[C1]], i1 [[C2]]) #[[ATTR14:[0-9]+]]
; TUNIT-NEXT: ret ptr [[R]]
;
; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(none)
; CGSCC-LABEL: define {{[^@]+}}@aligned_8_return_caller
; CGSCC-SAME: (ptr nofree readnone align 16 [[A:%.*]], i1 noundef [[C1:%.*]], i1 [[C2:%.*]]) #[[ATTR13:[0-9]+]] {
-; CGSCC-NEXT: [[R:%.*]] = call align 8 ptr @aligned_8_return(ptr noalias nofree readnone align 16 [[A]], i1 noundef [[C1]], i1 [[C2]]) #[[ATTR14]]
+; CGSCC-NEXT: [[R:%.*]] = call align 8 ptr @aligned_8_return(ptr noalias nofree readnone align 16 [[A]], i1 noundef [[C1]], i1 [[C2]]) #[[ATTR15]]
; CGSCC-NEXT: ret ptr [[R]]
;
%r = call ptr @aligned_8_return(ptr %a, i1 %c1, i1 %c2)
@@ -1101,6 +1114,104 @@ entry:
ret i32 0
}
+define i64 @infer_align_atomicrmw(ptr align 4 %p) {
+; TUNIT: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; TUNIT-LABEL: define {{[^@]+}}@infer_align_atomicrmw
+; TUNIT-SAME: (ptr nocapture nofree align 16 [[P:%.*]]) #[[ATTR12:[0-9]+]] {
+; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; TUNIT-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; TUNIT-NEXT: [[RET:%.*]] = atomicrmw add ptr [[ARRAYIDX1]], i64 4 seq_cst, align 16
+; TUNIT-NEXT: ret i64 [[RET]]
+;
+; CGSCC: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; CGSCC-LABEL: define {{[^@]+}}@infer_align_atomicrmw
+; CGSCC-SAME: (ptr nocapture nofree align 16 [[P:%.*]]) #[[ATTR14:[0-9]+]] {
+; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; CGSCC-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; CGSCC-NEXT: [[RET:%.*]] = atomicrmw add ptr [[ARRAYIDX1]], i64 4 seq_cst, align 16
+; CGSCC-NEXT: ret i64 [[RET]]
+;
+ %arrayidx0 = getelementptr i64, ptr %p, i64 1
+ %arrayidx1 = getelementptr i64, ptr %arrayidx0, i64 3
+ %ret = atomicrmw add ptr %arrayidx1, i64 4 seq_cst, align 16
+ ret i64 %ret
+}
+
+define ptr @infer_align_atomicrmw_ptr(ptr align 4 %p, ptr %val) {
+; TUNIT: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; TUNIT-LABEL: define {{[^@]+}}@infer_align_atomicrmw_ptr
+; TUNIT-SAME: (ptr nocapture nofree align 16 [[P:%.*]], ptr nofree [[VAL:%.*]]) #[[ATTR12]] {
+; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; TUNIT-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; TUNIT-NEXT: [[RET:%.*]] = atomicrmw xchg ptr [[ARRAYIDX1]], ptr [[VAL]] seq_cst, align 16
+; TUNIT-NEXT: ret ptr [[RET]]
+;
+; CGSCC: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; CGSCC-LABEL: define {{[^@]+}}@infer_align_atomicrmw_ptr
+; CGSCC-SAME: (ptr nocapture nofree align 16 [[P:%.*]], ptr nofree [[VAL:%.*]]) #[[ATTR14]] {
+; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; CGSCC-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; CGSCC-NEXT: [[RET:%.*]] = atomicrmw xchg ptr [[ARRAYIDX1]], ptr [[VAL]] seq_cst, align 16
+; CGSCC-NEXT: ret ptr [[RET]]
+;
+ %arrayidx0 = getelementptr i64, ptr %p, i64 1
+ %arrayidx1 = getelementptr i64, ptr %arrayidx0, i64 3
+ %ret = atomicrmw xchg ptr %arrayidx1, ptr %val seq_cst, align 16
+ ret ptr %ret
+}
+
+define i64 @infer_align_cmpxchg(ptr align 4 %p) {
+; TUNIT: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; TUNIT-LABEL: define {{[^@]+}}@infer_align_cmpxchg
+; TUNIT-SAME: (ptr nocapture nofree align 16 [[P:%.*]]) #[[ATTR12]] {
+; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; TUNIT-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; TUNIT-NEXT: [[CMPX:%.*]] = cmpxchg ptr [[ARRAYIDX1]], i64 4, i64 1 seq_cst seq_cst, align 16
+; TUNIT-NEXT: [[RET:%.*]] = extractvalue { i64, i1 } [[CMPX]], 0
+; TUNIT-NEXT: ret i64 [[RET]]
+;
+; CGSCC: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; CGSCC-LABEL: define {{[^@]+}}@infer_align_cmpxchg
+; CGSCC-SAME: (ptr nocapture nofree align 16 [[P:%.*]]) #[[ATTR14]] {
+; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; CGSCC-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; CGSCC-NEXT: [[CMPX:%.*]] = cmpxchg ptr [[ARRAYIDX1]], i64 4, i64 1 seq_cst seq_cst, align 16
+; CGSCC-NEXT: [[RET:%.*]] = extractvalue { i64, i1 } [[CMPX]], 0
+; CGSCC-NEXT: ret i64 [[RET]]
+;
+ %arrayidx0 = getelementptr i64, ptr %p, i64 1
+ %arrayidx1 = getelementptr i64, ptr %arrayidx0, i64 3
+ %cmpx = cmpxchg ptr %arrayidx1, i64 4, i64 1 seq_cst seq_cst, align 16
+ %ret = extractvalue { i64, i1 } %cmpx, 0
+ ret i64 %ret
+}
+
+define ptr @infer_align_cmpxchg_ptr(ptr align 4 %p, ptr %cmp0, ptr %cmp1) {
+; TUNIT: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; TUNIT-LABEL: define {{[^@]+}}@infer_align_cmpxchg_ptr
+; TUNIT-SAME: (ptr nocapture nofree align 16 [[P:%.*]], ptr nofree [[CMP0:%.*]], ptr nofree [[CMP1:%.*]]) #[[ATTR12]] {
+; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; TUNIT-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; TUNIT-NEXT: [[CMPX:%.*]] = cmpxchg ptr [[ARRAYIDX1]], ptr [[CMP0]], ptr [[CMP1]] seq_cst seq_cst, align 16
+; TUNIT-NEXT: [[RET:%.*]] = extractvalue { ptr, i1 } [[CMPX]], 0
+; TUNIT-NEXT: ret ptr [[RET]]
+;
+; CGSCC: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; CGSCC-LABEL: define {{[^@]+}}@infer_align_cmpxchg_ptr
+; CGSCC-SAME: (ptr nocapture nofree align 16 [[P:%.*]], ptr nofree [[CMP0:%.*]], ptr nofree [[CMP1:%.*]]) #[[ATTR14]] {
+; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; CGSCC-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; CGSCC-NEXT: [[CMPX:%.*]] = cmpxchg ptr [[ARRAYIDX1]], ptr [[CMP0]], ptr [[CMP1]] seq_cst seq_cst, align 16
+; CGSCC-NEXT: [[RET:%.*]] = extractvalue { ptr, i1 } [[CMPX]], 0
+; CGSCC-NEXT: ret ptr [[RET]]
+;
+ %arrayidx0 = getelementptr i64, ptr %p, i64 1
+ %arrayidx1 = getelementptr i64, ptr %arrayidx0, i64 3
+ %cmpx = cmpxchg ptr %arrayidx1, ptr %cmp0, ptr %cmp1 seq_cst seq_cst, align 16
+ %ret = extractvalue { ptr, i1 } %cmpx, 0
+ ret ptr %ret
+}
+
declare void @implicit_cast_callee(i64)
attributes #0 = { nounwind uwtable noinline }
@@ -1119,8 +1230,9 @@ attributes #2 = { null_pointer_is_valid }
; TUNIT: attributes #[[ATTR9]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(write) }
; TUNIT: attributes #[[ATTR10]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) }
; TUNIT: attributes #[[ATTR11]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(read) }
-; TUNIT: attributes #[[ATTR12]] = { nofree nosync nounwind willreturn memory(read) }
-; TUNIT: attributes #[[ATTR13]] = { nofree nosync nounwind willreturn }
+; TUNIT: attributes #[[ATTR12]] = { mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite) }
+; TUNIT: attributes #[[ATTR13]] = { nofree nosync nounwind willreturn memory(read) }
+; TUNIT: attributes #[[ATTR14]] = { nofree nosync nounwind willreturn }
;.
; CGSCC: attributes #[[ATTR0]] = { mustprogress nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable }
; CGSCC: attributes #[[ATTR1]] = { mustprogress nofree noinline nosync nounwind willreturn memory(none) uwtable }
@@ -1136,6 +1248,7 @@ attributes #2 = { null_pointer_is_valid }
; CGSCC: attributes #[[ATTR11]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) }
; CGSCC: attributes #[[ATTR12]] = { mustprogress nofree nosync nounwind willreturn memory(read) }
; CGSCC: attributes #[[ATTR13]] = { mustprogress nofree nosync nounwind willreturn memory(none) }
-; CGSCC: attributes #[[ATTR14]] = { nofree nosync willreturn }
-; CGSCC: attributes #[[ATTR15]] = { nofree willreturn memory(read) }
+; CGSCC: attributes #[[ATTR14]] = { mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite) }
+; CGSCC: attributes #[[ATTR15]] = { nofree nosync willreturn }
+; CGSCC: attributes #[[ATTR16]] = { nofree willreturn memory(read) }
;.
diff --git a/llvm/test/Transforms/Attributor/nocapture-1.ll b/llvm/test/Transforms/Attributor/nocapture-1.ll
index 7d2f0a1..f61388f 100644
--- a/llvm/test/Transforms/Attributor/nocapture-1.ll
+++ b/llvm/test/Transforms/Attributor/nocapture-1.ll
@@ -524,13 +524,13 @@ define void @test6_2(ptr %x6_2, ptr %y6_2, ptr %z6_2) {
define void @test_cmpxchg(ptr %p) {
; TUNIT: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
; TUNIT-LABEL: define {{[^@]+}}@test_cmpxchg
-; TUNIT-SAME: (ptr nocapture nofree noundef nonnull dereferenceable(4) [[P:%.*]]) #[[ATTR8:[0-9]+]] {
+; TUNIT-SAME: (ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[P:%.*]]) #[[ATTR8:[0-9]+]] {
; TUNIT-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[P]], i32 0, i32 1 acquire monotonic, align 4
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
; CGSCC-LABEL: define {{[^@]+}}@test_cmpxchg
-; CGSCC-SAME: (ptr nocapture nofree noundef nonnull dereferenceable(4) [[P:%.*]]) #[[ATTR11:[0-9]+]] {
+; CGSCC-SAME: (ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[P:%.*]]) #[[ATTR11:[0-9]+]] {
; CGSCC-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[P]], i32 0, i32 1 acquire monotonic, align 4
; CGSCC-NEXT: ret void
;
@@ -541,13 +541,13 @@ define void @test_cmpxchg(ptr %p) {
define void @test_cmpxchg_ptr(ptr %p, ptr %q) {
; TUNIT: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
; TUNIT-LABEL: define {{[^@]+}}@test_cmpxchg_ptr
-; TUNIT-SAME: (ptr nocapture nofree noundef nonnull dereferenceable(8) [[P:%.*]], ptr nofree [[Q:%.*]]) #[[ATTR8]] {
+; TUNIT-SAME: (ptr nocapture nofree noundef nonnull align 8 dereferenceable(8) [[P:%.*]], ptr nofree [[Q:%.*]]) #[[ATTR8]] {
; TUNIT-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[P]], ptr null, ptr [[Q]] acquire monotonic, align 8
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
; CGSCC-LABEL: define {{[^@]+}}@test_cmpxchg_ptr
-; CGSCC-SAME: (ptr nocapture nofree noundef nonnull dereferenceable(8) [[P:%.*]], ptr nofree [[Q:%.*]]) #[[ATTR11]] {
+; CGSCC-SAME: (ptr nocapture nofree noundef nonnull align 8 dereferenceable(8) [[P:%.*]], ptr nofree [[Q:%.*]]) #[[ATTR11]] {
; CGSCC-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[P]], ptr null, ptr [[Q]] acquire monotonic, align 8
; CGSCC-NEXT: ret void
;
@@ -558,13 +558,13 @@ define void @test_cmpxchg_ptr(ptr %p, ptr %q) {
define void @test_atomicrmw(ptr %p) {
; TUNIT: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
; TUNIT-LABEL: define {{[^@]+}}@test_atomicrmw
-; TUNIT-SAME: (ptr nocapture nofree noundef nonnull dereferenceable(4) [[P:%.*]]) #[[ATTR8]] {
+; TUNIT-SAME: (ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[P:%.*]]) #[[ATTR8]] {
; TUNIT-NEXT: [[TMP1:%.*]] = atomicrmw add ptr [[P]], i32 1 seq_cst, align 4
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
; CGSCC-LABEL: define {{[^@]+}}@test_atomicrmw
-; CGSCC-SAME: (ptr nocapture nofree noundef nonnull dereferenceable(4) [[P:%.*]]) #[[ATTR11]] {
+; CGSCC-SAME: (ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[P:%.*]]) #[[ATTR11]] {
; CGSCC-NEXT: [[TMP1:%.*]] = atomicrmw add ptr [[P]], i32 1 seq_cst, align 4
; CGSCC-NEXT: ret void
;
diff --git a/llvm/test/Transforms/Attributor/nofpclass-implied-by-fcmp.ll b/llvm/test/Transforms/Attributor/nofpclass-implied-by-fcmp.ll
index d64f8cf..05c57052e6 100644
--- a/llvm/test/Transforms/Attributor/nofpclass-implied-by-fcmp.ll
+++ b/llvm/test/Transforms/Attributor/nofpclass-implied-by-fcmp.ll
@@ -2641,8 +2641,8 @@ define float @assume_false_smallest_normal(float %arg) {
}
define float @clamp_false_nan(float %arg) {
-; CHECK-LABEL: define nofpclass(nan inf nzero sub norm) float @clamp_false_nan(
-; CHECK-SAME: float returned nofpclass(nan inf nzero sub norm) [[ARG:%.*]]) #[[ATTR2]] {
+; CHECK-LABEL: define float @clamp_false_nan(
+; CHECK-SAME: float returned [[ARG:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret float [[ARG]]
;
%fcmp = fcmp false float %arg, 0x7FF8000000000000
@@ -2784,12 +2784,12 @@ define float @clamp_true_smallest_normal_0.0(float %arg) {
}
define float @clamp_true_nan(float %arg) {
-; CHECK-LABEL: define noundef nofpclass(nan inf nzero sub norm) float @clamp_true_nan(
-; CHECK-SAME: float nofpclass(nan inf nzero sub norm) [[ARG:%.*]]) #[[ATTR2]] {
-; CHECK-NEXT: ret float 0.000000e+00
+; CHECK-LABEL: define float @clamp_true_nan(
+; CHECK-SAME: float returned [[ARG:%.*]]) #[[ATTR2]] {
+; CHECK-NEXT: ret float [[ARG]]
;
%fcmp = fcmp true float %arg, 0x7FF8000000000000
- %select = select i1 %fcmp, float 0.0, float %arg
+ %select = select i1 %fcmp, float %arg, float 0.0
ret float %select
}
diff --git a/llvm/test/Transforms/Attributor/nofpclass.ll b/llvm/test/Transforms/Attributor/nofpclass.ll
index 442464c..4df647c 100644
--- a/llvm/test/Transforms/Attributor/nofpclass.ll
+++ b/llvm/test/Transforms/Attributor/nofpclass.ll
@@ -1813,7 +1813,7 @@ define double @fpext(float nofpclass(inf nan) %arg) {
define float @atomicrmw_fadd(ptr %ptr, float nofpclass(inf nan) %val) {
; CHECK: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
; CHECK-LABEL: define float @atomicrmw_fadd
-; CHECK-SAME: (ptr nocapture nofree noundef nonnull dereferenceable(4) [[PTR:%.*]], float nofpclass(nan inf) [[VAL:%.*]]) #[[ATTR6:[0-9]+]] {
+; CHECK-SAME: (ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[PTR:%.*]], float nofpclass(nan inf) [[VAL:%.*]]) #[[ATTR6:[0-9]+]] {
; CHECK-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr [[PTR]], float [[VAL]] seq_cst, align 4
; CHECK-NEXT: ret float [[RESULT]]
;
diff --git a/llvm/test/Transforms/CodeGenPrepare/AArch64/fpclass-test.ll b/llvm/test/Transforms/CodeGenPrepare/AArch64/fpclass-test.ll
new file mode 100644
index 0000000..63ab22e
--- /dev/null
+++ b/llvm/test/Transforms/CodeGenPrepare/AArch64/fpclass-test.ll
@@ -0,0 +1,134 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -codegenprepare -S -mtriple=aarch64 < %s | FileCheck %s
+
+define i1 @test_is_inf_or_nan(double %arg) {
+; CHECK-LABEL: define i1 @test_is_inf_or_nan(
+; CHECK-SAME: double [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[ARG]], i32 519)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call double @llvm.fabs.f64(double %arg)
+ %ret = fcmp ueq double %abs, 0x7FF0000000000000
+ ret i1 %ret
+}
+
+define i1 @test_is_not_inf_or_nan(double %arg) {
+; CHECK-LABEL: define i1 @test_is_not_inf_or_nan(
+; CHECK-SAME: double [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[ARG]], i32 504)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call double @llvm.fabs.f64(double %arg)
+ %ret = fcmp one double %abs, 0x7FF0000000000000
+ ret i1 %ret
+}
+
+define i1 @test_is_inf(double %arg) {
+; CHECK-LABEL: define i1 @test_is_inf(
+; CHECK-SAME: double [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[ARG]], i32 516)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call double @llvm.fabs.f64(double %arg)
+ %ret = fcmp oeq double %abs, 0x7FF0000000000000
+ ret i1 %ret
+}
+
+define i1 @test_is_not_inf(double %arg) {
+; CHECK-LABEL: define i1 @test_is_not_inf(
+; CHECK-SAME: double [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[ARG]], i32 507)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call double @llvm.fabs.f64(double %arg)
+ %ret = fcmp une double %abs, 0x7FF0000000000000
+ ret i1 %ret
+}
+
+define <vscale x 2 x i1> @test_vec_is_inf_or_nan(<vscale x 2 x double> %arg) {
+; CHECK-LABEL: define <vscale x 2 x i1> @test_vec_is_inf_or_nan(
+; CHECK-SAME: <vscale x 2 x double> [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.is.fpclass.nxv2f64(<vscale x 2 x double> [[ARG]], i32 519)
+; CHECK-NEXT: ret <vscale x 2 x i1> [[TMP1]]
+;
+ %abs = tail call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> %arg)
+ %ret = fcmp ueq <vscale x 2 x double> %abs, splat (double 0x7FF0000000000000)
+ ret <vscale x 2 x i1> %ret
+}
+
+define <vscale x 2 x i1> @test_vec_is_not_inf_or_nan(<vscale x 2 x double> %arg) {
+; CHECK-LABEL: define <vscale x 2 x i1> @test_vec_is_not_inf_or_nan(
+; CHECK-SAME: <vscale x 2 x double> [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.is.fpclass.nxv2f64(<vscale x 2 x double> [[ARG]], i32 504)
+; CHECK-NEXT: ret <vscale x 2 x i1> [[TMP1]]
+;
+ %abs = tail call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> %arg)
+ %ret = fcmp one <vscale x 2 x double> %abs, splat (double 0x7FF0000000000000)
+ ret <vscale x 2 x i1> %ret
+}
+
+define <vscale x 2 x i1> @test_vec_is_inf(<vscale x 2 x double> %arg) {
+; CHECK-LABEL: define <vscale x 2 x i1> @test_vec_is_inf(
+; CHECK-SAME: <vscale x 2 x double> [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.is.fpclass.nxv2f64(<vscale x 2 x double> [[ARG]], i32 516)
+; CHECK-NEXT: ret <vscale x 2 x i1> [[TMP1]]
+;
+ %abs = tail call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> %arg)
+ %ret = fcmp oeq <vscale x 2 x double> %abs, splat (double 0x7FF0000000000000)
+ ret <vscale x 2 x i1> %ret
+}
+
+define <vscale x 2 x i1> @test_vec_is_not_inf(<vscale x 2 x double> %arg) {
+; CHECK-LABEL: define <vscale x 2 x i1> @test_vec_is_not_inf(
+; CHECK-SAME: <vscale x 2 x double> [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.is.fpclass.nxv2f64(<vscale x 2 x double> [[ARG]], i32 507)
+; CHECK-NEXT: ret <vscale x 2 x i1> [[TMP1]]
+;
+ %abs = tail call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> %arg)
+ %ret = fcmp une <vscale x 2 x double> %abs, splat (double 0x7FF0000000000000)
+ ret <vscale x 2 x i1> %ret
+}
+
+define i1 @test_fp128_is_inf_or_nan(fp128 %arg) {
+; CHECK-LABEL: define i1 @test_fp128_is_inf_or_nan(
+; CHECK-SAME: fp128 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f128(fp128 [[ARG]], i32 519)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call fp128 @llvm.fabs.f128(fp128 %arg)
+ %ret = fcmp ueq fp128 %abs, 0xL00000000000000007FFF000000000000
+ ret i1 %ret
+}
+
+define i1 @test_fp128_is_not_inf_or_nan(fp128 %arg) {
+; CHECK-LABEL: define i1 @test_fp128_is_not_inf_or_nan(
+; CHECK-SAME: fp128 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f128(fp128 [[ARG]], i32 504)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call fp128 @llvm.fabs.f128(fp128 %arg)
+ %ret = fcmp one fp128 %abs, 0xL00000000000000007FFF000000000000
+ ret i1 %ret
+}
+
+define i1 @test_fp128_is_inf(fp128 %arg) {
+; CHECK-LABEL: define i1 @test_fp128_is_inf(
+; CHECK-SAME: fp128 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f128(fp128 [[ARG]], i32 516)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call fp128 @llvm.fabs.f128(fp128 %arg)
+ %ret = fcmp oeq fp128 %abs, 0xL00000000000000007FFF000000000000
+ ret i1 %ret
+}
+
+define i1 @test_fp128_is_not_inf(fp128 %arg) {
+; CHECK-LABEL: define i1 @test_fp128_is_not_inf(
+; CHECK-SAME: fp128 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f128(fp128 [[ARG]], i32 507)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call fp128 @llvm.fabs.f128(fp128 %arg)
+ %ret = fcmp une fp128 %abs, 0xL00000000000000007FFF000000000000
+ ret i1 %ret
+}
diff --git a/llvm/test/Transforms/CodeGenPrepare/RISCV/fpclass-test.ll b/llvm/test/Transforms/CodeGenPrepare/RISCV/fpclass-test.ll
new file mode 100644
index 0000000..7c00218
--- /dev/null
+++ b/llvm/test/Transforms/CodeGenPrepare/RISCV/fpclass-test.ll
@@ -0,0 +1,134 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -codegenprepare -S -mtriple=riscv64 < %s | FileCheck %s
+
+define i1 @test_is_inf_or_nan(double %arg) {
+; CHECK-LABEL: define i1 @test_is_inf_or_nan(
+; CHECK-SAME: double [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[ARG]], i32 519)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call double @llvm.fabs.f64(double %arg)
+ %ret = fcmp ueq double %abs, 0x7FF0000000000000
+ ret i1 %ret
+}
+
+define i1 @test_is_not_inf_or_nan(double %arg) {
+; CHECK-LABEL: define i1 @test_is_not_inf_or_nan(
+; CHECK-SAME: double [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[ARG]], i32 504)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call double @llvm.fabs.f64(double %arg)
+ %ret = fcmp one double %abs, 0x7FF0000000000000
+ ret i1 %ret
+}
+
+define i1 @test_is_inf(double %arg) {
+; CHECK-LABEL: define i1 @test_is_inf(
+; CHECK-SAME: double [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[ARG]], i32 516)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call double @llvm.fabs.f64(double %arg)
+ %ret = fcmp oeq double %abs, 0x7FF0000000000000
+ ret i1 %ret
+}
+
+define i1 @test_is_not_inf(double %arg) {
+; CHECK-LABEL: define i1 @test_is_not_inf(
+; CHECK-SAME: double [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[ARG]], i32 507)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call double @llvm.fabs.f64(double %arg)
+ %ret = fcmp une double %abs, 0x7FF0000000000000
+ ret i1 %ret
+}
+
+define <vscale x 4 x i1> @test_vec_is_inf_or_nan(<vscale x 4 x double> %arg) {
+; CHECK-LABEL: define <vscale x 4 x i1> @test_vec_is_inf_or_nan(
+; CHECK-SAME: <vscale x 4 x double> [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.is.fpclass.nxv4f64(<vscale x 4 x double> [[ARG]], i32 519)
+; CHECK-NEXT: ret <vscale x 4 x i1> [[TMP1]]
+;
+ %abs = tail call <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double> %arg)
+ %ret = fcmp ueq <vscale x 4 x double> %abs, splat (double 0x7FF0000000000000)
+ ret <vscale x 4 x i1> %ret
+}
+
+define <vscale x 4 x i1> @test_vec_is_not_inf_or_nan(<vscale x 4 x double> %arg) {
+; CHECK-LABEL: define <vscale x 4 x i1> @test_vec_is_not_inf_or_nan(
+; CHECK-SAME: <vscale x 4 x double> [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.is.fpclass.nxv4f64(<vscale x 4 x double> [[ARG]], i32 504)
+; CHECK-NEXT: ret <vscale x 4 x i1> [[TMP1]]
+;
+ %abs = tail call <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double> %arg)
+ %ret = fcmp one <vscale x 4 x double> %abs, splat (double 0x7FF0000000000000)
+ ret <vscale x 4 x i1> %ret
+}
+
+define <vscale x 4 x i1> @test_vec_is_inf(<vscale x 4 x double> %arg) {
+; CHECK-LABEL: define <vscale x 4 x i1> @test_vec_is_inf(
+; CHECK-SAME: <vscale x 4 x double> [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.is.fpclass.nxv4f64(<vscale x 4 x double> [[ARG]], i32 516)
+; CHECK-NEXT: ret <vscale x 4 x i1> [[TMP1]]
+;
+ %abs = tail call <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double> %arg)
+ %ret = fcmp oeq <vscale x 4 x double> %abs, splat (double 0x7FF0000000000000)
+ ret <vscale x 4 x i1> %ret
+}
+
+define <vscale x 4 x i1> @test_vec_is_not_inf(<vscale x 4 x double> %arg) {
+; CHECK-LABEL: define <vscale x 4 x i1> @test_vec_is_not_inf(
+; CHECK-SAME: <vscale x 4 x double> [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.is.fpclass.nxv4f64(<vscale x 4 x double> [[ARG]], i32 507)
+; CHECK-NEXT: ret <vscale x 4 x i1> [[TMP1]]
+;
+ %abs = tail call <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double> %arg)
+ %ret = fcmp une <vscale x 4 x double> %abs, splat (double 0x7FF0000000000000)
+ ret <vscale x 4 x i1> %ret
+}
+
+define i1 @test_fp128_is_inf_or_nan(fp128 %arg) {
+; CHECK-LABEL: define i1 @test_fp128_is_inf_or_nan(
+; CHECK-SAME: fp128 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f128(fp128 [[ARG]], i32 519)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call fp128 @llvm.fabs.f128(fp128 %arg)
+ %ret = fcmp ueq fp128 %abs, 0xL00000000000000007FFF000000000000
+ ret i1 %ret
+}
+
+define i1 @test_fp128_is_not_inf_or_nan(fp128 %arg) {
+; CHECK-LABEL: define i1 @test_fp128_is_not_inf_or_nan(
+; CHECK-SAME: fp128 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f128(fp128 [[ARG]], i32 504)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call fp128 @llvm.fabs.f128(fp128 %arg)
+ %ret = fcmp one fp128 %abs, 0xL00000000000000007FFF000000000000
+ ret i1 %ret
+}
+
+define i1 @test_fp128_is_inf(fp128 %arg) {
+; CHECK-LABEL: define i1 @test_fp128_is_inf(
+; CHECK-SAME: fp128 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f128(fp128 [[ARG]], i32 516)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call fp128 @llvm.fabs.f128(fp128 %arg)
+ %ret = fcmp oeq fp128 %abs, 0xL00000000000000007FFF000000000000
+ ret i1 %ret
+}
+
+define i1 @test_fp128_is_not_inf(fp128 %arg) {
+; CHECK-LABEL: define i1 @test_fp128_is_not_inf(
+; CHECK-SAME: fp128 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f128(fp128 [[ARG]], i32 507)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call fp128 @llvm.fabs.f128(fp128 %arg)
+ %ret = fcmp une fp128 %abs, 0xL00000000000000007FFF000000000000
+ ret i1 %ret
+}
diff --git a/llvm/test/Transforms/CodeGenPrepare/X86/fpclass-test.ll b/llvm/test/Transforms/CodeGenPrepare/X86/fpclass-test.ll
new file mode 100644
index 0000000..525caeb
--- /dev/null
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/fpclass-test.ll
@@ -0,0 +1,178 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -codegenprepare -S -mtriple=x86_64-unknown-unknown < %s | FileCheck %s
+
+define i1 @test_is_inf_or_nan(double %arg) {
+; CHECK-LABEL: define i1 @test_is_inf_or_nan(
+; CHECK-SAME: double [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[ARG]], i32 519)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call double @llvm.fabs.f64(double %arg)
+ %ret = fcmp ueq double %abs, 0x7FF0000000000000
+ ret i1 %ret
+}
+
+define i1 @test_is_not_inf_or_nan(double %arg) {
+; CHECK-LABEL: define i1 @test_is_not_inf_or_nan(
+; CHECK-SAME: double [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[ARG]], i32 504)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call double @llvm.fabs.f64(double %arg)
+ %ret = fcmp one double %abs, 0x7FF0000000000000
+ ret i1 %ret
+}
+
+define i1 @test_is_inf(double %arg) {
+; CHECK-LABEL: define i1 @test_is_inf(
+; CHECK-SAME: double [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[ARG]], i32 516)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call double @llvm.fabs.f64(double %arg)
+ %ret = fcmp oeq double %abs, 0x7FF0000000000000
+ ret i1 %ret
+}
+
+define i1 @test_is_not_inf(double %arg) {
+; CHECK-LABEL: define i1 @test_is_not_inf(
+; CHECK-SAME: double [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[ARG]], i32 507)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call double @llvm.fabs.f64(double %arg)
+ %ret = fcmp une double %abs, 0x7FF0000000000000
+ ret i1 %ret
+}
+
+define <4 x i1> @test_vec_is_inf_or_nan(<4 x double> %arg) {
+; CHECK-LABEL: define <4 x i1> @test_vec_is_inf_or_nan(
+; CHECK-SAME: <4 x double> [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.is.fpclass.v4f64(<4 x double> [[ARG]], i32 519)
+; CHECK-NEXT: ret <4 x i1> [[TMP1]]
+;
+ %abs = tail call <4 x double> @llvm.fabs.v4f64(<4 x double> %arg)
+ %ret = fcmp ueq <4 x double> %abs, splat (double 0x7FF0000000000000)
+ ret <4 x i1> %ret
+}
+
+define <4 x i1> @test_vec_is_not_inf_or_nan(<4 x double> %arg) {
+; CHECK-LABEL: define <4 x i1> @test_vec_is_not_inf_or_nan(
+; CHECK-SAME: <4 x double> [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.is.fpclass.v4f64(<4 x double> [[ARG]], i32 504)
+; CHECK-NEXT: ret <4 x i1> [[TMP1]]
+;
+ %abs = tail call <4 x double> @llvm.fabs.v4f64(<4 x double> %arg)
+ %ret = fcmp one <4 x double> %abs, splat (double 0x7FF0000000000000)
+ ret <4 x i1> %ret
+}
+
+define <4 x i1> @test_vec_is_inf(<4 x double> %arg) {
+; CHECK-LABEL: define <4 x i1> @test_vec_is_inf(
+; CHECK-SAME: <4 x double> [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.is.fpclass.v4f64(<4 x double> [[ARG]], i32 516)
+; CHECK-NEXT: ret <4 x i1> [[TMP1]]
+;
+ %abs = tail call <4 x double> @llvm.fabs.v4f64(<4 x double> %arg)
+ %ret = fcmp oeq <4 x double> %abs, splat (double 0x7FF0000000000000)
+ ret <4 x i1> %ret
+}
+
+define <4 x i1> @test_vec_is_not_inf(<4 x double> %arg) {
+; CHECK-LABEL: define <4 x i1> @test_vec_is_not_inf(
+; CHECK-SAME: <4 x double> [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.is.fpclass.v4f64(<4 x double> [[ARG]], i32 507)
+; CHECK-NEXT: ret <4 x i1> [[TMP1]]
+;
+ %abs = tail call <4 x double> @llvm.fabs.v4f64(<4 x double> %arg)
+ %ret = fcmp une <4 x double> %abs, splat (double 0x7FF0000000000000)
+ ret <4 x i1> %ret
+}
+
+define i1 @test_fp128_is_inf_or_nan(fp128 %arg) {
+; CHECK-LABEL: define i1 @test_fp128_is_inf_or_nan(
+; CHECK-SAME: fp128 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f128(fp128 [[ARG]], i32 519)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call fp128 @llvm.fabs.f128(fp128 %arg)
+ %ret = fcmp ueq fp128 %abs, 0xL00000000000000007FFF000000000000
+ ret i1 %ret
+}
+
+define i1 @test_fp128_is_not_inf_or_nan(fp128 %arg) {
+; CHECK-LABEL: define i1 @test_fp128_is_not_inf_or_nan(
+; CHECK-SAME: fp128 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f128(fp128 [[ARG]], i32 504)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call fp128 @llvm.fabs.f128(fp128 %arg)
+ %ret = fcmp one fp128 %abs, 0xL00000000000000007FFF000000000000
+ ret i1 %ret
+}
+
+define i1 @test_fp128_is_inf(fp128 %arg) {
+; CHECK-LABEL: define i1 @test_fp128_is_inf(
+; CHECK-SAME: fp128 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f128(fp128 [[ARG]], i32 516)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call fp128 @llvm.fabs.f128(fp128 %arg)
+ %ret = fcmp oeq fp128 %abs, 0xL00000000000000007FFF000000000000
+ ret i1 %ret
+}
+
+define i1 @test_fp128_is_not_inf(fp128 %arg) {
+; CHECK-LABEL: define i1 @test_fp128_is_not_inf(
+; CHECK-SAME: fp128 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f128(fp128 [[ARG]], i32 507)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call fp128 @llvm.fabs.f128(fp128 %arg)
+ %ret = fcmp une fp128 %abs, 0xL00000000000000007FFF000000000000
+ ret i1 %ret
+}
+
+define i1 @test_x86_fp80_is_inf_or_nan(x86_fp80 %arg) {
+; CHECK-LABEL: define i1 @test_x86_fp80_is_inf_or_nan(
+; CHECK-SAME: x86_fp80 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[ARG]], i32 519)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call x86_fp80 @llvm.fabs.f80(x86_fp80 %arg)
+ %ret = fcmp ueq x86_fp80 %abs, 0xK7FFF8000000000000000
+ ret i1 %ret
+}
+
+define i1 @test_x86_fp80_is_not_inf_or_nan(x86_fp80 %arg) {
+; CHECK-LABEL: define i1 @test_x86_fp80_is_not_inf_or_nan(
+; CHECK-SAME: x86_fp80 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[ARG]], i32 504)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call x86_fp80 @llvm.fabs.f80(x86_fp80 %arg)
+ %ret = fcmp one x86_fp80 %abs, 0xK7FFF8000000000000000
+ ret i1 %ret
+}
+
+define i1 @test_x86_fp80_is_inf(x86_fp80 %arg) {
+; CHECK-LABEL: define i1 @test_x86_fp80_is_inf(
+; CHECK-SAME: x86_fp80 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[ARG]], i32 516)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call x86_fp80 @llvm.fabs.f80(x86_fp80 %arg)
+ %ret = fcmp oeq x86_fp80 %abs, 0xK7FFF8000000000000000
+ ret i1 %ret
+}
+
+define i1 @test_x86_fp80_is_not_inf(x86_fp80 %arg) {
+; CHECK-LABEL: define i1 @test_x86_fp80_is_not_inf(
+; CHECK-SAME: x86_fp80 [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[ARG]], i32 507)
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %abs = tail call x86_fp80 @llvm.fabs.f80(x86_fp80 %arg)
+ %ret = fcmp une x86_fp80 %abs, 0xK7FFF8000000000000000
+ ret i1 %ret
+}
diff --git a/llvm/test/Transforms/ConstantHoisting/AArch64/large-immediate.ll b/llvm/test/Transforms/ConstantHoisting/AArch64/large-immediate.ll
index 196a104..6d8890d 100644
--- a/llvm/test/Transforms/ConstantHoisting/AArch64/large-immediate.ll
+++ b/llvm/test/Transforms/ConstantHoisting/AArch64/large-immediate.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
-; RUN: opt -mtriple=arm64-darwin-unknown -S -passes=consthoist < %s | FileCheck %s
+; RUN: opt -mtriple=arm64-darwin-unknown -S -passes=consthoist < %s | FileCheck %s --check-prefixes=CHECK,CV
+; RUN: opt -mtriple=arm64-darwin-unknown -S -passes=consthoist -use-constant-int-for-fixed-length-splat -use-constant-int-for-scalable-splat < %s | FileCheck %s --check-prefixes=CHECK,CI
define i128 @test1(i128 %a) {
; CHECK-LABEL: define i128 @test1(
@@ -122,13 +123,37 @@ define i64 @sdiv_minsize(i64 %a) minsize {
}
define <2 x i64> @sdiv_v2i64(<2 x i64> %a) {
-; CHECK-LABEL: define <2 x i64> @sdiv_v2i64(
-; CHECK-SAME: <2 x i64> [[A:%.*]]) {
-; CHECK-NEXT: [[TMP1:%.*]] = sdiv <2 x i64> [[A]], <i64 4294967087, i64 4294967087>
-; CHECK-NEXT: [[TMP2:%.*]] = add <2 x i64> [[TMP1]], <i64 4294967087, i64 4294967087>
-; CHECK-NEXT: ret <2 x i64> [[TMP2]]
+; CV-LABEL: define <2 x i64> @sdiv_v2i64(
+; CV-SAME: <2 x i64> [[A:%.*]]) {
+; CV-NEXT: [[TMP1:%.*]] = sdiv <2 x i64> [[A]], <i64 4294967087, i64 4294967087>
+; CV-NEXT: [[TMP2:%.*]] = add <2 x i64> [[TMP1]], <i64 4294967087, i64 4294967087>
+; CV-NEXT: ret <2 x i64> [[TMP2]]
+;
+; CI-LABEL: define <2 x i64> @sdiv_v2i64(
+; CI-SAME: <2 x i64> [[A:%.*]]) {
+; CI-NEXT: [[TMP1:%.*]] = sdiv <2 x i64> [[A]], splat (i64 4294967087)
+; CI-NEXT: [[TMP2:%.*]] = add <2 x i64> [[TMP1]], splat (i64 4294967087)
+; CI-NEXT: ret <2 x i64> [[TMP2]]
;
%1 = sdiv <2 x i64> %a, <i64 4294967087, i64 4294967087>
%2 = add <2 x i64> %1, <i64 4294967087, i64 4294967087>
ret <2 x i64> %2
}
+
+define <vscale x 2 x i64> @sdiv_nxv2i64(<vscale x 2 x i64> %a) {
+; CV-LABEL: define <vscale x 2 x i64> @sdiv_nxv2i64(
+; CV-SAME: <vscale x 2 x i64> [[A:%.*]]) {
+; CV-NEXT: [[TMP1:%.*]] = sdiv <vscale x 2 x i64> [[A]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 4294967087, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+; CV-NEXT: [[TMP2:%.*]] = add <vscale x 2 x i64> [[TMP1]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 4294967087, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+; CV-NEXT: ret <vscale x 2 x i64> [[TMP2]]
+;
+; CI-LABEL: define <vscale x 2 x i64> @sdiv_nxv2i64(
+; CI-SAME: <vscale x 2 x i64> [[A:%.*]]) {
+; CI-NEXT: [[TMP1:%.*]] = sdiv <vscale x 2 x i64> [[A]], splat (i64 4294967087)
+; CI-NEXT: [[TMP2:%.*]] = add <vscale x 2 x i64> [[TMP1]], splat (i64 4294967087)
+; CI-NEXT: ret <vscale x 2 x i64> [[TMP2]]
+;
+ %1 = sdiv <vscale x 2 x i64> %a, splat (i64 4294967087)
+ %2 = add <vscale x 2 x i64> %1, splat (i64 4294967087)
+ ret <vscale x 2 x i64> %2
+}
diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll
index 9c2b1ec..d9dba92 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll
@@ -88,7 +88,6 @@ exit:
ret void
}
-; FIXME: The fakeresume1 here should be marked as musttail.
; Verify that in the resume part, the resume call is marked with musttail.
; CHECK-LABEL: @f.resume(
; CHECK: musttail call fastcc void @fakeresume1(ptr align 8 null)
diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll
index 860032b..d0d5005 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll
@@ -1,6 +1,6 @@
; Tests that sunk lifetime markers wouldn't prevent the optimization
; to convert a resuming call to a musttail call.
-; The difference between this and coro-split-musttail5.ll and coro-split-musttail5.ll
+; The difference between this and coro-split-musttail5.ll and coro-split-musttail6.ll
; is that this contains dead instructions generated during the transformation,
; which makes the optimization harder.
; RUN: opt < %s -passes='cgscc(coro-split),simplifycfg,early-cse' -S | FileCheck %s
@@ -8,7 +8,7 @@
declare void @fakeresume1(ptr align 8)
-define void @g() #0 {
+define i64 @g() #0 {
entry:
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
@@ -27,6 +27,11 @@ await.suspend:
%save2 = call token @llvm.coro.save(ptr null)
call fastcc void @fakeresume1(ptr align 8 null)
%suspend2 = call i8 @llvm.coro.suspend(token %save2, i1 false)
+
+ ; These (non-trivially) dead instructions are in the way.
+ %gep = getelementptr inbounds i64, ptr %alloc.var, i32 0
+ %foo = ptrtoint ptr %gep to i64
+
switch i8 %suspend2, label %exit [
i8 0, label %await.ready
i8 1, label %exit
@@ -36,8 +41,9 @@ await.ready:
call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
br label %exit
exit:
+ %result = phi i64 [0, %entry], [0, %entry], [%foo, %await.suspend], [%foo, %await.suspend], [%foo, %await.ready]
call i1 @llvm.coro.end(ptr null, i1 false, token none)
- ret void
+ ret i64 %result
}
; Verify that in the resume part, the resume call is marked with musttail.
@@ -88,7 +94,6 @@ exit:
ret void
}
-; FIXME: The fakeresume1 here should be marked as musttail.
; Verify that in the resume part, the resume call is marked with musttail.
; CHECK-LABEL: @f.resume(
; CHECK: musttail call fastcc void @fakeresume1(ptr align 8 null)
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll b/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
index 8dce9ef..701d867 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
@@ -870,6 +870,33 @@ out:
ret i1 false
}
+define i1 @clamp_high1_or(i32 noundef %a) {
+; CHECK-LABEL: @clamp_high1_or(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[A:%.*]], 5
+; CHECK-NEXT: br i1 [[CMP]], label [[A_GUARD:%.*]], label [[OUT:%.*]]
+; CHECK: a_guard:
+; CHECK-NEXT: [[SEL_CMP:%.*]] = icmp eq i32 [[A]], 5
+; CHECK-NEXT: [[ADD:%.*]] = or disjoint i32 [[A]], 1
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[SEL_CMP]], i32 5, i32 [[ADD]]
+; CHECK-NEXT: ret i1 false
+; CHECK: out:
+; CHECK-NEXT: ret i1 false
+;
+entry:
+ %cmp = icmp sle i32 %a, 5
+ br i1 %cmp, label %a_guard, label %out
+
+a_guard:
+ %sel_cmp = icmp eq i32 %a, 5
+ %add = or disjoint i32 %a, 1
+ %sel = select i1 %sel_cmp, i32 5, i32 %add
+ %res = icmp eq i32 %sel, 6
+ ret i1 %res
+out:
+ ret i1 false
+}
+
define i1 @clamp_high2(i32 noundef %a) {
; CHECK-LABEL: @clamp_high2(
; CHECK-NEXT: entry:
@@ -897,6 +924,35 @@ out:
ret i1 false
}
+
+define i1 @clamp_high2_or_disjoint(i32 noundef %a) {
+; CHECK-LABEL: @clamp_high2_or_disjoint(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[A:%.*]], 5
+; CHECK-NEXT: br i1 [[CMP]], label [[A_GUARD:%.*]], label [[OUT:%.*]]
+; CHECK: a_guard:
+; CHECK-NEXT: [[SEL_CMP:%.*]] = icmp ne i32 [[A]], 5
+; CHECK-NEXT: [[ADD:%.*]] = or disjoint i32 [[A]], 1
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[SEL_CMP]], i32 [[ADD]], i32 5
+; CHECK-NEXT: ret i1 false
+; CHECK: out:
+; CHECK-NEXT: ret i1 false
+;
+entry:
+ %cmp = icmp sle i32 %a, 5
+ br i1 %cmp, label %a_guard, label %out
+
+a_guard:
+ %sel_cmp = icmp ne i32 %a, 5
+ %add = or disjoint i32 %a, 1
+ %sel = select i1 %sel_cmp, i32 %add, i32 5
+ %res = icmp eq i32 %sel, 6
+ ret i1 %res
+out:
+ ret i1 false
+}
+
+
define i1 @clamp_high3(i32 noundef %a) {
; CHECK-LABEL: @clamp_high3(
; CHECK-NEXT: entry:
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll b/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll
index 101820a..b5337b9 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll
@@ -587,6 +587,26 @@ define i1 @test_assume_cmp_with_offset(i64 %idx) {
ret i1 %cmp2
}
+define i1 @test_assume_cmp_with_offset_or(i64 %idx, i1 %other) {
+; CHECK-LABEL: @test_assume_cmp_with_offset_or(
+; CHECK-NEXT: [[IDX_OFF1:%.*]] = or disjoint i64 [[IDX:%.*]], 5
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt i64 [[IDX_OFF1]], 10
+; CHECK-NEXT: br i1 [[CMP1]], label [[T:%.*]], label [[F:%.*]]
+; CHECK: T:
+; CHECK-NEXT: ret i1 true
+; CHECK: F:
+; CHECK-NEXT: ret i1 [[CMP2:%.*]]
+;
+ %idx.off1 = or disjoint i64 %idx, 5
+ %cmp1 = icmp ugt i64 %idx.off1, 10
+ br i1 %cmp1, label %T, label %F
+T:
+ %cmp2 = icmp ugt i64 %idx, 2
+ ret i1 %cmp2
+F:
+ ret i1 %other
+}
+
define void @test_cmp_phi(i8 %a) {
; CHECK-LABEL: @test_cmp_phi(
; CHECK-NEXT: entry:
diff --git a/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll b/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll
index df725b9..696bd55 100644
--- a/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll
+++ b/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=dfa-jump-threading %s | FileCheck %s
+; RUN: opt -S -passes=dfa-jump-threading -dfa-early-exit-heuristic=false %s | FileCheck %s
; These tests check if selects are unfolded properly for jump threading
; opportunities. There are three different patterns to consider:
diff --git a/llvm/test/Transforms/DFAJumpThreading/unpredictable-heuristic.ll b/llvm/test/Transforms/DFAJumpThreading/unpredictable-heuristic.ll
new file mode 100644
index 0000000..9743f0a
--- /dev/null
+++ b/llvm/test/Transforms/DFAJumpThreading/unpredictable-heuristic.ll
@@ -0,0 +1,124 @@
+; REQUIRES: asserts
+; RUN: opt -S -passes=dfa-jump-threading %s -debug-only=dfa-jump-threading 2>&1 | FileCheck %s
+
+; CHECK-COUNT-3: Exiting early due to unpredictability heuristic.
+
+@.str.1 = private unnamed_addr constant [3 x i8] c"10\00", align 1
+@.str.2 = private unnamed_addr constant [3 x i8] c"30\00", align 1
+@.str.3 = private unnamed_addr constant [3 x i8] c"20\00", align 1
+@.str.4 = private unnamed_addr constant [3 x i8] c"40\00", align 1
+
+define void @test1(i32 noundef %num, i32 noundef %num2) {
+entry:
+ br label %while.body
+
+while.body: ; preds = %entry, %sw.epilog
+ %num.addr.0 = phi i32 [ %num, %entry ], [ %num.addr.1, %sw.epilog ]
+ switch i32 %num.addr.0, label %sw.default [
+ i32 10, label %sw.bb
+ i32 30, label %sw.bb1
+ i32 20, label %sw.bb2
+ i32 40, label %sw.bb3
+ ]
+
+sw.bb: ; preds = %while.body
+ %call.i = tail call i32 @bar(ptr noundef nonnull @.str.1)
+ br label %sw.epilog
+
+sw.bb1: ; preds = %while.body
+ %call.i4 = tail call i32 @bar(ptr noundef nonnull @.str.2)
+ br label %sw.epilog
+
+sw.bb2: ; preds = %while.body
+ %call.i5 = tail call i32 @bar(ptr noundef nonnull @.str.3)
+ br label %sw.epilog
+
+sw.bb3: ; preds = %while.body
+ %call.i6 = tail call i32 @bar(ptr noundef nonnull @.str.4)
+ %call = tail call noundef i32 @foo()
+ %add = add nsw i32 %call, %num2
+ br label %sw.epilog
+
+sw.default: ; preds = %while.body
+ ret void
+
+sw.epilog: ; preds = %sw.bb3, %sw.bb2, %sw.bb1, %sw.bb
+ %num.addr.1 = phi i32 [ %add, %sw.bb3 ], [ 40, %sw.bb2 ], [ 20, %sw.bb1 ], [ 30, %sw.bb ]
+ br label %while.body
+}
+
+
+define void @test2(i32 noundef %num, i32 noundef %num2) {
+entry:
+ br label %while.body
+
+while.body: ; preds = %entry, %sw.epilog
+ %num.addr.0 = phi i32 [ %num, %entry ], [ %num.addr.1, %sw.epilog ]
+ switch i32 %num.addr.0, label %sw.default [
+ i32 10, label %sw.epilog
+ i32 30, label %sw.bb1
+ i32 20, label %sw.bb2
+ i32 40, label %sw.bb3
+ ]
+
+sw.bb1: ; preds = %while.body
+ br label %sw.epilog
+
+sw.bb2: ; preds = %while.body
+ br label %sw.epilog
+
+sw.bb3: ; preds = %while.body
+ br label %sw.epilog
+
+sw.default: ; preds = %while.body
+ ret void
+
+sw.epilog: ; preds = %while.body, %sw.bb3, %sw.bb2, %sw.bb1
+ %.str.4.sink = phi ptr [ @.str.4, %sw.bb3 ], [ @.str.3, %sw.bb2 ], [ @.str.2, %sw.bb1 ], [ @.str.1, %while.body ]
+ %num.addr.1 = phi i32 [ %num2, %sw.bb3 ], [ 40, %sw.bb2 ], [ 20, %sw.bb1 ], [ 30, %while.body ]
+ %call.i6 = tail call i32 @bar(ptr noundef nonnull %.str.4.sink)
+ br label %while.body
+}
+
+
+define void @test3(i32 noundef %num, i32 noundef %num2) {
+entry:
+ %add = add nsw i32 %num2, 40
+ br label %while.body
+
+while.body: ; preds = %entry, %sw.epilog
+ %num.addr.0 = phi i32 [ %num, %entry ], [ %num.addr.1, %sw.epilog ]
+ switch i32 %num.addr.0, label %sw.default [
+ i32 10, label %sw.bb
+ i32 30, label %sw.bb1
+ i32 20, label %sw.bb2
+ i32 40, label %sw.bb3
+ ]
+
+sw.bb: ; preds = %while.body
+ %call.i = tail call i32 @bar(ptr noundef nonnull @.str.1)
+ br label %sw.epilog
+
+sw.bb1: ; preds = %while.body
+ %call.i5 = tail call i32 @bar(ptr noundef nonnull @.str.2)
+ br label %sw.epilog
+
+sw.bb2: ; preds = %while.body
+ %call.i6 = tail call i32 @bar(ptr noundef nonnull @.str.3)
+ br label %sw.epilog
+
+sw.bb3: ; preds = %while.body
+ %call.i7 = tail call i32 @bar(ptr noundef nonnull @.str.4)
+ br label %sw.epilog
+
+sw.default: ; preds = %while.body
+ ret void
+
+sw.epilog: ; preds = %sw.bb3, %sw.bb2, %sw.bb1, %sw.bb
+ %num.addr.1 = phi i32 [ %add, %sw.bb3 ], [ 40, %sw.bb2 ], [ 20, %sw.bb1 ], [ 30, %sw.bb ]
+ br label %while.body
+}
+
+
+declare noundef i32 @foo()
+declare noundef i32 @bar(ptr nocapture noundef readonly)
diff --git a/llvm/test/Transforms/ExpandLargeDivRem/X86/vector.ll b/llvm/test/Transforms/ExpandLargeDivRem/X86/vector.ll
new file mode 100644
index 0000000..5b7fd00
--- /dev/null
+++ b/llvm/test/Transforms/ExpandLargeDivRem/X86/vector.ll
@@ -0,0 +1,536 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -mtriple=x86_64-- -expand-large-div-rem -expand-div-rem-bits 128 < %s | FileCheck %s
+; RUN: opt -S -mtriple=x86_64-- -passes=expand-large-div-rem -expand-div-rem-bits 128 < %s | FileCheck %s
+
+define <2 x i129> @sdiv129(<2 x i129> %a, <2 x i129> %b) nounwind {
+; CHECK-LABEL: define <2 x i129> @sdiv129(
+; CHECK-SAME: <2 x i129> [[A:%.*]], <2 x i129> [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: _udiv-special-cases_udiv-special-cases:
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <2 x i129> [[A]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i129> [[B]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = freeze i129 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = freeze i129 [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = ashr i129 [[TMP2]], 128
+; CHECK-NEXT: [[TMP5:%.*]] = ashr i129 [[TMP3]], 128
+; CHECK-NEXT: [[TMP6:%.*]] = xor i129 [[TMP4]], [[TMP2]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i129 [[TMP6]], [[TMP4]]
+; CHECK-NEXT: [[TMP8:%.*]] = xor i129 [[TMP5]], [[TMP3]]
+; CHECK-NEXT: [[TMP9:%.*]] = sub i129 [[TMP8]], [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = xor i129 [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP11:%.*]] = freeze i129 [[TMP9]]
+; CHECK-NEXT: [[TMP12:%.*]] = freeze i129 [[TMP7]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i129 [[TMP11]], 0
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i129 [[TMP12]], 0
+; CHECK-NEXT: [[TMP15:%.*]] = or i1 [[TMP13]], [[TMP14]]
+; CHECK-NEXT: [[TMP16:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP11]], i1 true)
+; CHECK-NEXT: [[TMP17:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP12]], i1 true)
+; CHECK-NEXT: [[TMP18:%.*]] = sub i129 [[TMP16]], [[TMP17]]
+; CHECK-NEXT: [[TMP19:%.*]] = icmp ugt i129 [[TMP18]], 128
+; CHECK-NEXT: [[TMP20:%.*]] = select i1 [[TMP15]], i1 true, i1 [[TMP19]]
+; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i129 [[TMP18]], 128
+; CHECK-NEXT: [[TMP22:%.*]] = select i1 [[TMP20]], i129 0, i129 [[TMP12]]
+; CHECK-NEXT: [[TMP23:%.*]] = select i1 [[TMP20]], i1 true, i1 [[TMP21]]
+; CHECK-NEXT: br i1 [[TMP23]], label [[UDIV_END1:%.*]], label [[UDIV_BB15:%.*]]
+; CHECK: udiv-loop-exit2:
+; CHECK-NEXT: [[TMP24:%.*]] = phi i129 [ 0, [[UDIV_BB15]] ], [ [[TMP39:%.*]], [[UDIV_DO_WHILE3:%.*]] ]
+; CHECK-NEXT: [[TMP25:%.*]] = phi i129 [ [[TMP48:%.*]], [[UDIV_BB15]] ], [ [[TMP36:%.*]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP26:%.*]] = shl i129 [[TMP25]], 1
+; CHECK-NEXT: [[TMP27:%.*]] = or i129 [[TMP24]], [[TMP26]]
+; CHECK-NEXT: br label [[UDIV_END1]]
+; CHECK: udiv-do-while3:
+; CHECK-NEXT: [[TMP28:%.*]] = phi i129 [ 0, [[UDIV_PREHEADER4:%.*]] ], [ [[TMP39]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP29:%.*]] = phi i129 [ [[TMP46:%.*]], [[UDIV_PREHEADER4]] ], [ [[TMP42:%.*]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP30:%.*]] = phi i129 [ [[TMP44:%.*]], [[UDIV_PREHEADER4]] ], [ [[TMP41:%.*]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP31:%.*]] = phi i129 [ [[TMP48]], [[UDIV_PREHEADER4]] ], [ [[TMP36]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP32:%.*]] = shl i129 [[TMP30]], 1
+; CHECK-NEXT: [[TMP33:%.*]] = lshr i129 [[TMP31]], 128
+; CHECK-NEXT: [[TMP34:%.*]] = or i129 [[TMP32]], [[TMP33]]
+; CHECK-NEXT: [[TMP35:%.*]] = shl i129 [[TMP31]], 1
+; CHECK-NEXT: [[TMP36]] = or i129 [[TMP28]], [[TMP35]]
+; CHECK-NEXT: [[TMP37:%.*]] = sub i129 [[TMP45:%.*]], [[TMP34]]
+; CHECK-NEXT: [[TMP38:%.*]] = ashr i129 [[TMP37]], 128
+; CHECK-NEXT: [[TMP39]] = and i129 [[TMP38]], 1
+; CHECK-NEXT: [[TMP40:%.*]] = and i129 [[TMP38]], [[TMP11]]
+; CHECK-NEXT: [[TMP41]] = sub i129 [[TMP34]], [[TMP40]]
+; CHECK-NEXT: [[TMP42]] = add i129 [[TMP29]], -1
+; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i129 [[TMP42]], 0
+; CHECK-NEXT: br i1 [[TMP43]], label [[UDIV_LOOP_EXIT2:%.*]], label [[UDIV_DO_WHILE3]]
+; CHECK: udiv-preheader4:
+; CHECK-NEXT: [[TMP44]] = lshr i129 [[TMP12]], [[TMP46]]
+; CHECK-NEXT: [[TMP45]] = add i129 [[TMP11]], -1
+; CHECK-NEXT: br label [[UDIV_DO_WHILE3]]
+; CHECK: udiv-bb15:
+; CHECK-NEXT: [[TMP46]] = add i129 [[TMP18]], 1
+; CHECK-NEXT: [[TMP47:%.*]] = sub i129 128, [[TMP18]]
+; CHECK-NEXT: [[TMP48]] = shl i129 [[TMP12]], [[TMP47]]
+; CHECK-NEXT: [[TMP49:%.*]] = icmp eq i129 [[TMP46]], 0
+; CHECK-NEXT: br i1 [[TMP49]], label [[UDIV_LOOP_EXIT2]], label [[UDIV_PREHEADER4]]
+; CHECK: udiv-end1:
+; CHECK-NEXT: [[TMP50:%.*]] = phi i129 [ [[TMP27]], [[UDIV_LOOP_EXIT2]] ], [ [[TMP22]], [[_UDIV_SPECIAL_CASES_UDIV_SPECIAL_CASES:%.*]] ]
+; CHECK-NEXT: [[TMP51:%.*]] = xor i129 [[TMP50]], [[TMP10]]
+; CHECK-NEXT: [[TMP52:%.*]] = sub i129 [[TMP51]], [[TMP10]]
+; CHECK-NEXT: [[TMP53:%.*]] = insertelement <2 x i129> poison, i129 [[TMP52]], i64 0
+; CHECK-NEXT: [[TMP54:%.*]] = extractelement <2 x i129> [[A]], i64 1
+; CHECK-NEXT: [[TMP55:%.*]] = extractelement <2 x i129> [[B]], i64 1
+; CHECK-NEXT: [[TMP56:%.*]] = freeze i129 [[TMP54]]
+; CHECK-NEXT: [[TMP57:%.*]] = freeze i129 [[TMP55]]
+; CHECK-NEXT: [[TMP58:%.*]] = ashr i129 [[TMP56]], 128
+; CHECK-NEXT: [[TMP59:%.*]] = ashr i129 [[TMP57]], 128
+; CHECK-NEXT: [[TMP60:%.*]] = xor i129 [[TMP58]], [[TMP56]]
+; CHECK-NEXT: [[TMP61:%.*]] = sub i129 [[TMP60]], [[TMP58]]
+; CHECK-NEXT: [[TMP62:%.*]] = xor i129 [[TMP59]], [[TMP57]]
+; CHECK-NEXT: [[TMP63:%.*]] = sub i129 [[TMP62]], [[TMP59]]
+; CHECK-NEXT: [[TMP64:%.*]] = xor i129 [[TMP59]], [[TMP58]]
+; CHECK-NEXT: [[TMP65:%.*]] = freeze i129 [[TMP63]]
+; CHECK-NEXT: [[TMP66:%.*]] = freeze i129 [[TMP61]]
+; CHECK-NEXT: [[TMP67:%.*]] = icmp eq i129 [[TMP65]], 0
+; CHECK-NEXT: [[TMP68:%.*]] = icmp eq i129 [[TMP66]], 0
+; CHECK-NEXT: [[TMP69:%.*]] = or i1 [[TMP67]], [[TMP68]]
+; CHECK-NEXT: [[TMP70:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP65]], i1 true)
+; CHECK-NEXT: [[TMP71:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP66]], i1 true)
+; CHECK-NEXT: [[TMP72:%.*]] = sub i129 [[TMP70]], [[TMP71]]
+; CHECK-NEXT: [[TMP73:%.*]] = icmp ugt i129 [[TMP72]], 128
+; CHECK-NEXT: [[TMP74:%.*]] = select i1 [[TMP69]], i1 true, i1 [[TMP73]]
+; CHECK-NEXT: [[TMP75:%.*]] = icmp eq i129 [[TMP72]], 128
+; CHECK-NEXT: [[TMP76:%.*]] = select i1 [[TMP74]], i129 0, i129 [[TMP66]]
+; CHECK-NEXT: [[TMP77:%.*]] = select i1 [[TMP74]], i1 true, i1 [[TMP75]]
+; CHECK-NEXT: br i1 [[TMP77]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]]
+; CHECK: udiv-loop-exit:
+; CHECK-NEXT: [[TMP78:%.*]] = phi i129 [ 0, [[UDIV_BB1]] ], [ [[TMP93:%.*]], [[UDIV_DO_WHILE:%.*]] ]
+; CHECK-NEXT: [[TMP79:%.*]] = phi i129 [ [[TMP102:%.*]], [[UDIV_BB1]] ], [ [[TMP90:%.*]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP80:%.*]] = shl i129 [[TMP79]], 1
+; CHECK-NEXT: [[TMP81:%.*]] = or i129 [[TMP78]], [[TMP80]]
+; CHECK-NEXT: br label [[UDIV_END]]
+; CHECK: udiv-do-while:
+; CHECK-NEXT: [[TMP82:%.*]] = phi i129 [ 0, [[UDIV_PREHEADER:%.*]] ], [ [[TMP93]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP83:%.*]] = phi i129 [ [[TMP100:%.*]], [[UDIV_PREHEADER]] ], [ [[TMP96:%.*]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP84:%.*]] = phi i129 [ [[TMP98:%.*]], [[UDIV_PREHEADER]] ], [ [[TMP95:%.*]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP85:%.*]] = phi i129 [ [[TMP102]], [[UDIV_PREHEADER]] ], [ [[TMP90]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP86:%.*]] = shl i129 [[TMP84]], 1
+; CHECK-NEXT: [[TMP87:%.*]] = lshr i129 [[TMP85]], 128
+; CHECK-NEXT: [[TMP88:%.*]] = or i129 [[TMP86]], [[TMP87]]
+; CHECK-NEXT: [[TMP89:%.*]] = shl i129 [[TMP85]], 1
+; CHECK-NEXT: [[TMP90]] = or i129 [[TMP82]], [[TMP89]]
+; CHECK-NEXT: [[TMP91:%.*]] = sub i129 [[TMP99:%.*]], [[TMP88]]
+; CHECK-NEXT: [[TMP92:%.*]] = ashr i129 [[TMP91]], 128
+; CHECK-NEXT: [[TMP93]] = and i129 [[TMP92]], 1
+; CHECK-NEXT: [[TMP94:%.*]] = and i129 [[TMP92]], [[TMP65]]
+; CHECK-NEXT: [[TMP95]] = sub i129 [[TMP88]], [[TMP94]]
+; CHECK-NEXT: [[TMP96]] = add i129 [[TMP83]], -1
+; CHECK-NEXT: [[TMP97:%.*]] = icmp eq i129 [[TMP96]], 0
+; CHECK-NEXT: br i1 [[TMP97]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]]
+; CHECK: udiv-preheader:
+; CHECK-NEXT: [[TMP98]] = lshr i129 [[TMP66]], [[TMP100]]
+; CHECK-NEXT: [[TMP99]] = add i129 [[TMP65]], -1
+; CHECK-NEXT: br label [[UDIV_DO_WHILE]]
+; CHECK: udiv-bb1:
+; CHECK-NEXT: [[TMP100]] = add i129 [[TMP72]], 1
+; CHECK-NEXT: [[TMP101:%.*]] = sub i129 128, [[TMP72]]
+; CHECK-NEXT: [[TMP102]] = shl i129 [[TMP66]], [[TMP101]]
+; CHECK-NEXT: [[TMP103:%.*]] = icmp eq i129 [[TMP100]], 0
+; CHECK-NEXT: br i1 [[TMP103]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]]
+; CHECK: udiv-end:
+; CHECK-NEXT: [[TMP104:%.*]] = phi i129 [ [[TMP81]], [[UDIV_LOOP_EXIT]] ], [ [[TMP76]], [[UDIV_END1]] ]
+; CHECK-NEXT: [[TMP105:%.*]] = xor i129 [[TMP104]], [[TMP64]]
+; CHECK-NEXT: [[TMP106:%.*]] = sub i129 [[TMP105]], [[TMP64]]
+; CHECK-NEXT: [[TMP107:%.*]] = insertelement <2 x i129> [[TMP53]], i129 [[TMP106]], i64 1
+; CHECK-NEXT: ret <2 x i129> [[TMP107]]
+;
+ %res = sdiv <2 x i129> %a, %b
+ ret <2 x i129> %res
+}
+
+define <2 x i129> @udiv129(<2 x i129> %a, <2 x i129> %b) nounwind {
+; CHECK-LABEL: define <2 x i129> @udiv129(
+; CHECK-SAME: <2 x i129> [[A:%.*]], <2 x i129> [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: _udiv-special-cases_udiv-special-cases:
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <2 x i129> [[A]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i129> [[B]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = freeze i129 [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = freeze i129 [[TMP0]]
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i129 [[TMP2]], 0
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i129 [[TMP3]], 0
+; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP4]], [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP2]], i1 true)
+; CHECK-NEXT: [[TMP8:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP3]], i1 true)
+; CHECK-NEXT: [[TMP9:%.*]] = sub i129 [[TMP7]], [[TMP8]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ugt i129 [[TMP9]], 128
+; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP6]], i1 true, i1 [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i129 [[TMP9]], 128
+; CHECK-NEXT: [[TMP13:%.*]] = select i1 [[TMP11]], i129 0, i129 [[TMP3]]
+; CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP11]], i1 true, i1 [[TMP12]]
+; CHECK-NEXT: br i1 [[TMP14]], label [[UDIV_END1:%.*]], label [[UDIV_BB15:%.*]]
+; CHECK: udiv-loop-exit2:
+; CHECK-NEXT: [[TMP15:%.*]] = phi i129 [ 0, [[UDIV_BB15]] ], [ [[TMP30:%.*]], [[UDIV_DO_WHILE3:%.*]] ]
+; CHECK-NEXT: [[TMP16:%.*]] = phi i129 [ [[TMP39:%.*]], [[UDIV_BB15]] ], [ [[TMP27:%.*]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP17:%.*]] = shl i129 [[TMP16]], 1
+; CHECK-NEXT: [[TMP18:%.*]] = or i129 [[TMP15]], [[TMP17]]
+; CHECK-NEXT: br label [[UDIV_END1]]
+; CHECK: udiv-do-while3:
+; CHECK-NEXT: [[TMP19:%.*]] = phi i129 [ 0, [[UDIV_PREHEADER4:%.*]] ], [ [[TMP30]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP20:%.*]] = phi i129 [ [[TMP37:%.*]], [[UDIV_PREHEADER4]] ], [ [[TMP33:%.*]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP21:%.*]] = phi i129 [ [[TMP35:%.*]], [[UDIV_PREHEADER4]] ], [ [[TMP32:%.*]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP22:%.*]] = phi i129 [ [[TMP39]], [[UDIV_PREHEADER4]] ], [ [[TMP27]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP23:%.*]] = shl i129 [[TMP21]], 1
+; CHECK-NEXT: [[TMP24:%.*]] = lshr i129 [[TMP22]], 128
+; CHECK-NEXT: [[TMP25:%.*]] = or i129 [[TMP23]], [[TMP24]]
+; CHECK-NEXT: [[TMP26:%.*]] = shl i129 [[TMP22]], 1
+; CHECK-NEXT: [[TMP27]] = or i129 [[TMP19]], [[TMP26]]
+; CHECK-NEXT: [[TMP28:%.*]] = sub i129 [[TMP36:%.*]], [[TMP25]]
+; CHECK-NEXT: [[TMP29:%.*]] = ashr i129 [[TMP28]], 128
+; CHECK-NEXT: [[TMP30]] = and i129 [[TMP29]], 1
+; CHECK-NEXT: [[TMP31:%.*]] = and i129 [[TMP29]], [[TMP2]]
+; CHECK-NEXT: [[TMP32]] = sub i129 [[TMP25]], [[TMP31]]
+; CHECK-NEXT: [[TMP33]] = add i129 [[TMP20]], -1
+; CHECK-NEXT: [[TMP34:%.*]] = icmp eq i129 [[TMP33]], 0
+; CHECK-NEXT: br i1 [[TMP34]], label [[UDIV_LOOP_EXIT2:%.*]], label [[UDIV_DO_WHILE3]]
+; CHECK: udiv-preheader4:
+; CHECK-NEXT: [[TMP35]] = lshr i129 [[TMP3]], [[TMP37]]
+; CHECK-NEXT: [[TMP36]] = add i129 [[TMP2]], -1
+; CHECK-NEXT: br label [[UDIV_DO_WHILE3]]
+; CHECK: udiv-bb15:
+; CHECK-NEXT: [[TMP37]] = add i129 [[TMP9]], 1
+; CHECK-NEXT: [[TMP38:%.*]] = sub i129 128, [[TMP9]]
+; CHECK-NEXT: [[TMP39]] = shl i129 [[TMP3]], [[TMP38]]
+; CHECK-NEXT: [[TMP40:%.*]] = icmp eq i129 [[TMP37]], 0
+; CHECK-NEXT: br i1 [[TMP40]], label [[UDIV_LOOP_EXIT2]], label [[UDIV_PREHEADER4]]
+; CHECK: udiv-end1:
+; CHECK-NEXT: [[TMP41:%.*]] = phi i129 [ [[TMP18]], [[UDIV_LOOP_EXIT2]] ], [ [[TMP13]], [[_UDIV_SPECIAL_CASES_UDIV_SPECIAL_CASES:%.*]] ]
+; CHECK-NEXT: [[TMP42:%.*]] = insertelement <2 x i129> poison, i129 [[TMP41]], i64 0
+; CHECK-NEXT: [[TMP43:%.*]] = extractelement <2 x i129> [[A]], i64 1
+; CHECK-NEXT: [[TMP44:%.*]] = extractelement <2 x i129> [[B]], i64 1
+; CHECK-NEXT: [[TMP45:%.*]] = freeze i129 [[TMP44]]
+; CHECK-NEXT: [[TMP46:%.*]] = freeze i129 [[TMP43]]
+; CHECK-NEXT: [[TMP47:%.*]] = icmp eq i129 [[TMP45]], 0
+; CHECK-NEXT: [[TMP48:%.*]] = icmp eq i129 [[TMP46]], 0
+; CHECK-NEXT: [[TMP49:%.*]] = or i1 [[TMP47]], [[TMP48]]
+; CHECK-NEXT: [[TMP50:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP45]], i1 true)
+; CHECK-NEXT: [[TMP51:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP46]], i1 true)
+; CHECK-NEXT: [[TMP52:%.*]] = sub i129 [[TMP50]], [[TMP51]]
+; CHECK-NEXT: [[TMP53:%.*]] = icmp ugt i129 [[TMP52]], 128
+; CHECK-NEXT: [[TMP54:%.*]] = select i1 [[TMP49]], i1 true, i1 [[TMP53]]
+; CHECK-NEXT: [[TMP55:%.*]] = icmp eq i129 [[TMP52]], 128
+; CHECK-NEXT: [[TMP56:%.*]] = select i1 [[TMP54]], i129 0, i129 [[TMP46]]
+; CHECK-NEXT: [[TMP57:%.*]] = select i1 [[TMP54]], i1 true, i1 [[TMP55]]
+; CHECK-NEXT: br i1 [[TMP57]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]]
+; CHECK: udiv-loop-exit:
+; CHECK-NEXT: [[TMP58:%.*]] = phi i129 [ 0, [[UDIV_BB1]] ], [ [[TMP73:%.*]], [[UDIV_DO_WHILE:%.*]] ]
+; CHECK-NEXT: [[TMP59:%.*]] = phi i129 [ [[TMP82:%.*]], [[UDIV_BB1]] ], [ [[TMP70:%.*]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP60:%.*]] = shl i129 [[TMP59]], 1
+; CHECK-NEXT: [[TMP61:%.*]] = or i129 [[TMP58]], [[TMP60]]
+; CHECK-NEXT: br label [[UDIV_END]]
+; CHECK: udiv-do-while:
+; CHECK-NEXT: [[TMP62:%.*]] = phi i129 [ 0, [[UDIV_PREHEADER:%.*]] ], [ [[TMP73]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP63:%.*]] = phi i129 [ [[TMP80:%.*]], [[UDIV_PREHEADER]] ], [ [[TMP76:%.*]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP64:%.*]] = phi i129 [ [[TMP78:%.*]], [[UDIV_PREHEADER]] ], [ [[TMP75:%.*]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP65:%.*]] = phi i129 [ [[TMP82]], [[UDIV_PREHEADER]] ], [ [[TMP70]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP66:%.*]] = shl i129 [[TMP64]], 1
+; CHECK-NEXT: [[TMP67:%.*]] = lshr i129 [[TMP65]], 128
+; CHECK-NEXT: [[TMP68:%.*]] = or i129 [[TMP66]], [[TMP67]]
+; CHECK-NEXT: [[TMP69:%.*]] = shl i129 [[TMP65]], 1
+; CHECK-NEXT: [[TMP70]] = or i129 [[TMP62]], [[TMP69]]
+; CHECK-NEXT: [[TMP71:%.*]] = sub i129 [[TMP79:%.*]], [[TMP68]]
+; CHECK-NEXT: [[TMP72:%.*]] = ashr i129 [[TMP71]], 128
+; CHECK-NEXT: [[TMP73]] = and i129 [[TMP72]], 1
+; CHECK-NEXT: [[TMP74:%.*]] = and i129 [[TMP72]], [[TMP45]]
+; CHECK-NEXT: [[TMP75]] = sub i129 [[TMP68]], [[TMP74]]
+; CHECK-NEXT: [[TMP76]] = add i129 [[TMP63]], -1
+; CHECK-NEXT: [[TMP77:%.*]] = icmp eq i129 [[TMP76]], 0
+; CHECK-NEXT: br i1 [[TMP77]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]]
+; CHECK: udiv-preheader:
+; CHECK-NEXT: [[TMP78]] = lshr i129 [[TMP46]], [[TMP80]]
+; CHECK-NEXT: [[TMP79]] = add i129 [[TMP45]], -1
+; CHECK-NEXT: br label [[UDIV_DO_WHILE]]
+; CHECK: udiv-bb1:
+; CHECK-NEXT: [[TMP80]] = add i129 [[TMP52]], 1
+; CHECK-NEXT: [[TMP81:%.*]] = sub i129 128, [[TMP52]]
+; CHECK-NEXT: [[TMP82]] = shl i129 [[TMP46]], [[TMP81]]
+; CHECK-NEXT: [[TMP83:%.*]] = icmp eq i129 [[TMP80]], 0
+; CHECK-NEXT: br i1 [[TMP83]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]]
+; CHECK: udiv-end:
+; CHECK-NEXT: [[TMP84:%.*]] = phi i129 [ [[TMP61]], [[UDIV_LOOP_EXIT]] ], [ [[TMP56]], [[UDIV_END1]] ]
+; CHECK-NEXT: [[TMP85:%.*]] = insertelement <2 x i129> [[TMP42]], i129 [[TMP84]], i64 1
+; CHECK-NEXT: ret <2 x i129> [[TMP85]]
+;
+ %res = udiv <2 x i129> %a, %b
+ ret <2 x i129> %res
+}
+
+define <2 x i129> @srem129(<2 x i129> %a, <2 x i129> %b) nounwind {
+; CHECK-LABEL: define <2 x i129> @srem129(
+; CHECK-SAME: <2 x i129> [[A:%.*]], <2 x i129> [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: _udiv-special-cases_udiv-special-cases:
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <2 x i129> [[A]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i129> [[B]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = freeze i129 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = freeze i129 [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = ashr i129 [[TMP2]], 128
+; CHECK-NEXT: [[TMP5:%.*]] = ashr i129 [[TMP3]], 128
+; CHECK-NEXT: [[TMP6:%.*]] = xor i129 [[TMP2]], [[TMP4]]
+; CHECK-NEXT: [[TMP7:%.*]] = xor i129 [[TMP3]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = sub i129 [[TMP6]], [[TMP4]]
+; CHECK-NEXT: [[TMP9:%.*]] = sub i129 [[TMP7]], [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = freeze i129 [[TMP8]]
+; CHECK-NEXT: [[TMP11:%.*]] = freeze i129 [[TMP9]]
+; CHECK-NEXT: [[TMP12:%.*]] = freeze i129 [[TMP11]]
+; CHECK-NEXT: [[TMP13:%.*]] = freeze i129 [[TMP10]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i129 [[TMP12]], 0
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i129 [[TMP13]], 0
+; CHECK-NEXT: [[TMP16:%.*]] = or i1 [[TMP14]], [[TMP15]]
+; CHECK-NEXT: [[TMP17:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP12]], i1 true)
+; CHECK-NEXT: [[TMP18:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP13]], i1 true)
+; CHECK-NEXT: [[TMP19:%.*]] = sub i129 [[TMP17]], [[TMP18]]
+; CHECK-NEXT: [[TMP20:%.*]] = icmp ugt i129 [[TMP19]], 128
+; CHECK-NEXT: [[TMP21:%.*]] = select i1 [[TMP16]], i1 true, i1 [[TMP20]]
+; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i129 [[TMP19]], 128
+; CHECK-NEXT: [[TMP23:%.*]] = select i1 [[TMP21]], i129 0, i129 [[TMP13]]
+; CHECK-NEXT: [[TMP24:%.*]] = select i1 [[TMP21]], i1 true, i1 [[TMP22]]
+; CHECK-NEXT: br i1 [[TMP24]], label [[UDIV_END1:%.*]], label [[UDIV_BB15:%.*]]
+; CHECK: udiv-loop-exit2:
+; CHECK-NEXT: [[TMP25:%.*]] = phi i129 [ 0, [[UDIV_BB15]] ], [ [[TMP40:%.*]], [[UDIV_DO_WHILE3:%.*]] ]
+; CHECK-NEXT: [[TMP26:%.*]] = phi i129 [ [[TMP49:%.*]], [[UDIV_BB15]] ], [ [[TMP37:%.*]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP27:%.*]] = shl i129 [[TMP26]], 1
+; CHECK-NEXT: [[TMP28:%.*]] = or i129 [[TMP25]], [[TMP27]]
+; CHECK-NEXT: br label [[UDIV_END1]]
+; CHECK: udiv-do-while3:
+; CHECK-NEXT: [[TMP29:%.*]] = phi i129 [ 0, [[UDIV_PREHEADER4:%.*]] ], [ [[TMP40]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP30:%.*]] = phi i129 [ [[TMP47:%.*]], [[UDIV_PREHEADER4]] ], [ [[TMP43:%.*]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP31:%.*]] = phi i129 [ [[TMP45:%.*]], [[UDIV_PREHEADER4]] ], [ [[TMP42:%.*]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP32:%.*]] = phi i129 [ [[TMP49]], [[UDIV_PREHEADER4]] ], [ [[TMP37]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP33:%.*]] = shl i129 [[TMP31]], 1
+; CHECK-NEXT: [[TMP34:%.*]] = lshr i129 [[TMP32]], 128
+; CHECK-NEXT: [[TMP35:%.*]] = or i129 [[TMP33]], [[TMP34]]
+; CHECK-NEXT: [[TMP36:%.*]] = shl i129 [[TMP32]], 1
+; CHECK-NEXT: [[TMP37]] = or i129 [[TMP29]], [[TMP36]]
+; CHECK-NEXT: [[TMP38:%.*]] = sub i129 [[TMP46:%.*]], [[TMP35]]
+; CHECK-NEXT: [[TMP39:%.*]] = ashr i129 [[TMP38]], 128
+; CHECK-NEXT: [[TMP40]] = and i129 [[TMP39]], 1
+; CHECK-NEXT: [[TMP41:%.*]] = and i129 [[TMP39]], [[TMP12]]
+; CHECK-NEXT: [[TMP42]] = sub i129 [[TMP35]], [[TMP41]]
+; CHECK-NEXT: [[TMP43]] = add i129 [[TMP30]], -1
+; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i129 [[TMP43]], 0
+; CHECK-NEXT: br i1 [[TMP44]], label [[UDIV_LOOP_EXIT2:%.*]], label [[UDIV_DO_WHILE3]]
+; CHECK: udiv-preheader4:
+; CHECK-NEXT: [[TMP45]] = lshr i129 [[TMP13]], [[TMP47]]
+; CHECK-NEXT: [[TMP46]] = add i129 [[TMP12]], -1
+; CHECK-NEXT: br label [[UDIV_DO_WHILE3]]
+; CHECK: udiv-bb15:
+; CHECK-NEXT: [[TMP47]] = add i129 [[TMP19]], 1
+; CHECK-NEXT: [[TMP48:%.*]] = sub i129 128, [[TMP19]]
+; CHECK-NEXT: [[TMP49]] = shl i129 [[TMP13]], [[TMP48]]
+; CHECK-NEXT: [[TMP50:%.*]] = icmp eq i129 [[TMP47]], 0
+; CHECK-NEXT: br i1 [[TMP50]], label [[UDIV_LOOP_EXIT2]], label [[UDIV_PREHEADER4]]
+; CHECK: udiv-end1:
+; CHECK-NEXT: [[TMP51:%.*]] = phi i129 [ [[TMP28]], [[UDIV_LOOP_EXIT2]] ], [ [[TMP23]], [[_UDIV_SPECIAL_CASES_UDIV_SPECIAL_CASES:%.*]] ]
+; CHECK-NEXT: [[TMP52:%.*]] = mul i129 [[TMP11]], [[TMP51]]
+; CHECK-NEXT: [[TMP53:%.*]] = sub i129 [[TMP10]], [[TMP52]]
+; CHECK-NEXT: [[TMP54:%.*]] = xor i129 [[TMP53]], [[TMP4]]
+; CHECK-NEXT: [[TMP55:%.*]] = sub i129 [[TMP54]], [[TMP4]]
+; CHECK-NEXT: [[TMP56:%.*]] = insertelement <2 x i129> poison, i129 [[TMP55]], i64 0
+; CHECK-NEXT: [[TMP57:%.*]] = extractelement <2 x i129> [[A]], i64 1
+; CHECK-NEXT: [[TMP58:%.*]] = extractelement <2 x i129> [[B]], i64 1
+; CHECK-NEXT: [[TMP59:%.*]] = freeze i129 [[TMP57]]
+; CHECK-NEXT: [[TMP60:%.*]] = freeze i129 [[TMP58]]
+; CHECK-NEXT: [[TMP61:%.*]] = ashr i129 [[TMP59]], 128
+; CHECK-NEXT: [[TMP62:%.*]] = ashr i129 [[TMP60]], 128
+; CHECK-NEXT: [[TMP63:%.*]] = xor i129 [[TMP59]], [[TMP61]]
+; CHECK-NEXT: [[TMP64:%.*]] = xor i129 [[TMP60]], [[TMP62]]
+; CHECK-NEXT: [[TMP65:%.*]] = sub i129 [[TMP63]], [[TMP61]]
+; CHECK-NEXT: [[TMP66:%.*]] = sub i129 [[TMP64]], [[TMP62]]
+; CHECK-NEXT: [[TMP67:%.*]] = freeze i129 [[TMP65]]
+; CHECK-NEXT: [[TMP68:%.*]] = freeze i129 [[TMP66]]
+; CHECK-NEXT: [[TMP69:%.*]] = freeze i129 [[TMP68]]
+; CHECK-NEXT: [[TMP70:%.*]] = freeze i129 [[TMP67]]
+; CHECK-NEXT: [[TMP71:%.*]] = icmp eq i129 [[TMP69]], 0
+; CHECK-NEXT: [[TMP72:%.*]] = icmp eq i129 [[TMP70]], 0
+; CHECK-NEXT: [[TMP73:%.*]] = or i1 [[TMP71]], [[TMP72]]
+; CHECK-NEXT: [[TMP74:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP69]], i1 true)
+; CHECK-NEXT: [[TMP75:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP70]], i1 true)
+; CHECK-NEXT: [[TMP76:%.*]] = sub i129 [[TMP74]], [[TMP75]]
+; CHECK-NEXT: [[TMP77:%.*]] = icmp ugt i129 [[TMP76]], 128
+; CHECK-NEXT: [[TMP78:%.*]] = select i1 [[TMP73]], i1 true, i1 [[TMP77]]
+; CHECK-NEXT: [[TMP79:%.*]] = icmp eq i129 [[TMP76]], 128
+; CHECK-NEXT: [[TMP80:%.*]] = select i1 [[TMP78]], i129 0, i129 [[TMP70]]
+; CHECK-NEXT: [[TMP81:%.*]] = select i1 [[TMP78]], i1 true, i1 [[TMP79]]
+; CHECK-NEXT: br i1 [[TMP81]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]]
+; CHECK: udiv-loop-exit:
+; CHECK-NEXT: [[TMP82:%.*]] = phi i129 [ 0, [[UDIV_BB1]] ], [ [[TMP97:%.*]], [[UDIV_DO_WHILE:%.*]] ]
+; CHECK-NEXT: [[TMP83:%.*]] = phi i129 [ [[TMP106:%.*]], [[UDIV_BB1]] ], [ [[TMP94:%.*]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP84:%.*]] = shl i129 [[TMP83]], 1
+; CHECK-NEXT: [[TMP85:%.*]] = or i129 [[TMP82]], [[TMP84]]
+; CHECK-NEXT: br label [[UDIV_END]]
+; CHECK: udiv-do-while:
+; CHECK-NEXT: [[TMP86:%.*]] = phi i129 [ 0, [[UDIV_PREHEADER:%.*]] ], [ [[TMP97]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP87:%.*]] = phi i129 [ [[TMP104:%.*]], [[UDIV_PREHEADER]] ], [ [[TMP100:%.*]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP88:%.*]] = phi i129 [ [[TMP102:%.*]], [[UDIV_PREHEADER]] ], [ [[TMP99:%.*]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP89:%.*]] = phi i129 [ [[TMP106]], [[UDIV_PREHEADER]] ], [ [[TMP94]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP90:%.*]] = shl i129 [[TMP88]], 1
+; CHECK-NEXT: [[TMP91:%.*]] = lshr i129 [[TMP89]], 128
+; CHECK-NEXT: [[TMP92:%.*]] = or i129 [[TMP90]], [[TMP91]]
+; CHECK-NEXT: [[TMP93:%.*]] = shl i129 [[TMP89]], 1
+; CHECK-NEXT: [[TMP94]] = or i129 [[TMP86]], [[TMP93]]
+; CHECK-NEXT: [[TMP95:%.*]] = sub i129 [[TMP103:%.*]], [[TMP92]]
+; CHECK-NEXT: [[TMP96:%.*]] = ashr i129 [[TMP95]], 128
+; CHECK-NEXT: [[TMP97]] = and i129 [[TMP96]], 1
+; CHECK-NEXT: [[TMP98:%.*]] = and i129 [[TMP96]], [[TMP69]]
+; CHECK-NEXT: [[TMP99]] = sub i129 [[TMP92]], [[TMP98]]
+; CHECK-NEXT: [[TMP100]] = add i129 [[TMP87]], -1
+; CHECK-NEXT: [[TMP101:%.*]] = icmp eq i129 [[TMP100]], 0
+; CHECK-NEXT: br i1 [[TMP101]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]]
+; CHECK: udiv-preheader:
+; CHECK-NEXT: [[TMP102]] = lshr i129 [[TMP70]], [[TMP104]]
+; CHECK-NEXT: [[TMP103]] = add i129 [[TMP69]], -1
+; CHECK-NEXT: br label [[UDIV_DO_WHILE]]
+; CHECK: udiv-bb1:
+; CHECK-NEXT: [[TMP104]] = add i129 [[TMP76]], 1
+; CHECK-NEXT: [[TMP105:%.*]] = sub i129 128, [[TMP76]]
+; CHECK-NEXT: [[TMP106]] = shl i129 [[TMP70]], [[TMP105]]
+; CHECK-NEXT: [[TMP107:%.*]] = icmp eq i129 [[TMP104]], 0
+; CHECK-NEXT: br i1 [[TMP107]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]]
+; CHECK: udiv-end:
+; CHECK-NEXT: [[TMP108:%.*]] = phi i129 [ [[TMP85]], [[UDIV_LOOP_EXIT]] ], [ [[TMP80]], [[UDIV_END1]] ]
+; CHECK-NEXT: [[TMP109:%.*]] = mul i129 [[TMP68]], [[TMP108]]
+; CHECK-NEXT: [[TMP110:%.*]] = sub i129 [[TMP67]], [[TMP109]]
+; CHECK-NEXT: [[TMP111:%.*]] = xor i129 [[TMP110]], [[TMP61]]
+; CHECK-NEXT: [[TMP112:%.*]] = sub i129 [[TMP111]], [[TMP61]]
+; CHECK-NEXT: [[TMP113:%.*]] = insertelement <2 x i129> [[TMP56]], i129 [[TMP112]], i64 1
+; CHECK-NEXT: ret <2 x i129> [[TMP113]]
+;
+ %res = srem <2 x i129> %a, %b
+ ret <2 x i129> %res
+}
+
+define <2 x i129> @urem129(<2 x i129> %a, <2 x i129> %b) nounwind {
+; CHECK-LABEL: define <2 x i129> @urem129(
+; CHECK-SAME: <2 x i129> [[A:%.*]], <2 x i129> [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: _udiv-special-cases_udiv-special-cases:
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <2 x i129> [[A]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i129> [[B]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = freeze i129 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = freeze i129 [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = freeze i129 [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = freeze i129 [[TMP2]]
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i129 [[TMP4]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i129 [[TMP5]], 0
+; CHECK-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP4]], i1 true)
+; CHECK-NEXT: [[TMP10:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP5]], i1 true)
+; CHECK-NEXT: [[TMP11:%.*]] = sub i129 [[TMP9]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ugt i129 [[TMP11]], 128
+; CHECK-NEXT: [[TMP13:%.*]] = select i1 [[TMP8]], i1 true, i1 [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i129 [[TMP11]], 128
+; CHECK-NEXT: [[TMP15:%.*]] = select i1 [[TMP13]], i129 0, i129 [[TMP5]]
+; CHECK-NEXT: [[TMP16:%.*]] = select i1 [[TMP13]], i1 true, i1 [[TMP14]]
+; CHECK-NEXT: br i1 [[TMP16]], label [[UDIV_END1:%.*]], label [[UDIV_BB15:%.*]]
+; CHECK: udiv-loop-exit2:
+; CHECK-NEXT: [[TMP17:%.*]] = phi i129 [ 0, [[UDIV_BB15]] ], [ [[TMP32:%.*]], [[UDIV_DO_WHILE3:%.*]] ]
+; CHECK-NEXT: [[TMP18:%.*]] = phi i129 [ [[TMP41:%.*]], [[UDIV_BB15]] ], [ [[TMP29:%.*]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP19:%.*]] = shl i129 [[TMP18]], 1
+; CHECK-NEXT: [[TMP20:%.*]] = or i129 [[TMP17]], [[TMP19]]
+; CHECK-NEXT: br label [[UDIV_END1]]
+; CHECK: udiv-do-while3:
+; CHECK-NEXT: [[TMP21:%.*]] = phi i129 [ 0, [[UDIV_PREHEADER4:%.*]] ], [ [[TMP32]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP22:%.*]] = phi i129 [ [[TMP39:%.*]], [[UDIV_PREHEADER4]] ], [ [[TMP35:%.*]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP23:%.*]] = phi i129 [ [[TMP37:%.*]], [[UDIV_PREHEADER4]] ], [ [[TMP34:%.*]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP24:%.*]] = phi i129 [ [[TMP41]], [[UDIV_PREHEADER4]] ], [ [[TMP29]], [[UDIV_DO_WHILE3]] ]
+; CHECK-NEXT: [[TMP25:%.*]] = shl i129 [[TMP23]], 1
+; CHECK-NEXT: [[TMP26:%.*]] = lshr i129 [[TMP24]], 128
+; CHECK-NEXT: [[TMP27:%.*]] = or i129 [[TMP25]], [[TMP26]]
+; CHECK-NEXT: [[TMP28:%.*]] = shl i129 [[TMP24]], 1
+; CHECK-NEXT: [[TMP29]] = or i129 [[TMP21]], [[TMP28]]
+; CHECK-NEXT: [[TMP30:%.*]] = sub i129 [[TMP38:%.*]], [[TMP27]]
+; CHECK-NEXT: [[TMP31:%.*]] = ashr i129 [[TMP30]], 128
+; CHECK-NEXT: [[TMP32]] = and i129 [[TMP31]], 1
+; CHECK-NEXT: [[TMP33:%.*]] = and i129 [[TMP31]], [[TMP4]]
+; CHECK-NEXT: [[TMP34]] = sub i129 [[TMP27]], [[TMP33]]
+; CHECK-NEXT: [[TMP35]] = add i129 [[TMP22]], -1
+; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i129 [[TMP35]], 0
+; CHECK-NEXT: br i1 [[TMP36]], label [[UDIV_LOOP_EXIT2:%.*]], label [[UDIV_DO_WHILE3]]
+; CHECK: udiv-preheader4:
+; CHECK-NEXT: [[TMP37]] = lshr i129 [[TMP5]], [[TMP39]]
+; CHECK-NEXT: [[TMP38]] = add i129 [[TMP4]], -1
+; CHECK-NEXT: br label [[UDIV_DO_WHILE3]]
+; CHECK: udiv-bb15:
+; CHECK-NEXT: [[TMP39]] = add i129 [[TMP11]], 1
+; CHECK-NEXT: [[TMP40:%.*]] = sub i129 128, [[TMP11]]
+; CHECK-NEXT: [[TMP41]] = shl i129 [[TMP5]], [[TMP40]]
+; CHECK-NEXT: [[TMP42:%.*]] = icmp eq i129 [[TMP39]], 0
+; CHECK-NEXT: br i1 [[TMP42]], label [[UDIV_LOOP_EXIT2]], label [[UDIV_PREHEADER4]]
+; CHECK: udiv-end1:
+; CHECK-NEXT: [[TMP43:%.*]] = phi i129 [ [[TMP20]], [[UDIV_LOOP_EXIT2]] ], [ [[TMP15]], [[_UDIV_SPECIAL_CASES_UDIV_SPECIAL_CASES:%.*]] ]
+; CHECK-NEXT: [[TMP44:%.*]] = mul i129 [[TMP3]], [[TMP43]]
+; CHECK-NEXT: [[TMP45:%.*]] = sub i129 [[TMP2]], [[TMP44]]
+; CHECK-NEXT: [[TMP46:%.*]] = insertelement <2 x i129> poison, i129 [[TMP45]], i64 0
+; CHECK-NEXT: [[TMP47:%.*]] = extractelement <2 x i129> [[A]], i64 1
+; CHECK-NEXT: [[TMP48:%.*]] = extractelement <2 x i129> [[B]], i64 1
+; CHECK-NEXT: [[TMP49:%.*]] = freeze i129 [[TMP47]]
+; CHECK-NEXT: [[TMP50:%.*]] = freeze i129 [[TMP48]]
+; CHECK-NEXT: [[TMP51:%.*]] = freeze i129 [[TMP50]]
+; CHECK-NEXT: [[TMP52:%.*]] = freeze i129 [[TMP49]]
+; CHECK-NEXT: [[TMP53:%.*]] = icmp eq i129 [[TMP51]], 0
+; CHECK-NEXT: [[TMP54:%.*]] = icmp eq i129 [[TMP52]], 0
+; CHECK-NEXT: [[TMP55:%.*]] = or i1 [[TMP53]], [[TMP54]]
+; CHECK-NEXT: [[TMP56:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP51]], i1 true)
+; CHECK-NEXT: [[TMP57:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP52]], i1 true)
+; CHECK-NEXT: [[TMP58:%.*]] = sub i129 [[TMP56]], [[TMP57]]
+; CHECK-NEXT: [[TMP59:%.*]] = icmp ugt i129 [[TMP58]], 128
+; CHECK-NEXT: [[TMP60:%.*]] = select i1 [[TMP55]], i1 true, i1 [[TMP59]]
+; CHECK-NEXT: [[TMP61:%.*]] = icmp eq i129 [[TMP58]], 128
+; CHECK-NEXT: [[TMP62:%.*]] = select i1 [[TMP60]], i129 0, i129 [[TMP52]]
+; CHECK-NEXT: [[TMP63:%.*]] = select i1 [[TMP60]], i1 true, i1 [[TMP61]]
+; CHECK-NEXT: br i1 [[TMP63]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]]
+; CHECK: udiv-loop-exit:
+; CHECK-NEXT: [[TMP64:%.*]] = phi i129 [ 0, [[UDIV_BB1]] ], [ [[TMP79:%.*]], [[UDIV_DO_WHILE:%.*]] ]
+; CHECK-NEXT: [[TMP65:%.*]] = phi i129 [ [[TMP88:%.*]], [[UDIV_BB1]] ], [ [[TMP76:%.*]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP66:%.*]] = shl i129 [[TMP65]], 1
+; CHECK-NEXT: [[TMP67:%.*]] = or i129 [[TMP64]], [[TMP66]]
+; CHECK-NEXT: br label [[UDIV_END]]
+; CHECK: udiv-do-while:
+; CHECK-NEXT: [[TMP68:%.*]] = phi i129 [ 0, [[UDIV_PREHEADER:%.*]] ], [ [[TMP79]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP69:%.*]] = phi i129 [ [[TMP86:%.*]], [[UDIV_PREHEADER]] ], [ [[TMP82:%.*]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP70:%.*]] = phi i129 [ [[TMP84:%.*]], [[UDIV_PREHEADER]] ], [ [[TMP81:%.*]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP71:%.*]] = phi i129 [ [[TMP88]], [[UDIV_PREHEADER]] ], [ [[TMP76]], [[UDIV_DO_WHILE]] ]
+; CHECK-NEXT: [[TMP72:%.*]] = shl i129 [[TMP70]], 1
+; CHECK-NEXT: [[TMP73:%.*]] = lshr i129 [[TMP71]], 128
+; CHECK-NEXT: [[TMP74:%.*]] = or i129 [[TMP72]], [[TMP73]]
+; CHECK-NEXT: [[TMP75:%.*]] = shl i129 [[TMP71]], 1
+; CHECK-NEXT: [[TMP76]] = or i129 [[TMP68]], [[TMP75]]
+; CHECK-NEXT: [[TMP77:%.*]] = sub i129 [[TMP85:%.*]], [[TMP74]]
+; CHECK-NEXT: [[TMP78:%.*]] = ashr i129 [[TMP77]], 128
+; CHECK-NEXT: [[TMP79]] = and i129 [[TMP78]], 1
+; CHECK-NEXT: [[TMP80:%.*]] = and i129 [[TMP78]], [[TMP51]]
+; CHECK-NEXT: [[TMP81]] = sub i129 [[TMP74]], [[TMP80]]
+; CHECK-NEXT: [[TMP82]] = add i129 [[TMP69]], -1
+; CHECK-NEXT: [[TMP83:%.*]] = icmp eq i129 [[TMP82]], 0
+; CHECK-NEXT: br i1 [[TMP83]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]]
+; CHECK: udiv-preheader:
+; CHECK-NEXT: [[TMP84]] = lshr i129 [[TMP52]], [[TMP86]]
+; CHECK-NEXT: [[TMP85]] = add i129 [[TMP51]], -1
+; CHECK-NEXT: br label [[UDIV_DO_WHILE]]
+; CHECK: udiv-bb1:
+; CHECK-NEXT: [[TMP86]] = add i129 [[TMP58]], 1
+; CHECK-NEXT: [[TMP87:%.*]] = sub i129 128, [[TMP58]]
+; CHECK-NEXT: [[TMP88]] = shl i129 [[TMP52]], [[TMP87]]
+; CHECK-NEXT: [[TMP89:%.*]] = icmp eq i129 [[TMP86]], 0
+; CHECK-NEXT: br i1 [[TMP89]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]]
+; CHECK: udiv-end:
+; CHECK-NEXT: [[TMP90:%.*]] = phi i129 [ [[TMP67]], [[UDIV_LOOP_EXIT]] ], [ [[TMP62]], [[UDIV_END1]] ]
+; CHECK-NEXT: [[TMP91:%.*]] = mul i129 [[TMP50]], [[TMP90]]
+; CHECK-NEXT: [[TMP92:%.*]] = sub i129 [[TMP49]], [[TMP91]]
+; CHECK-NEXT: [[TMP93:%.*]] = insertelement <2 x i129> [[TMP46]], i129 [[TMP92]], i64 1
+; CHECK-NEXT: ret <2 x i129> [[TMP93]]
+;
+ %res = urem <2 x i129> %a, %b
+ ret <2 x i129> %res
+}
+
+
+define <vscale x 2 x i129> @sdiv129_scalable(<vscale x 2 x i129> %a, <vscale x 2 x i129> %b) nounwind {
+; CHECK-LABEL: define <vscale x 2 x i129> @sdiv129_scalable(
+; CHECK-SAME: <vscale x 2 x i129> [[A:%.*]], <vscale x 2 x i129> [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[RES:%.*]] = sdiv <vscale x 2 x i129> [[A]], [[B]]
+; CHECK-NEXT: ret <vscale x 2 x i129> [[RES]]
+;
+ %res = sdiv <vscale x 2 x i129> %a, %b
+ ret <vscale x 2 x i129> %res
+}
diff --git a/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptosi129.ll b/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptosi129.ll
index 77bbd5f..75130c2 100644
--- a/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptosi129.ll
+++ b/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptosi129.ll
@@ -27,7 +27,7 @@ define i129 @floattosi129(float %a) {
; CHECK-NEXT: br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP9:%.*]] = add i129 [[TMP5]], -256
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], 4294967167
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
; CHECK-NEXT: br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -41,7 +41,7 @@ define i129 @floattosi129(float %a) {
; CHECK-NEXT: [[TMP15:%.*]] = mul i129 [[TMP14]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], 4294967146
+; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], -150
; CHECK-NEXT: [[TMP17:%.*]] = shl i129 [[TMP7]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = mul i129 [[TMP17]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
@@ -68,7 +68,7 @@ define i129 @doubletosi129(double %a) {
; CHECK-NEXT: br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP9:%.*]] = add i129 [[TMP5]], -1152
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], 4294967167
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
; CHECK-NEXT: br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -82,7 +82,7 @@ define i129 @doubletosi129(double %a) {
; CHECK-NEXT: [[TMP15:%.*]] = mul i129 [[TMP14]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], 4294966221
+; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], -1075
; CHECK-NEXT: [[TMP17:%.*]] = shl i129 [[TMP7]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = mul i129 [[TMP17]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
@@ -110,7 +110,7 @@ define i129 @x86_fp80tosi129(x86_fp80 %a) {
; CHECK-NEXT: br i1 [[TMP9]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP10:%.*]] = add i129 [[TMP6]], -16512
-; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i129 [[TMP10]], 4294967167
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i129 [[TMP10]], -129
; CHECK-NEXT: br i1 [[TMP11]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP12:%.*]] = select i1 [[TMP3]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -124,7 +124,7 @@ define i129 @x86_fp80tosi129(x86_fp80 %a) {
; CHECK-NEXT: [[TMP16:%.*]] = mul i129 [[TMP15]], [[TMP4]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP17:%.*]] = add i129 [[TMP6]], 4294950801
+; CHECK-NEXT: [[TMP17:%.*]] = add i129 [[TMP6]], -16495
; CHECK-NEXT: [[TMP18:%.*]] = shl i129 [[TMP8]], [[TMP17]]
; CHECK-NEXT: [[TMP19:%.*]] = mul i129 [[TMP18]], [[TMP4]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
@@ -151,7 +151,7 @@ define i129 @fp128tosi129(fp128 %a) {
; CHECK-NEXT: br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP9:%.*]] = add i129 [[TMP5]], -16512
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], 4294967167
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
; CHECK-NEXT: br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -165,7 +165,7 @@ define i129 @fp128tosi129(fp128 %a) {
; CHECK-NEXT: [[TMP15:%.*]] = mul i129 [[TMP14]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], 4294950801
+; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], -16495
; CHECK-NEXT: [[TMP17:%.*]] = shl i129 [[TMP7]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = mul i129 [[TMP17]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
diff --git a/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptoui129.ll b/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptoui129.ll
index 67d9eb5..ed630d7 100644
--- a/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptoui129.ll
+++ b/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptoui129.ll
@@ -27,7 +27,7 @@ define i129 @floattoui129(float %a) {
; CHECK-NEXT: br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP9:%.*]] = add i129 [[TMP5]], -256
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], 4294967167
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
; CHECK-NEXT: br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -41,7 +41,7 @@ define i129 @floattoui129(float %a) {
; CHECK-NEXT: [[TMP15:%.*]] = mul i129 [[TMP14]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], 4294967146
+; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], -150
; CHECK-NEXT: [[TMP17:%.*]] = shl i129 [[TMP7]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = mul i129 [[TMP17]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
@@ -68,7 +68,7 @@ define i129 @doubletoui129(double %a) {
; CHECK-NEXT: br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP9:%.*]] = add i129 [[TMP5]], -1152
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], 4294967167
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
; CHECK-NEXT: br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -82,7 +82,7 @@ define i129 @doubletoui129(double %a) {
; CHECK-NEXT: [[TMP15:%.*]] = mul i129 [[TMP14]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], 4294966221
+; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], -1075
; CHECK-NEXT: [[TMP17:%.*]] = shl i129 [[TMP7]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = mul i129 [[TMP17]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
@@ -110,7 +110,7 @@ define i129 @x86_fp80toui129(x86_fp80 %a) {
; CHECK-NEXT: br i1 [[TMP9]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP10:%.*]] = add i129 [[TMP6]], -16512
-; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i129 [[TMP10]], 4294967167
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i129 [[TMP10]], -129
; CHECK-NEXT: br i1 [[TMP11]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP12:%.*]] = select i1 [[TMP3]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -124,7 +124,7 @@ define i129 @x86_fp80toui129(x86_fp80 %a) {
; CHECK-NEXT: [[TMP16:%.*]] = mul i129 [[TMP15]], [[TMP4]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP17:%.*]] = add i129 [[TMP6]], 4294950801
+; CHECK-NEXT: [[TMP17:%.*]] = add i129 [[TMP6]], -16495
; CHECK-NEXT: [[TMP18:%.*]] = shl i129 [[TMP8]], [[TMP17]]
; CHECK-NEXT: [[TMP19:%.*]] = mul i129 [[TMP18]], [[TMP4]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
@@ -151,7 +151,7 @@ define i129 @fp128toui129(fp128 %a) {
; CHECK-NEXT: br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP9:%.*]] = add i129 [[TMP5]], -16512
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], 4294967167
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
; CHECK-NEXT: br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -165,7 +165,7 @@ define i129 @fp128toui129(fp128 %a) {
; CHECK-NEXT: [[TMP15:%.*]] = mul i129 [[TMP14]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], 4294950801
+; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], -16495
; CHECK-NEXT: [[TMP17:%.*]] = shl i129 [[TMP7]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = mul i129 [[TMP17]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
diff --git a/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-si129tofp.ll b/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-si129tofp.ll
index 3961fec..76f5248 100644
--- a/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-si129tofp.ll
+++ b/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-si129tofp.ll
@@ -15,12 +15,12 @@ define half @si129tohalf(i129 %a) {
; CHECK-NEXT: [[TMP5:%.*]] = trunc i129 [[TMP4]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = sub i32 129, [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = sub i32 128, [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i32 [[TMP7]], 24
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i32 [[TMP6]], 24
; CHECK-NEXT: br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
; CHECK: itofp-if-then4:
; CHECK-NEXT: switch i32 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
-; CHECK-NEXT: i32 25, label [[ITOFP_SW_BB:%.*]]
-; CHECK-NEXT: i32 26, label [[ITOFP_SW_EPILOG:%.*]]
+; CHECK-NEXT: i32 25, label [[ITOFP_SW_BB:%.*]]
+; CHECK-NEXT: i32 26, label [[ITOFP_SW_EPILOG:%.*]]
; CHECK-NEXT: ]
; CHECK: itofp-sw-bb:
; CHECK-NEXT: [[TMP9:%.*]] = shl i129 [[TMP3]], 1
@@ -100,12 +100,12 @@ define float @si129tofloat(i129 %a) {
; CHECK-NEXT: [[TMP5:%.*]] = trunc i129 [[TMP4]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = sub i32 129, [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = sub i32 128, [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i32 [[TMP7]], 24
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i32 [[TMP6]], 24
; CHECK-NEXT: br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
; CHECK: itofp-if-then4:
; CHECK-NEXT: switch i32 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
-; CHECK-NEXT: i32 25, label [[ITOFP_SW_BB:%.*]]
-; CHECK-NEXT: i32 26, label [[ITOFP_SW_EPILOG:%.*]]
+; CHECK-NEXT: i32 25, label [[ITOFP_SW_BB:%.*]]
+; CHECK-NEXT: i32 26, label [[ITOFP_SW_EPILOG:%.*]]
; CHECK-NEXT: ]
; CHECK: itofp-sw-bb:
; CHECK-NEXT: [[TMP9:%.*]] = shl i129 [[TMP3]], 1
@@ -184,12 +184,12 @@ define double @si129todouble(i129 %a) {
; CHECK-NEXT: [[TMP5:%.*]] = trunc i129 [[TMP4]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = sub i32 129, [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = sub i32 128, [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i32 [[TMP7]], 53
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i32 [[TMP6]], 53
; CHECK-NEXT: br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
; CHECK: itofp-if-then4:
; CHECK-NEXT: switch i32 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
-; CHECK-NEXT: i32 54, label [[ITOFP_SW_BB:%.*]]
-; CHECK-NEXT: i32 55, label [[ITOFP_SW_EPILOG:%.*]]
+; CHECK-NEXT: i32 54, label [[ITOFP_SW_BB:%.*]]
+; CHECK-NEXT: i32 55, label [[ITOFP_SW_EPILOG:%.*]]
; CHECK-NEXT: ]
; CHECK: itofp-sw-bb:
; CHECK-NEXT: [[TMP9:%.*]] = shl i129 [[TMP3]], 1
@@ -273,12 +273,12 @@ define x86_fp80 @si129tox86_fp80(i129 %a) {
; CHECK-NEXT: [[TMP5:%.*]] = trunc i129 [[TMP4]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = sub i129 129, [[TMP4]]
; CHECK-NEXT: [[TMP7:%.*]] = sub i129 128, [[TMP4]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i129 [[TMP7]], 113
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i129 [[TMP6]], 113
; CHECK-NEXT: br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
; CHECK: itofp-if-then4:
; CHECK-NEXT: switch i129 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
-; CHECK-NEXT: i129 114, label [[ITOFP_SW_BB:%.*]]
-; CHECK-NEXT: i129 115, label [[ITOFP_SW_EPILOG:%.*]]
+; CHECK-NEXT: i129 114, label [[ITOFP_SW_BB:%.*]]
+; CHECK-NEXT: i129 115, label [[ITOFP_SW_EPILOG:%.*]]
; CHECK-NEXT: ]
; CHECK: itofp-sw-bb:
; CHECK-NEXT: [[TMP9:%.*]] = shl i129 [[TMP3]], 1
@@ -357,12 +357,12 @@ define fp128 @si129tofp128(i129 %a) {
; CHECK-NEXT: [[TMP5:%.*]] = trunc i129 [[TMP4]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = sub i129 129, [[TMP4]]
; CHECK-NEXT: [[TMP7:%.*]] = sub i129 128, [[TMP4]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i129 [[TMP7]], 113
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i129 [[TMP6]], 113
; CHECK-NEXT: br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
; CHECK: itofp-if-then4:
; CHECK-NEXT: switch i129 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
-; CHECK-NEXT: i129 114, label [[ITOFP_SW_BB:%.*]]
-; CHECK-NEXT: i129 115, label [[ITOFP_SW_EPILOG:%.*]]
+; CHECK-NEXT: i129 114, label [[ITOFP_SW_BB:%.*]]
+; CHECK-NEXT: i129 115, label [[ITOFP_SW_EPILOG:%.*]]
; CHECK-NEXT: ]
; CHECK: itofp-sw-bb:
; CHECK-NEXT: [[TMP9:%.*]] = shl i129 [[TMP3]], 1
diff --git a/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-ui129tofp.ll b/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-ui129tofp.ll
index e05ff19..96d87a5 100644
--- a/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-ui129tofp.ll
+++ b/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-ui129tofp.ll
@@ -15,12 +15,12 @@ define half @ui129tohalf(i129 %a) {
; CHECK-NEXT: [[TMP5:%.*]] = trunc i129 [[TMP4]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = sub i32 129, [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = sub i32 128, [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i32 [[TMP7]], 24
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i32 [[TMP6]], 24
; CHECK-NEXT: br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
; CHECK: itofp-if-then4:
; CHECK-NEXT: switch i32 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
-; CHECK-NEXT: i32 25, label [[ITOFP_SW_BB:%.*]]
-; CHECK-NEXT: i32 26, label [[ITOFP_SW_EPILOG:%.*]]
+; CHECK-NEXT: i32 25, label [[ITOFP_SW_BB:%.*]]
+; CHECK-NEXT: i32 26, label [[ITOFP_SW_EPILOG:%.*]]
; CHECK-NEXT: ]
; CHECK: itofp-sw-bb:
; CHECK-NEXT: [[TMP9:%.*]] = shl i129 [[A]], 1
@@ -100,12 +100,12 @@ define float @ui129tofloat(i129 %a) {
; CHECK-NEXT: [[TMP5:%.*]] = trunc i129 [[TMP4]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = sub i32 129, [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = sub i32 128, [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i32 [[TMP7]], 24
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i32 [[TMP6]], 24
; CHECK-NEXT: br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
; CHECK: itofp-if-then4:
; CHECK-NEXT: switch i32 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
-; CHECK-NEXT: i32 25, label [[ITOFP_SW_BB:%.*]]
-; CHECK-NEXT: i32 26, label [[ITOFP_SW_EPILOG:%.*]]
+; CHECK-NEXT: i32 25, label [[ITOFP_SW_BB:%.*]]
+; CHECK-NEXT: i32 26, label [[ITOFP_SW_EPILOG:%.*]]
; CHECK-NEXT: ]
; CHECK: itofp-sw-bb:
; CHECK-NEXT: [[TMP9:%.*]] = shl i129 [[A]], 1
@@ -184,12 +184,12 @@ define double @ui129todouble(i129 %a) {
; CHECK-NEXT: [[TMP5:%.*]] = trunc i129 [[TMP4]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = sub i32 129, [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = sub i32 128, [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i32 [[TMP7]], 53
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i32 [[TMP6]], 53
; CHECK-NEXT: br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
; CHECK: itofp-if-then4:
; CHECK-NEXT: switch i32 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
-; CHECK-NEXT: i32 54, label [[ITOFP_SW_BB:%.*]]
-; CHECK-NEXT: i32 55, label [[ITOFP_SW_EPILOG:%.*]]
+; CHECK-NEXT: i32 54, label [[ITOFP_SW_BB:%.*]]
+; CHECK-NEXT: i32 55, label [[ITOFP_SW_EPILOG:%.*]]
; CHECK-NEXT: ]
; CHECK: itofp-sw-bb:
; CHECK-NEXT: [[TMP9:%.*]] = shl i129 [[A]], 1
@@ -273,12 +273,12 @@ define x86_fp80 @ui129tox86_fp80(i129 %a) {
; CHECK-NEXT: [[TMP5:%.*]] = trunc i129 [[TMP4]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = sub i129 129, [[TMP4]]
; CHECK-NEXT: [[TMP7:%.*]] = sub i129 128, [[TMP4]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i129 [[TMP7]], 113
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i129 [[TMP6]], 113
; CHECK-NEXT: br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
; CHECK: itofp-if-then4:
; CHECK-NEXT: switch i129 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
-; CHECK-NEXT: i129 114, label [[ITOFP_SW_BB:%.*]]
-; CHECK-NEXT: i129 115, label [[ITOFP_SW_EPILOG:%.*]]
+; CHECK-NEXT: i129 114, label [[ITOFP_SW_BB:%.*]]
+; CHECK-NEXT: i129 115, label [[ITOFP_SW_EPILOG:%.*]]
; CHECK-NEXT: ]
; CHECK: itofp-sw-bb:
; CHECK-NEXT: [[TMP9:%.*]] = shl i129 [[A]], 1
@@ -357,12 +357,12 @@ define fp128 @ui129tofp128(i129 %a) {
; CHECK-NEXT: [[TMP5:%.*]] = trunc i129 [[TMP4]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = sub i129 129, [[TMP4]]
; CHECK-NEXT: [[TMP7:%.*]] = sub i129 128, [[TMP4]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i129 [[TMP7]], 113
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i129 [[TMP6]], 113
; CHECK-NEXT: br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
; CHECK: itofp-if-then4:
; CHECK-NEXT: switch i129 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
-; CHECK-NEXT: i129 114, label [[ITOFP_SW_BB:%.*]]
-; CHECK-NEXT: i129 115, label [[ITOFP_SW_EPILOG:%.*]]
+; CHECK-NEXT: i129 114, label [[ITOFP_SW_BB:%.*]]
+; CHECK-NEXT: i129 115, label [[ITOFP_SW_EPILOG:%.*]]
; CHECK-NEXT: ]
; CHECK: itofp-sw-bb:
; CHECK-NEXT: [[TMP9:%.*]] = shl i129 [[A]], 1
diff --git a/llvm/test/Transforms/Float2Int/basic.ll b/llvm/test/Transforms/Float2Int/basic.ll
index 2854a83..32f5ca2 100644
--- a/llvm/test/Transforms/Float2Int/basic.ll
+++ b/llvm/test/Transforms/Float2Int/basic.ll
@@ -1,16 +1,35 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes='float2int' -S | FileCheck %s
+; RUN: opt < %s -passes='float2int' -S | FileCheck %s -check-prefixes=CHECK,NONE
+; RUN: opt < %s -passes='float2int' -S --data-layout="n64" | FileCheck %s -check-prefixes=CHECK,ONLY64
+; RUN: opt < %s -passes='float2int' -S --data-layout="n8:16:32:64"| FileCheck %s -check-prefixes=CHECK,MULTIPLE
+; RUN: opt < %s -passes=float2int -S --data-layout="e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"| FileCheck %s -check-prefixes=CHECK,PR-79158
;
; Positive tests
;
define i16 @simple1(i8 %a) {
-; CHECK-LABEL: @simple1(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[T21:%.*]] = add i32 [[TMP1]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i16
-; CHECK-NEXT: ret i16 [[TMP2]]
+; NONE-LABEL: @simple1(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[T21:%.*]] = add i32 [[TMP1]], 1
+; NONE-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i16
+; NONE-NEXT: ret i16 [[TMP2]]
+;
+; ONLY64-LABEL: @simple1(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[T21:%.*]] = add i64 [[TMP1]], 1
+; ONLY64-NEXT: [[TMP2:%.*]] = trunc i64 [[T21]] to i16
+; ONLY64-NEXT: ret i16 [[TMP2]]
+;
+; MULTIPLE-LABEL: @simple1(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; MULTIPLE-NEXT: [[T21:%.*]] = add i16 [[TMP1]], 1
+; MULTIPLE-NEXT: ret i16 [[T21]]
+;
+; PR-79158-LABEL: @simple1(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; PR-79158-NEXT: [[T21:%.*]] = add i16 [[TMP1]], 1
+; PR-79158-NEXT: ret i16 [[T21]]
;
%t1 = uitofp i8 %a to float
%t2 = fadd float %t1, 1.0
@@ -19,11 +38,29 @@ define i16 @simple1(i8 %a) {
}
define i8 @simple2(i8 %a) {
-; CHECK-LABEL: @simple2(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[T21:%.*]] = sub i32 [[TMP1]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i8
-; CHECK-NEXT: ret i8 [[TMP2]]
+; NONE-LABEL: @simple2(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[T21:%.*]] = sub i32 [[TMP1]], 1
+; NONE-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i8
+; NONE-NEXT: ret i8 [[TMP2]]
+;
+; ONLY64-LABEL: @simple2(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[T21:%.*]] = sub i64 [[TMP1]], 1
+; ONLY64-NEXT: [[TMP2:%.*]] = trunc i64 [[T21]] to i8
+; ONLY64-NEXT: ret i8 [[TMP2]]
+;
+; MULTIPLE-LABEL: @simple2(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; MULTIPLE-NEXT: [[T21:%.*]] = sub i16 [[TMP1]], 1
+; MULTIPLE-NEXT: [[TMP2:%.*]] = trunc i16 [[T21]] to i8
+; MULTIPLE-NEXT: ret i8 [[TMP2]]
+;
+; PR-79158-LABEL: @simple2(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; PR-79158-NEXT: [[T21:%.*]] = sub i16 [[TMP1]], 1
+; PR-79158-NEXT: [[TMP2:%.*]] = trunc i16 [[T21]] to i8
+; PR-79158-NEXT: ret i8 [[TMP2]]
;
%t1 = uitofp i8 %a to float
%t2 = fsub float %t1, 1.0
@@ -32,10 +69,28 @@ define i8 @simple2(i8 %a) {
}
define i32 @simple3(i8 %a) {
-; CHECK-LABEL: @simple3(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[T21:%.*]] = sub i32 [[TMP1]], 1
-; CHECK-NEXT: ret i32 [[T21]]
+; NONE-LABEL: @simple3(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[T21:%.*]] = sub i32 [[TMP1]], 1
+; NONE-NEXT: ret i32 [[T21]]
+;
+; ONLY64-LABEL: @simple3(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[T21:%.*]] = sub i64 [[TMP1]], 1
+; ONLY64-NEXT: [[TMP2:%.*]] = trunc i64 [[T21]] to i32
+; ONLY64-NEXT: ret i32 [[TMP2]]
+;
+; MULTIPLE-LABEL: @simple3(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; MULTIPLE-NEXT: [[T21:%.*]] = sub i16 [[TMP1]], 1
+; MULTIPLE-NEXT: [[TMP2:%.*]] = zext i16 [[T21]] to i32
+; MULTIPLE-NEXT: ret i32 [[TMP2]]
+;
+; PR-79158-LABEL: @simple3(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; PR-79158-NEXT: [[T21:%.*]] = sub i16 [[TMP1]], 1
+; PR-79158-NEXT: [[TMP2:%.*]] = zext i16 [[T21]] to i32
+; PR-79158-NEXT: ret i32 [[TMP2]]
;
%t1 = uitofp i8 %a to float
%t2 = fsub float %t1, 1.0
@@ -44,11 +99,29 @@ define i32 @simple3(i8 %a) {
}
define i1 @cmp(i8 %a, i8 %b) {
-; CHECK-LABEL: @cmp(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
-; CHECK-NEXT: [[T31:%.*]] = icmp slt i32 [[TMP1]], [[TMP2]]
-; CHECK-NEXT: ret i1 [[T31]]
+; NONE-LABEL: @cmp(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; NONE-NEXT: [[T31:%.*]] = icmp slt i32 [[TMP1]], [[TMP2]]
+; NONE-NEXT: ret i1 [[T31]]
+;
+; ONLY64-LABEL: @cmp(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i64
+; ONLY64-NEXT: [[T31:%.*]] = icmp slt i64 [[TMP1]], [[TMP2]]
+; ONLY64-NEXT: ret i1 [[T31]]
+;
+; MULTIPLE-LABEL: @cmp(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; MULTIPLE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i16
+; MULTIPLE-NEXT: [[T31:%.*]] = icmp slt i16 [[TMP1]], [[TMP2]]
+; MULTIPLE-NEXT: ret i1 [[T31]]
+;
+; PR-79158-LABEL: @cmp(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; PR-79158-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i16
+; PR-79158-NEXT: [[T31:%.*]] = icmp slt i16 [[TMP1]], [[TMP2]]
+; PR-79158-NEXT: ret i1 [[T31]]
;
%t1 = uitofp i8 %a to float
%t2 = uitofp i8 %b to float
@@ -70,12 +143,34 @@ define i32 @simple4(i32 %a) {
}
define i32 @simple5(i8 %a, i8 %b) {
-; CHECK-LABEL: @simple5(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
-; CHECK-NEXT: [[T31:%.*]] = add i32 [[TMP1]], 1
-; CHECK-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
-; CHECK-NEXT: ret i32 [[T42]]
+; NONE-LABEL: @simple5(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; NONE-NEXT: [[T31:%.*]] = add i32 [[TMP1]], 1
+; NONE-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
+; NONE-NEXT: ret i32 [[T42]]
+;
+; ONLY64-LABEL: @simple5(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i64
+; ONLY64-NEXT: [[T31:%.*]] = add i64 [[TMP1]], 1
+; ONLY64-NEXT: [[T42:%.*]] = mul i64 [[T31]], [[TMP2]]
+; ONLY64-NEXT: [[TMP3:%.*]] = trunc i64 [[T42]] to i32
+; ONLY64-NEXT: ret i32 [[TMP3]]
+;
+; MULTIPLE-LABEL: @simple5(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; MULTIPLE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; MULTIPLE-NEXT: [[T31:%.*]] = add i32 [[TMP1]], 1
+; MULTIPLE-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
+; MULTIPLE-NEXT: ret i32 [[T42]]
+;
+; PR-79158-LABEL: @simple5(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; PR-79158-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; PR-79158-NEXT: [[T31:%.*]] = add i32 [[TMP1]], 1
+; PR-79158-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
+; PR-79158-NEXT: ret i32 [[T42]]
;
%t1 = uitofp i8 %a to float
%t2 = uitofp i8 %b to float
@@ -86,12 +181,34 @@ define i32 @simple5(i8 %a, i8 %b) {
}
define i32 @simple6(i8 %a, i8 %b) {
-; CHECK-LABEL: @simple6(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
-; CHECK-NEXT: [[T31:%.*]] = sub i32 0, [[TMP1]]
-; CHECK-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
-; CHECK-NEXT: ret i32 [[T42]]
+; NONE-LABEL: @simple6(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; NONE-NEXT: [[T31:%.*]] = sub i32 0, [[TMP1]]
+; NONE-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
+; NONE-NEXT: ret i32 [[T42]]
+;
+; ONLY64-LABEL: @simple6(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i64
+; ONLY64-NEXT: [[T31:%.*]] = sub i64 0, [[TMP1]]
+; ONLY64-NEXT: [[T42:%.*]] = mul i64 [[T31]], [[TMP2]]
+; ONLY64-NEXT: [[TMP3:%.*]] = trunc i64 [[T42]] to i32
+; ONLY64-NEXT: ret i32 [[TMP3]]
+;
+; MULTIPLE-LABEL: @simple6(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; MULTIPLE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; MULTIPLE-NEXT: [[T31:%.*]] = sub i32 0, [[TMP1]]
+; MULTIPLE-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
+; MULTIPLE-NEXT: ret i32 [[T42]]
+;
+; PR-79158-LABEL: @simple6(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; PR-79158-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; PR-79158-NEXT: [[T31:%.*]] = sub i32 0, [[TMP1]]
+; PR-79158-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
+; PR-79158-NEXT: ret i32 [[T42]]
;
%t1 = uitofp i8 %a to float
%t2 = uitofp i8 %b to float
@@ -105,15 +222,48 @@ define i32 @simple6(i8 %a, i8 %b) {
; cause failure of the other.
define i32 @multi1(i8 %a, i8 %b, i8 %c, float %d) {
-; CHECK-LABEL: @multi1(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
-; CHECK-NEXT: [[FC:%.*]] = uitofp i8 [[C:%.*]] to float
-; CHECK-NEXT: [[X1:%.*]] = add i32 [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[Z:%.*]] = fadd float [[FC]], [[D:%.*]]
-; CHECK-NEXT: [[W:%.*]] = fptoui float [[Z]] to i32
-; CHECK-NEXT: [[R:%.*]] = add i32 [[X1]], [[W]]
-; CHECK-NEXT: ret i32 [[R]]
+; NONE-LABEL: @multi1(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; NONE-NEXT: [[FC:%.*]] = uitofp i8 [[C:%.*]] to float
+; NONE-NEXT: [[X1:%.*]] = add i32 [[TMP1]], [[TMP2]]
+; NONE-NEXT: [[Z:%.*]] = fadd float [[FC]], [[D:%.*]]
+; NONE-NEXT: [[W:%.*]] = fptoui float [[Z]] to i32
+; NONE-NEXT: [[R:%.*]] = add i32 [[X1]], [[W]]
+; NONE-NEXT: ret i32 [[R]]
+;
+; ONLY64-LABEL: @multi1(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i64
+; ONLY64-NEXT: [[FC:%.*]] = uitofp i8 [[C:%.*]] to float
+; ONLY64-NEXT: [[X1:%.*]] = add i64 [[TMP1]], [[TMP2]]
+; ONLY64-NEXT: [[TMP3:%.*]] = trunc i64 [[X1]] to i32
+; ONLY64-NEXT: [[Z:%.*]] = fadd float [[FC]], [[D:%.*]]
+; ONLY64-NEXT: [[W:%.*]] = fptoui float [[Z]] to i32
+; ONLY64-NEXT: [[R:%.*]] = add i32 [[TMP3]], [[W]]
+; ONLY64-NEXT: ret i32 [[R]]
+;
+; MULTIPLE-LABEL: @multi1(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; MULTIPLE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i16
+; MULTIPLE-NEXT: [[FC:%.*]] = uitofp i8 [[C:%.*]] to float
+; MULTIPLE-NEXT: [[X1:%.*]] = add i16 [[TMP1]], [[TMP2]]
+; MULTIPLE-NEXT: [[TMP3:%.*]] = zext i16 [[X1]] to i32
+; MULTIPLE-NEXT: [[Z:%.*]] = fadd float [[FC]], [[D:%.*]]
+; MULTIPLE-NEXT: [[W:%.*]] = fptoui float [[Z]] to i32
+; MULTIPLE-NEXT: [[R:%.*]] = add i32 [[TMP3]], [[W]]
+; MULTIPLE-NEXT: ret i32 [[R]]
+;
+; PR-79158-LABEL: @multi1(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; PR-79158-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i16
+; PR-79158-NEXT: [[FC:%.*]] = uitofp i8 [[C:%.*]] to float
+; PR-79158-NEXT: [[X1:%.*]] = add i16 [[TMP1]], [[TMP2]]
+; PR-79158-NEXT: [[TMP3:%.*]] = zext i16 [[X1]] to i32
+; PR-79158-NEXT: [[Z:%.*]] = fadd float [[FC]], [[D:%.*]]
+; PR-79158-NEXT: [[W:%.*]] = fptoui float [[Z]] to i32
+; PR-79158-NEXT: [[R:%.*]] = add i32 [[TMP3]], [[W]]
+; PR-79158-NEXT: ret i32 [[R]]
;
%fa = uitofp i8 %a to float
%fb = uitofp i8 %b to float
@@ -127,11 +277,27 @@ define i32 @multi1(i8 %a, i8 %b, i8 %c, float %d) {
}
define i16 @simple_negzero(i8 %a) {
-; CHECK-LABEL: @simple_negzero(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[T21:%.*]] = add i32 [[TMP1]], 0
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i16
-; CHECK-NEXT: ret i16 [[TMP2]]
+; NONE-LABEL: @simple_negzero(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[T21:%.*]] = add i32 [[TMP1]], 0
+; NONE-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i16
+; NONE-NEXT: ret i16 [[TMP2]]
+;
+; ONLY64-LABEL: @simple_negzero(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[T21:%.*]] = add i64 [[TMP1]], 0
+; ONLY64-NEXT: [[TMP2:%.*]] = trunc i64 [[T21]] to i16
+; ONLY64-NEXT: ret i16 [[TMP2]]
+;
+; MULTIPLE-LABEL: @simple_negzero(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; MULTIPLE-NEXT: [[T21:%.*]] = add i16 [[TMP1]], 0
+; MULTIPLE-NEXT: ret i16 [[T21]]
+;
+; PR-79158-LABEL: @simple_negzero(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; PR-79158-NEXT: [[T21:%.*]] = add i16 [[TMP1]], 0
+; PR-79158-NEXT: ret i16 [[T21]]
;
%t1 = uitofp i8 %a to float
%t2 = fadd fast float %t1, -0.0
@@ -140,12 +306,33 @@ define i16 @simple_negzero(i8 %a) {
}
define i32 @simple_negative(i8 %call) {
-; CHECK-LABEL: @simple_negative(
-; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[CALL:%.*]] to i32
-; CHECK-NEXT: [[MUL1:%.*]] = mul i32 [[TMP1]], -3
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[MUL1]] to i8
-; CHECK-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
-; CHECK-NEXT: ret i32 [[CONV3]]
+; NONE-LABEL: @simple_negative(
+; NONE-NEXT: [[TMP1:%.*]] = sext i8 [[CALL:%.*]] to i32
+; NONE-NEXT: [[MUL1:%.*]] = mul i32 [[TMP1]], -3
+; NONE-NEXT: [[TMP2:%.*]] = trunc i32 [[MUL1]] to i8
+; NONE-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
+; NONE-NEXT: ret i32 [[CONV3]]
+;
+; ONLY64-LABEL: @simple_negative(
+; ONLY64-NEXT: [[TMP1:%.*]] = sext i8 [[CALL:%.*]] to i64
+; ONLY64-NEXT: [[MUL1:%.*]] = mul i64 [[TMP1]], -3
+; ONLY64-NEXT: [[TMP2:%.*]] = trunc i64 [[MUL1]] to i8
+; ONLY64-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
+; ONLY64-NEXT: ret i32 [[CONV3]]
+;
+; MULTIPLE-LABEL: @simple_negative(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = sext i8 [[CALL:%.*]] to i16
+; MULTIPLE-NEXT: [[MUL1:%.*]] = mul i16 [[TMP1]], -3
+; MULTIPLE-NEXT: [[TMP2:%.*]] = trunc i16 [[MUL1]] to i8
+; MULTIPLE-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
+; MULTIPLE-NEXT: ret i32 [[CONV3]]
+;
+; PR-79158-LABEL: @simple_negative(
+; PR-79158-NEXT: [[TMP1:%.*]] = sext i8 [[CALL:%.*]] to i16
+; PR-79158-NEXT: [[MUL1:%.*]] = mul i16 [[TMP1]], -3
+; PR-79158-NEXT: [[TMP2:%.*]] = trunc i16 [[MUL1]] to i8
+; PR-79158-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
+; PR-79158-NEXT: ret i32 [[CONV3]]
;
%conv1 = sitofp i8 %call to float
%mul = fmul float %conv1, -3.000000e+00
@@ -155,11 +342,27 @@ define i32 @simple_negative(i8 %call) {
}
define i16 @simple_fneg(i8 %a) {
-; CHECK-LABEL: @simple_fneg(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[T21:%.*]] = sub i32 0, [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i16
-; CHECK-NEXT: ret i16 [[TMP2]]
+; NONE-LABEL: @simple_fneg(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[T21:%.*]] = sub i32 0, [[TMP1]]
+; NONE-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i16
+; NONE-NEXT: ret i16 [[TMP2]]
+;
+; ONLY64-LABEL: @simple_fneg(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[T21:%.*]] = sub i64 0, [[TMP1]]
+; ONLY64-NEXT: [[TMP2:%.*]] = trunc i64 [[T21]] to i16
+; ONLY64-NEXT: ret i16 [[TMP2]]
+;
+; MULTIPLE-LABEL: @simple_fneg(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; MULTIPLE-NEXT: [[T21:%.*]] = sub i16 0, [[TMP1]]
+; MULTIPLE-NEXT: ret i16 [[T21]]
+;
+; PR-79158-LABEL: @simple_fneg(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; PR-79158-NEXT: [[T21:%.*]] = sub i16 0, [[TMP1]]
+; PR-79158-NEXT: ret i16 [[T21]]
;
%t1 = uitofp i8 %a to float
%t2 = fneg fast float %t1
diff --git a/llvm/test/Transforms/Float2Int/pr79158.ll b/llvm/test/Transforms/Float2Int/pr79158.ll
new file mode 100644
index 0000000..639a8ac
--- /dev/null
+++ b/llvm/test/Transforms/Float2Int/pr79158.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=float2int -S | FileCheck %s -check-prefixes=CHECK,NONE
+; RUN: opt < %s -passes=float2int -S --data-layout="n64" | FileCheck %s -check-prefixes=CHECK,ONLY64
+; RUN: opt < %s -passes=float2int -S --data-layout="n8:16:32:64"| FileCheck %s -check-prefixes=CHECK,MULTIPLE
+; RUN: opt < %s -passes=float2int -S --data-layout="e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"| FileCheck %s -check-prefixes=CHECK,PR-79158
+
+define i32 @pr79158(i32 %x) {
+; CHECK-LABEL: define i32 @pr79158(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], 0
+; CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[CMP]] to i64
+; CHECK-NEXT: [[MUL1:%.*]] = mul i64 [[TMP0]], 4294967295
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[MUL1]] to i32
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+entry:
+ %cmp = icmp sgt i32 %x, 0
+ %conv = uitofp i1 %cmp to double
+ %mul = fmul double %conv, 0x41EFFFFFFFE00000
+ %conv1 = fptoui double %mul to i32
+ ret i32 %conv1
+}
+
+define i32 @pr79158_2(i32 %x) {
+; NONE-LABEL: define i32 @pr79158_2(
+; NONE-SAME: i32 [[X:%.*]]) {
+; NONE-NEXT: entry:
+; NONE-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], 0
+; NONE-NEXT: [[TMP0:%.*]] = zext i1 [[CMP]] to i32
+; NONE-NEXT: [[MUL1:%.*]] = mul i32 [[TMP0]], 255
+; NONE-NEXT: [[TMP1:%.*]] = trunc i32 [[MUL1]] to i8
+; NONE-NEXT: [[CONV2:%.*]] = zext i8 [[TMP1]] to i32
+; NONE-NEXT: ret i32 [[CONV2]]
+;
+; ONLY64-LABEL: define i32 @pr79158_2(
+; ONLY64-SAME: i32 [[X:%.*]]) {
+; ONLY64-NEXT: entry:
+; ONLY64-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], 0
+; ONLY64-NEXT: [[TMP0:%.*]] = zext i1 [[CMP]] to i64
+; ONLY64-NEXT: [[MUL1:%.*]] = mul i64 [[TMP0]], 255
+; ONLY64-NEXT: [[TMP1:%.*]] = trunc i64 [[MUL1]] to i8
+; ONLY64-NEXT: [[CONV2:%.*]] = zext i8 [[TMP1]] to i32
+; ONLY64-NEXT: ret i32 [[CONV2]]
+;
+; MULTIPLE-LABEL: define i32 @pr79158_2(
+; MULTIPLE-SAME: i32 [[X:%.*]]) {
+; MULTIPLE-NEXT: entry:
+; MULTIPLE-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], 0
+; MULTIPLE-NEXT: [[TMP0:%.*]] = zext i1 [[CMP]] to i16
+; MULTIPLE-NEXT: [[MUL1:%.*]] = mul i16 [[TMP0]], 255
+; MULTIPLE-NEXT: [[TMP1:%.*]] = trunc i16 [[MUL1]] to i8
+; MULTIPLE-NEXT: [[CONV2:%.*]] = zext i8 [[TMP1]] to i32
+; MULTIPLE-NEXT: ret i32 [[CONV2]]
+;
+; PR-79158-LABEL: define i32 @pr79158_2(
+; PR-79158-SAME: i32 [[X:%.*]]) {
+; PR-79158-NEXT: entry:
+; PR-79158-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], 0
+; PR-79158-NEXT: [[TMP0:%.*]] = zext i1 [[CMP]] to i16
+; PR-79158-NEXT: [[MUL1:%.*]] = mul i16 [[TMP0]], 255
+; PR-79158-NEXT: [[TMP1:%.*]] = trunc i16 [[MUL1]] to i8
+; PR-79158-NEXT: [[CONV2:%.*]] = zext i8 [[TMP1]] to i32
+; PR-79158-NEXT: ret i32 [[CONV2]]
+;
+entry:
+ %cmp = icmp sgt i32 %x, 0
+ %conv = uitofp i1 %cmp to float
+ %mul = fmul float %conv, 2.550000e+02
+ %conv1 = fptoui float %mul to i8
+ %conv2 = zext i8 %conv1 to i32
+ ret i32 %conv2
+}
diff --git a/llvm/test/Transforms/FunctionAttrs/noundef.ll b/llvm/test/Transforms/FunctionAttrs/noundef.ll
index 946b562..9ab3708 100644
--- a/llvm/test/Transforms/FunctionAttrs/noundef.ll
+++ b/llvm/test/Transforms/FunctionAttrs/noundef.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes='function-attrs' -S | FileCheck %s
+@g_var = external global [0 x i8]
+
define i32 @test_ret_constant() {
; CHECK-LABEL: define noundef i32 @test_ret_constant(
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
@@ -152,3 +154,15 @@ define i32 @test_ret_constant_msan() sanitize_memory {
;
ret i32 0
}
+
+define i64 @test_trunc_with_constexpr() {
+; CHECK-LABEL: define noundef i64 @test_trunc_with_constexpr(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: [[ADD:%.*]] = add i32 trunc (i64 sub (i64 0, i64 ptrtoint (ptr @g_var to i64)) to i32), 1
+; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[ADD]] to i64
+; CHECK-NEXT: ret i64 [[CONV]]
+;
+ %add = add i32 trunc (i64 sub (i64 0, i64 ptrtoint (ptr @g_var to i64)) to i32), 1
+ %conv = sext i32 %add to i64
+ ret i64 %conv
+}
diff --git a/llvm/test/Transforms/GVNHoist/hoist-merge-geps.ll b/llvm/test/Transforms/GVNHoist/hoist-merge-geps.ll
new file mode 100644
index 0000000..b3b5916
--- /dev/null
+++ b/llvm/test/Transforms/GVNHoist/hoist-merge-geps.ll
@@ -0,0 +1,63 @@
+; RUN: opt -S -passes=gvn-hoist < %s | FileCheck %s
+
+define dso_local void @func(i32 noundef %a, ptr noundef %b) !dbg !10 {
+; Check the merged debug location of hoisted GEP
+; CHECK: entry
+; CHECK: %{{[a-zA-Z0-9_]*}} = getelementptr {{.*}} !dbg [[MERGED_DL:![0-9]+]]
+; CHECK: [[MERGED_DL]] = !DILocation(line: 0, scope: !{{[0-9]+}})
+entry:
+ tail call void @llvm.dbg.value(metadata i32 %a, metadata !16, metadata !DIExpression()), !dbg !17
+ tail call void @llvm.dbg.value(metadata ptr %b, metadata !18, metadata !DIExpression()), !dbg !17
+ %tobool = icmp ne i32 %a, 0, !dbg !19
+ br i1 %tobool, label %if.then, label %if.else, !dbg !21
+
+if.then: ; preds = %entry
+ %arrayidx = getelementptr inbounds i32, ptr %b, i64 1, !dbg !22
+ store i32 1, ptr %arrayidx, align 4, !dbg !24
+ br label %if.end, !dbg !25
+
+if.else: ; preds = %entry
+ %arrayidx1 = getelementptr inbounds i32, ptr %b, i64 1, !dbg !26
+ store i32 1, ptr %arrayidx1, align 4, !dbg !28
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret void, !dbg !29
+}
+
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+
+declare void @llvm.dbg.value(metadata, metadata, metadata)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3, !4, !5, !6, !7, !8}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang version 19.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+!1 = !DIFile(filename: "main.c", directory: "/root/llvm-test/GVNHoist")
+!2 = !{i32 7, !"Dwarf Version", i32 5}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !{i32 1, !"wchar_size", i32 4}
+!5 = !{i32 8, !"PIC Level", i32 2}
+!6 = !{i32 7, !"PIE Level", i32 2}
+!7 = !{i32 7, !"uwtable", i32 2}
+!8 = !{i32 7, !"frame-pointer", i32 2}
+!10 = distinct !DISubprogram(name: "func", scope: !1, file: !1, line: 1, type: !11, scopeLine: 1, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !15)
+!11 = !DISubroutineType(types: !12)
+!12 = !{null, !13, !14}
+!13 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!14 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !13, size: 64)
+!15 = !{}
+!16 = !DILocalVariable(name: "a", arg: 1, scope: !10, file: !1, line: 1, type: !13)
+!17 = !DILocation(line: 0, scope: !10)
+!18 = !DILocalVariable(name: "b", arg: 2, scope: !10, file: !1, line: 1, type: !14)
+!19 = !DILocation(line: 2, column: 9, scope: !20)
+!20 = distinct !DILexicalBlock(scope: !10, file: !1, line: 2, column: 9)
+!21 = !DILocation(line: 2, column: 9, scope: !10)
+!22 = !DILocation(line: 3, column: 9, scope: !23)
+!23 = distinct !DILexicalBlock(scope: !20, file: !1, line: 2, column: 12)
+!24 = !DILocation(line: 3, column: 14, scope: !23)
+!25 = !DILocation(line: 4, column: 5, scope: !23)
+!26 = !DILocation(line: 5, column: 9, scope: !27)
+!27 = distinct !DILexicalBlock(scope: !20, file: !1, line: 4, column: 12)
+!28 = !DILocation(line: 5, column: 14, scope: !27)
+!29 = !DILocation(line: 7, column: 1, scope: !10)
diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions-base-call.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-base-call.ll
index 8ef2d895..baf5c72 100644
--- a/llvm/test/Transforms/GlobalDCE/virtual-functions-base-call.ll
+++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-base-call.ll
@@ -45,13 +45,13 @@ entry:
define hidden void @_ZN1AC2Ev(ptr nocapture %this) {
entry:
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1A, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1A, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
define hidden void @_ZN1BC2Ev(ptr nocapture %this) {
entry:
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1B, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1B, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions-base-pointer-call.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-base-pointer-call.ll
index 09979690..47a80f8 100644
--- a/llvm/test/Transforms/GlobalDCE/virtual-functions-base-pointer-call.ll
+++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-base-pointer-call.ll
@@ -65,13 +65,13 @@ entry:
define hidden void @_ZN1AC2Ev(ptr nocapture %this) {
entry:
- store ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV1A, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 16) ({ [4 x ptr] }, ptr @_ZTV1A, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
define hidden void @_ZN1BC2Ev(ptr nocapture %this) {
entry:
- store ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV1B, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 16) ({ [4 x ptr] }, ptr @_ZTV1B, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-call.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-call.ll
index 896675f..50b6825 100644
--- a/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-call.ll
+++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-call.ll
@@ -45,13 +45,13 @@ entry:
define hidden void @_ZN1AC2Ev(ptr nocapture %this) {
entry:
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1A, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1A, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
define hidden void @_ZN1BC2Ev(ptr nocapture %this) {
entry:
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1B, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1B, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-pointer-call.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-pointer-call.ll
index bdf97d2..e5dea7a 100644
--- a/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-pointer-call.ll
+++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-pointer-call.ll
@@ -67,13 +67,13 @@ entry:
define hidden void @_ZN1AC2Ev(ptr nocapture %this) {
entry:
- store ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV1A, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 16) ({ [4 x ptr] }, ptr @_ZTV1A, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
define hidden void @_ZN1BC2Ev(ptr nocapture %this) {
entry:
- store ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV1B, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 16) ({ [4 x ptr] }, ptr @_ZTV1B, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions-novfe.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-novfe.ll
index 01475c8..04f654c 100644
--- a/llvm/test/Transforms/GlobalDCE/virtual-functions-novfe.ll
+++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-novfe.ll
@@ -39,8 +39,8 @@ entry:
define dso_local i32 @test_A() {
entry:
%call = tail call ptr @_Znwm(i64 8)
- store ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV1A, i64 0, inrange i32 0, i64 2), ptr %call, align 8
- %0 = tail call { ptr, i1 } @llvm.type.checked.load(ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV1A, i64 0, inrange i32 0, i64 2), i32 0, metadata !"_ZTS1A"), !nosanitize !9
+ store ptr getelementptr inbounds inrange(-16, 16) ({ [4 x ptr] }, ptr @_ZTV1A, i64 0, i32 0, i64 2), ptr %call, align 8
+ %0 = tail call { ptr, i1 } @llvm.type.checked.load(ptr getelementptr inbounds inrange(-16, 16) ({ [4 x ptr] }, ptr @_ZTV1A, i64 0, i32 0, i64 2), i32 0, metadata !"_ZTS1A"), !nosanitize !9
%1 = extractvalue { ptr, i1 } %0, 0, !nosanitize !9
%call1 = tail call i32 %1(ptr nonnull %call)
ret i32 %call1
diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-post-lto.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-post-lto.ll
index 0fac0fb..dde36d4 100644
--- a/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-post-lto.ll
+++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-post-lto.ll
@@ -18,7 +18,7 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
define internal void @_ZN1AC2Ev(ptr %this) {
entry:
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1A, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1A, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
@@ -43,7 +43,7 @@ entry:
define internal void @_ZN1BC2Ev(ptr %this) {
entry:
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1B, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1B, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
@@ -68,7 +68,7 @@ entry:
define internal void @_ZN1CC2Ev(ptr %this) {
entry:
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1C, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1C, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-pre-lto.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-pre-lto.ll
index 7875d38..ac9c362 100644
--- a/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-pre-lto.ll
+++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-pre-lto.ll
@@ -12,7 +12,7 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
define internal void @_ZN1AC2Ev(ptr %this) {
entry:
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1A, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1A, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
@@ -36,7 +36,7 @@ entry:
define internal void @_ZN1BC2Ev(ptr %this) {
entry:
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1B, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1B, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
@@ -60,7 +60,7 @@ entry:
define internal void @_ZN1CC2Ev(ptr %this) {
entry:
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1C, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1C, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions.ll
index e00f8a7..0d9b51c 100644
--- a/llvm/test/Transforms/GlobalDCE/virtual-functions.ll
+++ b/llvm/test/Transforms/GlobalDCE/virtual-functions.ll
@@ -38,8 +38,8 @@ entry:
define dso_local i32 @test_A() {
entry:
%call = tail call ptr @_Znwm(i64 8)
- store ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV1A, i64 0, inrange i32 0, i64 2), ptr %call, align 8
- %0 = tail call { ptr, i1 } @llvm.type.checked.load(ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV1A, i64 0, inrange i32 0, i64 2), i32 0, metadata !"_ZTS1A"), !nosanitize !9
+ store ptr getelementptr inbounds inrange(-16, 16) ({ [4 x ptr] }, ptr @_ZTV1A, i64 0, i32 0, i64 2), ptr %call, align 8
+ %0 = tail call { ptr, i1 } @llvm.type.checked.load(ptr getelementptr inbounds inrange(-16, 16) ({ [4 x ptr] }, ptr @_ZTV1A, i64 0, i32 0, i64 2), i32 0, metadata !"_ZTS1A"), !nosanitize !9
%1 = extractvalue { ptr, i1 } %0, 0, !nosanitize !9
%call1 = tail call i32 %1(ptr nonnull %call)
ret i32 %call1
diff --git a/llvm/test/Transforms/GlobalDCE/vtable-rtti.ll b/llvm/test/Transforms/GlobalDCE/vtable-rtti.ll
index 2eb761e..c4a45ff 100644
--- a/llvm/test/Transforms/GlobalDCE/vtable-rtti.ll
+++ b/llvm/test/Transforms/GlobalDCE/vtable-rtti.ll
@@ -16,7 +16,7 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
define internal void @_ZN1AC2Ev(ptr %this) {
entry:
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1A, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1A, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
diff --git a/llvm/test/Transforms/GlobalOpt/inalloca-varargs.ll b/llvm/test/Transforms/GlobalOpt/inalloca-varargs.ll
index 1882107..4c5a448 100644
--- a/llvm/test/Transforms/GlobalOpt/inalloca-varargs.ll
+++ b/llvm/test/Transforms/GlobalOpt/inalloca-varargs.ll
@@ -23,7 +23,7 @@ define internal i32 @i(ptr inalloca(ptr) %a, ...) {
; CHECK-LABEL: define {{[^@]+}}@i
; CHECK-SAME: (ptr inalloca(ptr) [[A:%.*]], ...) unnamed_addr {
; CHECK-NEXT: [[AP:%.*]] = alloca ptr, align 4
-; CHECK-NEXT: call void @llvm.va_start(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[AP]], align 4
; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[ARGP_CUR]], align 4
; CHECK-NEXT: ret i32 [[L]]
diff --git a/llvm/test/Transforms/GlobalSplit/basic.ll b/llvm/test/Transforms/GlobalSplit/basic.ll
index c47bdd6..eb15157 100644
--- a/llvm/test/Transforms/GlobalSplit/basic.ll
+++ b/llvm/test/Transforms/GlobalSplit/basic.ll
@@ -3,11 +3,11 @@
target datalayout = "e-p:64:64"
target triple = "x86_64-unknown-linux-gnu"
-; CHECK: @vtt = constant [3 x ptr] [ptr @global.0, ptr getelementptr inbounds ([2 x ptr], ptr @global.0, i32 0, i32 1), ptr @global.1]
+; CHECK: @vtt = constant [3 x ptr] [ptr @global.0, ptr getelementptr inbounds (i8, ptr @global.0, i64 8), ptr @global.1]
@vtt = constant [3 x ptr] [
- ptr getelementptr ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, inrange i32 0, i32 0),
- ptr getelementptr ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, inrange i32 0, i32 1),
- ptr getelementptr ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, inrange i32 1, i32 0)
+ ptr getelementptr inrange(0, 16) ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, i32 0, i32 0),
+ ptr getelementptr inrange(-8, 8) ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, i32 0, i32 1),
+ ptr getelementptr inrange(0, 8) ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, i32 1, i32 0)
]
; CHECK-NOT: @global =
@@ -22,25 +22,25 @@ target triple = "x86_64-unknown-linux-gnu"
; CHECK: define ptr @f1()
define ptr @f1() {
; CHECK-NEXT: ret ptr @global.0
- ret ptr getelementptr ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, inrange i32 0, i32 0)
+ ret ptr getelementptr inrange(0, 16) ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, i32 0, i32 0)
}
; CHECK: define ptr @f2()
define ptr @f2() {
- ; CHECK-NEXT: ret ptr getelementptr inbounds ([2 x ptr], ptr @global.0, i32 0, i32 1)
- ret ptr getelementptr ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, inrange i32 0, i32 1)
+ ; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @global.0, i64 8)
+ ret ptr getelementptr inrange(-8, 8) ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, i32 0, i32 1)
}
; CHECK: define ptr @f3()
define ptr @f3() {
- ; CHECK-NEXT: ret ptr getelementptr inbounds ([2 x ptr], ptr @global.0, i64 1, i32 0)
- ret ptr getelementptr ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, inrange i32 0, i32 2)
+ ; CHECK-NEXT: ret ptr getelementptr (i8, ptr @global.0, i64 16)
+ ret ptr getelementptr inrange(-16, 0) ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, i32 0, i32 2)
}
; CHECK: define ptr @f4()
define ptr @f4() {
; CHECK-NEXT: ret ptr @global.1
- ret ptr getelementptr ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, inrange i32 1, i32 0)
+ ret ptr getelementptr inrange(0, 8) ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, i32 1, i32 0)
}
define void @foo() {
diff --git a/llvm/test/Transforms/GlobalSplit/non-beneficial.ll b/llvm/test/Transforms/GlobalSplit/non-beneficial.ll
index a7c50c9..5e27623 100644
--- a/llvm/test/Transforms/GlobalSplit/non-beneficial.ll
+++ b/llvm/test/Transforms/GlobalSplit/non-beneficial.ll
@@ -10,7 +10,7 @@ target triple = "x86_64-unknown-linux-gnu"
}
define ptr @f() {
- ret ptr getelementptr ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, inrange i32 0, i32 0)
+ ret ptr getelementptr inrange(0, 16) ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, i32 0, i32 0)
}
define ptr @g() {
diff --git a/llvm/test/Transforms/GlobalSplit/nonlocal.ll b/llvm/test/Transforms/GlobalSplit/nonlocal.ll
index 38169a8..0e63913 100644
--- a/llvm/test/Transforms/GlobalSplit/nonlocal.ll
+++ b/llvm/test/Transforms/GlobalSplit/nonlocal.ll
@@ -10,7 +10,7 @@ target triple = "x86_64-unknown-linux-gnu"
}
define ptr @f() {
- ret ptr getelementptr ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, inrange i32 0, i32 0)
+ ret ptr getelementptr inrange(0, 16) ({ [2 x ptr], [1 x ptr] }, ptr @global, i32 0, i32 0, i32 0)
}
define ptr @g() {
diff --git a/llvm/test/Transforms/HotColdSplit/outline-disjoint-diamonds.ll b/llvm/test/Transforms/HotColdSplit/outline-disjoint-diamonds.ll
index 0c05598..55013aa 100644
--- a/llvm/test/Transforms/HotColdSplit/outline-disjoint-diamonds.ll
+++ b/llvm/test/Transforms/HotColdSplit/outline-disjoint-diamonds.ll
@@ -2,9 +2,9 @@
; CHECK-LABEL: define {{.*}}@fun
; CHECK: call {{.*}}@fun.cold.1(
-; CHECK-NEXT: ret void
+; CHECK-NEXT: unreachable
; CHECK: call {{.*}}@fun.cold.2(
-; CHECK-NEXT: ret void
+; CHECK-NEXT: unreachable
define void @fun() {
entry:
br i1 undef, label %A.then, label %A.else
diff --git a/llvm/test/Transforms/IRCE/compound-loop-bound.ll b/llvm/test/Transforms/IRCE/compound-loop-bound.ll
index 0930d19..e50d8c6 100644
--- a/llvm/test/Transforms/IRCE/compound-loop-bound.ll
+++ b/llvm/test/Transforms/IRCE/compound-loop-bound.ll
@@ -16,23 +16,56 @@ define void @incrementing_loop(ptr %arr, ptr %len_ptr, i32 %K, i32 %M) {
; CHECK-NEXT: br i1 [[AND]], label [[PREHEADER:%.*]], label [[EXIT:%.*]]
; CHECK: preheader:
; CHECK-NEXT: [[SMIN:%.*]] = call i32 @llvm.smin.i32(i32 [[K]], i32 [[M]])
+; CHECK-NEXT: [[SMIN1:%.*]] = call i32 @llvm.smin.i32(i32 [[LEN]], i32 [[M]])
+; CHECK-NEXT: [[SMIN2:%.*]] = call i32 @llvm.smin.i32(i32 [[SMIN1]], i32 [[K]])
+; CHECK-NEXT: [[EXIT_MAINLOOP_AT:%.*]] = call i32 @llvm.smax.i32(i32 [[SMIN2]], i32 0)
+; CHECK-NEXT: [[TMP0:%.*]] = icmp slt i32 0, [[EXIT_MAINLOOP_AT]]
+; CHECK-NEXT: br i1 [[TMP0]], label [[LOOP_PREHEADER:%.*]], label [[MAIN_PSEUDO_EXIT:%.*]]
+; CHECK: loop.preheader:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IDX:%.*]] = phi i32 [ 0, [[PREHEADER]] ], [ [[IDX_NEXT:%.*]], [[IN_BOUNDS:%.*]] ]
-; CHECK-NEXT: [[IDX_NEXT]] = add i32 [[IDX]], 1
+; CHECK-NEXT: [[IDX:%.*]] = phi i32 [ [[IDX_NEXT:%.*]], [[IN_BOUNDS:%.*]] ], [ 0, [[LOOP_PREHEADER]] ]
+; CHECK-NEXT: [[IDX_NEXT]] = add nsw i32 [[IDX]], 1
; CHECK-NEXT: [[GUARD:%.*]] = icmp slt i32 [[IDX]], [[LEN]]
-; CHECK-NEXT: br i1 [[GUARD]], label [[IN_BOUNDS]], label [[OUT_OF_BOUNDS:%.*]]
+; CHECK-NEXT: br i1 true, label [[IN_BOUNDS]], label [[OUT_OF_BOUNDS_LOOPEXIT3:%.*]]
; CHECK: in.bounds:
; CHECK-NEXT: [[ADDR:%.*]] = getelementptr i32, ptr [[ARR]], i32 [[IDX]]
; CHECK-NEXT: store i32 0, ptr [[ADDR]], align 4
; CHECK-NEXT: [[NEXT:%.*]] = icmp slt i32 [[IDX_NEXT]], [[SMIN]]
-; CHECK-NEXT: br i1 [[NEXT]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[IDX_NEXT]], [[EXIT_MAINLOOP_AT]]
+; CHECK-NEXT: br i1 [[TMP1]], label [[LOOP]], label [[MAIN_EXIT_SELECTOR:%.*]]
+; CHECK: main.exit.selector:
+; CHECK-NEXT: [[IDX_NEXT_LCSSA:%.*]] = phi i32 [ [[IDX_NEXT]], [[IN_BOUNDS]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i32 [[IDX_NEXT_LCSSA]], [[SMIN]]
+; CHECK-NEXT: br i1 [[TMP2]], label [[MAIN_PSEUDO_EXIT]], label [[EXIT_LOOPEXIT:%.*]]
+; CHECK: main.pseudo.exit:
+; CHECK-NEXT: [[IDX_COPY:%.*]] = phi i32 [ 0, [[PREHEADER]] ], [ [[IDX_NEXT_LCSSA]], [[MAIN_EXIT_SELECTOR]] ]
+; CHECK-NEXT: [[INDVAR_END:%.*]] = phi i32 [ 0, [[PREHEADER]] ], [ [[IDX_NEXT_LCSSA]], [[MAIN_EXIT_SELECTOR]] ]
+; CHECK-NEXT: br label [[POSTLOOP:%.*]]
+; CHECK: out.of.bounds.loopexit:
+; CHECK-NEXT: br label [[OUT_OF_BOUNDS:%.*]]
+; CHECK: out.of.bounds.loopexit3:
+; CHECK-NEXT: br label [[OUT_OF_BOUNDS]]
; CHECK: out.of.bounds:
; CHECK-NEXT: ret void
+; CHECK: exit.loopexit.loopexit:
+; CHECK-NEXT: br label [[EXIT_LOOPEXIT]]
; CHECK: exit.loopexit:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
+; CHECK: postloop:
+; CHECK-NEXT: br label [[LOOP_POSTLOOP:%.*]]
+; CHECK: loop.postloop:
+; CHECK-NEXT: [[IDX_POSTLOOP:%.*]] = phi i32 [ [[IDX_COPY]], [[POSTLOOP]] ], [ [[IDX_NEXT_POSTLOOP:%.*]], [[IN_BOUNDS_POSTLOOP:%.*]] ]
+; CHECK-NEXT: [[IDX_NEXT_POSTLOOP]] = add i32 [[IDX_POSTLOOP]], 1
+; CHECK-NEXT: [[GUARD_POSTLOOP:%.*]] = icmp slt i32 [[IDX_POSTLOOP]], [[LEN]]
+; CHECK-NEXT: br i1 [[GUARD_POSTLOOP]], label [[IN_BOUNDS_POSTLOOP]], label [[OUT_OF_BOUNDS_LOOPEXIT:%.*]]
+; CHECK: in.bounds.postloop:
+; CHECK-NEXT: [[ADDR_POSTLOOP:%.*]] = getelementptr i32, ptr [[ARR]], i32 [[IDX_POSTLOOP]]
+; CHECK-NEXT: store i32 0, ptr [[ADDR_POSTLOOP]], align 4
+; CHECK-NEXT: [[NEXT_POSTLOOP:%.*]] = icmp slt i32 [[IDX_NEXT_POSTLOOP]], [[SMIN]]
+; CHECK-NEXT: br i1 [[NEXT_POSTLOOP]], label [[LOOP_POSTLOOP]], label [[EXIT_LOOPEXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP1:![0-9]+]], !loop_constrainer.loop.clone !6
;
entry:
%len = load i32, ptr %len_ptr, !range !0
@@ -78,24 +111,58 @@ define void @decrementing_loop(ptr %arr, ptr %len_ptr, i32 %K, i32 %M) {
; CHECK-NEXT: [[AND:%.*]] = and i1 [[CHECK0]], [[CHECK1]]
; CHECK-NEXT: br i1 [[AND]], label [[PREHEADER:%.*]], label [[EXIT:%.*]]
; CHECK: preheader:
-; CHECK-NEXT: [[SMIN:%.*]] = call i32 @llvm.smin.i32(i32 [[K]], i32 [[M]])
+; CHECK-NEXT: [[INDVAR_START:%.*]] = call i32 @llvm.smin.i32(i32 [[K]], i32 [[M]])
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[INDVAR_START]], 1
+; CHECK-NEXT: [[SMIN:%.*]] = call i32 @llvm.smin.i32(i32 [[LEN]], i32 [[TMP0]])
+; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[SMIN]], i32 0)
+; CHECK-NEXT: [[EXIT_PRELOOP_AT:%.*]] = add nsw i32 [[SMAX]], -1
+; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[INDVAR_START]], [[EXIT_PRELOOP_AT]]
+; CHECK-NEXT: br i1 [[TMP1]], label [[LOOP_PRELOOP_PREHEADER:%.*]], label [[PRELOOP_PSEUDO_EXIT:%.*]]
+; CHECK: loop.preloop.preheader:
+; CHECK-NEXT: br label [[LOOP_PRELOOP:%.*]]
+; CHECK: mainloop:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IDX:%.*]] = phi i32 [ [[SMIN]], [[PREHEADER]] ], [ [[IDX_DEC:%.*]], [[IN_BOUNDS:%.*]] ]
-; CHECK-NEXT: [[IDX_DEC]] = sub i32 [[IDX]], 1
+; CHECK-NEXT: [[IDX:%.*]] = phi i32 [ [[IDX_PRELOOP_COPY:%.*]], [[MAINLOOP:%.*]] ], [ [[IDX_DEC:%.*]], [[IN_BOUNDS:%.*]] ]
+; CHECK-NEXT: [[IDX_DEC]] = sub nsw i32 [[IDX]], 1
; CHECK-NEXT: [[GUARD:%.*]] = icmp slt i32 [[IDX]], [[LEN]]
-; CHECK-NEXT: br i1 [[GUARD]], label [[IN_BOUNDS]], label [[OUT_OF_BOUNDS:%.*]]
+; CHECK-NEXT: br i1 true, label [[IN_BOUNDS]], label [[OUT_OF_BOUNDS_LOOPEXIT1:%.*]]
; CHECK: in.bounds:
; CHECK-NEXT: [[ADDR:%.*]] = getelementptr i32, ptr [[ARR]], i32 [[IDX]]
; CHECK-NEXT: store i32 0, ptr [[ADDR]], align 4
; CHECK-NEXT: [[NEXT:%.*]] = icmp sgt i32 [[IDX_DEC]], -1
-; CHECK-NEXT: br i1 [[NEXT]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
+; CHECK-NEXT: br i1 [[NEXT]], label [[LOOP]], label [[EXIT_LOOPEXIT_LOOPEXIT:%.*]]
+; CHECK: out.of.bounds.loopexit:
+; CHECK-NEXT: br label [[OUT_OF_BOUNDS:%.*]]
+; CHECK: out.of.bounds.loopexit1:
+; CHECK-NEXT: br label [[OUT_OF_BOUNDS]]
; CHECK: out.of.bounds:
; CHECK-NEXT: ret void
+; CHECK: exit.loopexit.loopexit:
+; CHECK-NEXT: br label [[EXIT_LOOPEXIT:%.*]]
; CHECK: exit.loopexit:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
+; CHECK: loop.preloop:
+; CHECK-NEXT: [[IDX_PRELOOP:%.*]] = phi i32 [ [[IDX_DEC_PRELOOP:%.*]], [[IN_BOUNDS_PRELOOP:%.*]] ], [ [[INDVAR_START]], [[LOOP_PRELOOP_PREHEADER]] ]
+; CHECK-NEXT: [[IDX_DEC_PRELOOP]] = sub i32 [[IDX_PRELOOP]], 1
+; CHECK-NEXT: [[GUARD_PRELOOP:%.*]] = icmp slt i32 [[IDX_PRELOOP]], [[LEN]]
+; CHECK-NEXT: br i1 [[GUARD_PRELOOP]], label [[IN_BOUNDS_PRELOOP]], label [[OUT_OF_BOUNDS_LOOPEXIT:%.*]]
+; CHECK: in.bounds.preloop:
+; CHECK-NEXT: [[ADDR_PRELOOP:%.*]] = getelementptr i32, ptr [[ARR]], i32 [[IDX_PRELOOP]]
+; CHECK-NEXT: store i32 0, ptr [[ADDR_PRELOOP]], align 4
+; CHECK-NEXT: [[NEXT_PRELOOP:%.*]] = icmp sgt i32 [[IDX_DEC_PRELOOP]], -1
+; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[IDX_DEC_PRELOOP]], [[EXIT_PRELOOP_AT]]
+; CHECK-NEXT: br i1 [[TMP2]], label [[LOOP_PRELOOP]], label [[PRELOOP_EXIT_SELECTOR:%.*]], !llvm.loop [[LOOP7:![0-9]+]], !loop_constrainer.loop.clone !6
+; CHECK: preloop.exit.selector:
+; CHECK-NEXT: [[IDX_DEC_PRELOOP_LCSSA:%.*]] = phi i32 [ [[IDX_DEC_PRELOOP]], [[IN_BOUNDS_PRELOOP]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[IDX_DEC_PRELOOP_LCSSA]], -1
+; CHECK-NEXT: br i1 [[TMP3]], label [[PRELOOP_PSEUDO_EXIT]], label [[EXIT_LOOPEXIT]]
+; CHECK: preloop.pseudo.exit:
+; CHECK-NEXT: [[IDX_PRELOOP_COPY]] = phi i32 [ [[INDVAR_START]], [[PREHEADER]] ], [ [[IDX_DEC_PRELOOP_LCSSA]], [[PRELOOP_EXIT_SELECTOR]] ]
+; CHECK-NEXT: [[INDVAR_END:%.*]] = phi i32 [ [[INDVAR_START]], [[PREHEADER]] ], [ [[IDX_DEC_PRELOOP_LCSSA]], [[PRELOOP_EXIT_SELECTOR]] ]
+; CHECK-NEXT: br label [[MAINLOOP]]
;
entry:
%len = load i32, ptr %len_ptr, !range !0
diff --git a/llvm/test/Transforms/IROutliner/illegal-vaarg.ll b/llvm/test/Transforms/IROutliner/illegal-vaarg.ll
index ef365d6..38dfd25 100644
--- a/llvm/test/Transforms/IROutliner/illegal-vaarg.ll
+++ b/llvm/test/Transforms/IROutliner/illegal-vaarg.ll
@@ -17,10 +17,10 @@ define i32 @func1(i32 %a, double %b, ptr %v, ...) nounwind {
; CHECK-NEXT: [[AP:%.*]] = alloca ptr, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[A:%.*]], ptr [[A_ADDR]], double [[B:%.*]], ptr [[B_ADDR]])
-; CHECK-NEXT: call void @llvm.va_start(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
-; CHECK-NEXT: call void @llvm.va_copy(ptr [[V:%.*]], ptr [[AP]])
-; CHECK-NEXT: call void @llvm.va_end(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_copy.p0(ptr [[V:%.*]], ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[AP]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
@@ -52,10 +52,10 @@ define i32 @func2(i32 %a, double %b, ptr %v, ...) nounwind {
; CHECK-NEXT: [[AP:%.*]] = alloca ptr, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[A:%.*]], ptr [[A_ADDR]], double [[B:%.*]], ptr [[B_ADDR]])
-; CHECK-NEXT: call void @llvm.va_start(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
-; CHECK-NEXT: call void @llvm.va_copy(ptr [[V:%.*]], ptr [[AP]])
-; CHECK-NEXT: call void @llvm.va_end(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_copy.p0(ptr [[V:%.*]], ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[AP]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
diff --git a/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll b/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll
index 9f565de..2d52608 100644
--- a/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll
+++ b/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll
@@ -51,7 +51,7 @@ entry:
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
; CHECK-NEXT: store double [[B]], ptr [[B_ADDR]], align 8
-; CHECK-NEXT: call void @llvm.va_start(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[V]], ptr [[AP]], i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
@@ -70,7 +70,7 @@ entry:
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
; CHECK-NEXT: store double [[B]], ptr [[B_ADDR]], align 8
-; CHECK-NEXT: call void @llvm.va_start(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[V]], ptr [[AP]], i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
@@ -84,8 +84,8 @@ entry:
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: call void @llvm.va_copy(ptr [[TMP0]], ptr [[TMP1]])
-; CHECK-NEXT: call void @llvm.va_end(ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.va_copy.p0(ptr [[TMP0]], ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr [[TMP3]], align 4
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
diff --git a/llvm/test/Transforms/IROutliner/outlining-no-return-functions.ll b/llvm/test/Transforms/IROutliner/outlining-no-return-functions.ll
index 6b12207..d2c4137 100644
--- a/llvm/test/Transforms/IROutliner/outlining-no-return-functions.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-no-return-functions.ll
@@ -29,19 +29,19 @@ bb1:
; CHECK-LABEL: @f1(
; CHECK-NEXT: bb:
; CHECK-NEXT: call void @outlined_ir_func_0()
-; CHECK-NEXT: ret void
+; CHECK-NEXT: unreachable
;
;
; CHECK-LABEL: @f2(
; CHECK-NEXT: bb:
; CHECK-NEXT: call void @outlined_ir_func_0()
-; CHECK-NEXT: ret void
+; CHECK-NEXT: unreachable
;
;
; CHECK-LABEL: @f3(
; CHECK-NEXT: bb:
; CHECK-NEXT: call void @outlined_ir_func_0()
-; CHECK-NEXT: ret void
+; CHECK-NEXT: unreachable
;
;
; CHECK-LABEL: define internal void @outlined_ir_func_0(
diff --git a/llvm/test/Transforms/IndVarSimplify/iv-widen-elim-ext.ll b/llvm/test/Transforms/IndVarSimplify/iv-widen-elim-ext.ll
index 0e21bf8..59a0241 100644
--- a/llvm/test/Transforms/IndVarSimplify/iv-widen-elim-ext.ll
+++ b/llvm/test/Transforms/IndVarSimplify/iv-widen-elim-ext.ll
@@ -493,3 +493,55 @@ for.body: ; preds = %for.body.lr.ph, %fo
%cmp = icmp ult i32 %add, %length
br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}
+
+; Test that we can handle shl and disjoint or in getExtendedOperandRecurrence.
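+; The index recurrence here is 'shl nsw' followed by 'or disjoint'; because the
+; operands of a disjoint 'or' share no set bits, it is equivalent to an 'add',
+; so the widened IV can keep the recurrence as 'shl nsw i64' + 'or disjoint i64'
+; instead of sign-extending a narrow result (see the CHECK lines below).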
+define void @foo7(i32 %n, ptr %a, i32 %x) {
+; CHECK-LABEL: @foo7(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[N:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP6]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK: for.body.lr.ph:
+; CHECK-NEXT: [[ADD1:%.*]] = add nsw i32 [[X:%.*]], 2
+; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[ADD1]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[N]] to i64
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.cond.cleanup.loopexit:
+; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret void
+; CHECK: for.body:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_LR_PH]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = shl nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i64 [[TMP2]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; CHECK-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], [[TMP0]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[INDVARS_IV_NEXT]], [[TMP1]]
+; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
+;
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body.lr.ph, label %for.cond.cleanup
+
+for.body.lr.ph: ; preds = %entry
+ %add1 = add nsw i32 %x, 2
+ br label %for.body
+
+for.cond.cleanup.loopexit: ; preds = %for.body
+ br label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
+ ret void
+
+for.body: ; preds = %for.body.lr.ph, %for.body
+ %i.07 = phi i32 [ 0, %for.body.lr.ph ], [ %add2, %for.body ]
+ %mul = shl nsw i32 %i.07, 1
+ %add = or disjoint i32 %mul, 1
+ %idxprom = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %idxprom
+ store i32 %i.07, ptr %arrayidx, align 4
+ %add2 = add nsw i32 %add1, %i.07
+ %cmp = icmp slt i32 %add2, %n
+ br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
+}
diff --git a/llvm/test/Transforms/Inline/RISCV/inline-target-features.ll b/llvm/test/Transforms/Inline/RISCV/inline-target-features.ll
new file mode 100644
index 0000000..b626a22
--- /dev/null
+++ b/llvm/test/Transforms/Inline/RISCV/inline-target-features.ll
@@ -0,0 +1,34 @@
+; RUN: opt < %s -mtriple=riscv64-unknown-linux-gnu -S -passes=inline | FileCheck %s
+; RUN: opt < %s -mtriple=riscv64-unknown-linux-gnu -S -passes='cgscc(inline)' | FileCheck %s
+; Check that we only inline when we have compatible target attributes.
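+; Here #0 is "+f,+d" and #1 is "+f,+d,+m,+v": @foo (#0) can be inlined into
+; @bar (#1) because the caller's feature set is a superset of the callee's,
+; but @bar cannot be inlined into @qux (#0), which lacks +m and +v.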
+
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
+target triple = "riscv64-unknown-linux-gnu"
+
+define i32 @foo() #0 {
+entry:
+ %call = call i32 (...) @baz()
+ ret i32 %call
+; CHECK-LABEL: foo
+; CHECK: call i32 (...) @baz()
+}
+declare i32 @baz(...) #0
+
+define i32 @bar() #1 {
+entry:
+ %call = call i32 @foo()
+ ret i32 %call
+; CHECK-LABEL: bar
+; CHECK: call i32 (...) @baz()
+}
+
+define i32 @qux() #0 {
+entry:
+ %call = call i32 @bar()
+ ret i32 %call
+; CHECK-LABEL: qux
+; CHECK: call i32 @bar()
+}
+
+attributes #0 = { "target-cpu"="generic-rv64" "target-features"="+f,+d" }
+attributes #1 = { "target-cpu"="generic-rv64" "target-features"="+f,+d,+m,+v" }
diff --git a/llvm/test/Transforms/Inline/RISCV/lit.local.cfg b/llvm/test/Transforms/Inline/RISCV/lit.local.cfg
new file mode 100644
index 0000000..1735174
--- /dev/null
+++ b/llvm/test/Transforms/Inline/RISCV/lit.local.cfg
@@ -0,0 +1,2 @@
+if not "RISCV" in config.root.targets:
+ config.unsupported = True
diff --git a/llvm/test/Transforms/Inline/devirtualize-4.ll b/llvm/test/Transforms/Inline/devirtualize-4.ll
index fae3643..d29360f 100644
--- a/llvm/test/Transforms/Inline/devirtualize-4.ll
+++ b/llvm/test/Transforms/Inline/devirtualize-4.ll
@@ -60,7 +60,7 @@ declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
define linkonce_odr dso_local void @_ZN4ImplC2Ev(ptr %this) unnamed_addr align 2 {
entry:
call void @_ZN9InterfaceC2Ev(ptr %this)
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV4Impl, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV4Impl, i64 0, i32 0, i64 2), ptr %this, align 8
%f = getelementptr inbounds %class.Impl, ptr %this, i64 0, i32 1
store i32 3, ptr %f, align 8
ret void
@@ -78,7 +78,7 @@ declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
define linkonce_odr dso_local void @_ZN9InterfaceC2Ev(ptr %this) unnamed_addr align 2 {
entry:
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV9Interface, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV9Interface, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
@@ -185,7 +185,7 @@ entry:
define linkonce_odr void @_ZN1AC2Ev(ptr %this) align 2 {
entry:
- store ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV1A, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+ store ptr getelementptr inbounds inrange(-16, 8) ({ [4 x ptr] }, ptr @_ZTV1A, i64 0, i32 0, i64 2), ptr %this, align 8
ret void
}
diff --git a/llvm/test/Transforms/Inline/update_invoke_prof.ll b/llvm/test/Transforms/Inline/update_invoke_prof.ll
new file mode 100644
index 0000000..5f09c7c
--- /dev/null
+++ b/llvm/test/Transforms/Inline/update_invoke_prof.ll
@@ -0,0 +1,64 @@
+; A pre-commit test to show that branch weights and value profiles associated with invoke are not updated.
+; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -S | FileCheck %s
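+; That is, after @callee (entry count 1500) is inlined into @caller (entry
+; count 1000), the inlined invokes still carry the original, unscaled !18/!19
+; profiles; the CHECK lines below expect exactly those values.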
+
+declare i32 @__gxx_personality_v0(...)
+
+define void @caller(ptr %func) personality ptr @__gxx_personality_v0 !prof !15 {
+ call void @callee(ptr %func), !prof !16
+ ret void
+}
+
+declare void @inner_callee(ptr %func)
+
+define void @callee(ptr %func) personality ptr @__gxx_personality_v0 !prof !17 {
+ invoke void %func()
+ to label %next unwind label %lpad, !prof !18
+
+next:
+ invoke void @inner_callee(ptr %func)
+ to label %ret unwind label %lpad, !prof !19
+
+lpad:
+ %exn = landingpad {ptr, i32}
+ cleanup
+ unreachable
+
+ret:
+ ret void
+}
+
+!llvm.module.flags = !{!1}
+!1 = !{i32 1, !"ProfileSummary", !2}
+!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
+!3 = !{!"ProfileFormat", !"SampleProfile"}
+!4 = !{!"TotalCount", i64 10000}
+!5 = !{!"MaxCount", i64 10}
+!6 = !{!"MaxInternalCount", i64 1}
+!7 = !{!"MaxFunctionCount", i64 2000}
+!8 = !{!"NumCounts", i64 2}
+!9 = !{!"NumFunctions", i64 2}
+!10 = !{!"DetailedSummary", !11}
+!11 = !{!12, !13, !14}
+!12 = !{i32 10000, i64 100, i32 1}
+!13 = !{i32 999000, i64 100, i32 1}
+!14 = !{i32 999999, i64 1, i32 2}
+!15 = !{!"function_entry_count", i64 1000}
+!16 = !{!"branch_weights", i64 1000}
+!17 = !{!"function_entry_count", i32 1500}
+!18 = !{!"VP", i32 0, i64 1500, i64 123, i64 900, i64 456, i64 600}
+!19 = !{!"branch_weights", i32 1500}
+
+; CHECK-LABEL: @caller(
+; CHECK: invoke void %func(
+; CHECK-NEXT: {{.*}} !prof ![[PROF1:[0-9]+]]
+; CHECK: invoke void @inner_callee(
+; CHECK-NEXT: {{.*}} !prof ![[PROF2:[0-9]+]]
+
+; CHECK-LABEL: @callee(
+; CHECK: invoke void %func(
+; CHECK-NEXT: {{.*}} !prof ![[PROF1]]
+; CHECK: invoke void @inner_callee(
+; CHECK-NEXT: {{.*}} !prof ![[PROF2]]
+
+; CHECK: ![[PROF1]] = !{!"VP", i32 0, i64 1500, i64 123, i64 900, i64 456, i64 600}
+; CHECK: ![[PROF2]] = !{!"branch_weights", i32 1500}
diff --git a/llvm/test/Transforms/Inline/update_value_profile.ll b/llvm/test/Transforms/Inline/update_value_profile.ll
new file mode 100644
index 0000000..daa95e9
--- /dev/null
+++ b/llvm/test/Transforms/Inline/update_value_profile.ll
@@ -0,0 +1,81 @@
+; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -inline-threshold=100 -S | FileCheck %s
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; When 'callee' is inlined into caller1 and caller2, the indirect call value
+; profiles of the inlined copy should be scaled based on callers' profiles,
+; and the indirect call value profiles in 'callee' should be updated.
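+; For example: @callee has entry count 1700 and its indirect call has a "VP"
+; total of 1600. Inlining into @caller1 (entry count 1000) scales that to
+; 1600 * 1000/1700 = 941, into @caller2 (entry count 600) to 1600 * 600/1700 = 564,
+; and the copy left in @callee keeps 1600 * 100/1700 = 94 for the remaining
+; entry count of 100; the per-target counts scale the same way.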
+define i32 @callee(ptr %0, i32 %1) !prof !20 {
+; CHECK-LABEL: define i32 @callee(
+; CHECK-SAME: ptr [[TMP0:%.*]], i32 [[TMP1:%.*]]) !prof [[PROF0:![0-9]+]] {
+; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP0]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 8
+; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
+; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 [[TMP5]](ptr [[TMP0]], i32 [[TMP1]]), !prof [[PROF1:![0-9]+]]
+; CHECK-NEXT: ret i32 [[TMP6]]
+;
+  %3 = load ptr, ptr %0
+  %4 = getelementptr inbounds i8, ptr %3, i64 8
+  %5 = load ptr, ptr %4
+  %6 = tail call i32 %5(ptr %0, i32 %1), !prof !17
+  ret i32 %6
+}
+
+define i32 @caller1(i32 %0) !prof !18 {
+; CHECK-LABEL: define i32 @caller1(
+; CHECK-SAME: i32 [[TMP0:%.*]]) !prof [[PROF2:![0-9]+]] {
+; CHECK-NEXT: [[TMP2:%.*]] = tail call ptr @_Z10createTypei(i32 [[TMP0]])
+; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 8
+; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
+; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 [[TMP5]](ptr [[TMP2]], i32 [[TMP0]]), !prof [[PROF3:![0-9]+]]
+; CHECK-NEXT: ret i32 [[TMP6]]
+;
+ %2 = tail call ptr @_Z10createTypei(i32 %0)
+ %3 = tail call i32 @callee(ptr %2, i32 %0)
+ ret i32 %3
+}
+
+define i32 @caller2(i32 %0) !prof !19 {
+; CHECK-LABEL: define i32 @caller2(
+; CHECK-SAME: i32 [[TMP0:%.*]]) !prof [[PROF4:![0-9]+]] {
+; CHECK-NEXT: [[TMP2:%.*]] = tail call ptr @_Z10createTypei(i32 [[TMP0]])
+; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 8
+; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
+; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 [[TMP5]](ptr [[TMP2]], i32 [[TMP0]]), !prof [[PROF5:![0-9]+]]
+; CHECK-NEXT: ret i32 [[TMP6]]
+;
+ %2 = tail call ptr @_Z10createTypei(i32 %0)
+ %3 = tail call i32 @callee(ptr %2, i32 %0)
+ ret i32 %3
+}
+
+declare ptr @_Z10createTypei(i32)
+
+!1 = !{i32 1, !"ProfileSummary", !2}
+!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
+!3 = !{!"ProfileFormat", !"InstrProf"}
+!4 = !{!"TotalCount", i64 10000}
+!5 = !{!"MaxCount", i64 10}
+!6 = !{!"MaxInternalCount", i64 1}
+!7 = !{!"MaxFunctionCount", i64 1000}
+!8 = !{!"NumCounts", i64 3}
+!9 = !{!"NumFunctions", i64 3}
+!10 = !{!"DetailedSummary", !11}
+!11 = !{!12, !13, !14}
+!12 = !{i32 10000, i64 100, i32 1}
+!13 = !{i32 999000, i64 100, i32 1}
+!14 = !{i32 999999, i64 1, i32 2}
+!17 = !{!"VP", i32 0, i64 1600, i64 123, i64 1000, i64 456, i64 600}
+!18 = !{!"function_entry_count", i64 1000}
+!19 = !{!"function_entry_count", i64 600}
+!20 = !{!"function_entry_count", i64 1700}
+;.
+; CHECK: [[PROF0]] = !{!"function_entry_count", i64 100}
+; CHECK: [[PROF1]] = !{!"VP", i32 0, i64 94, i64 123, i64 58, i64 456, i64 35}
+; CHECK: [[PROF2]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF3]] = !{!"VP", i32 0, i64 941, i64 123, i64 588, i64 456, i64 352}
+; CHECK: [[PROF4]] = !{!"function_entry_count", i64 600}
+; CHECK: [[PROF5]] = !{!"VP", i32 0, i64 564, i64 123, i64 352, i64 456, i64 211}
+;.
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-avx512-inseltpoison.ll b/llvm/test/Transforms/InstCombine/X86/x86-avx512-inseltpoison.ll
index 9b99048..80d8e1b 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-avx512-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-avx512-inseltpoison.ll
@@ -39,10 +39,9 @@ define <4 x float> @test_add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fadd float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -117,10 +116,9 @@ define <2 x double> @test_add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fadd double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -191,10 +189,9 @@ define <4 x float> @test_sub_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fsub float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -269,10 +266,9 @@ define <2 x double> @test_sub_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fsub double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -343,10 +339,9 @@ define <4 x float> @test_mul_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fmul float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -421,10 +416,9 @@ define <2 x double> @test_mul_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fmul double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -495,10 +489,9 @@ define <4 x float> @test_div_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fdiv float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -573,10 +566,9 @@ define <2 x double> @test_div_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fdiv double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -981,9 +973,8 @@ define <4 x float> @test_mask_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x flo
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP1]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP1]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -1011,9 +1002,8 @@ define float @test_mask_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP1]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP1]]
; CHECK-NEXT: ret float [[TMP6]]
;
%1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1
@@ -1060,9 +1050,8 @@ define <2 x double> @test_mask_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP1]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP1]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -1086,9 +1075,8 @@ define double @test_mask_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x doub
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP1]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP1]]
; CHECK-NEXT: ret double [[TMP6]]
;
%1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1
@@ -1129,9 +1117,8 @@ define <4 x float> @test_maskz_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x fl
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float 0.000000e+00, float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float 0.000000e+00
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -1159,9 +1146,8 @@ define float @test_maskz_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float 0.000000e+00, float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float 0.000000e+00
; CHECK-NEXT: ret float [[TMP6]]
;
%1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1
@@ -1206,9 +1192,8 @@ define <2 x double> @test_maskz_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double 0.000000e+00, double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double 0.000000e+00
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -1232,9 +1217,8 @@ define double @test_maskz_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double 0.000000e+00, double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double 0.000000e+00
; CHECK-NEXT: ret double [[TMP6]]
;
%1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1
@@ -1275,9 +1259,8 @@ define <4 x float> @test_mask3_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x fl
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP3]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP3]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[C]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -1305,9 +1288,8 @@ define float @test_mask3_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP3]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP3]]
; CHECK-NEXT: ret float [[TMP6]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -1352,9 +1334,8 @@ define <2 x double> @test_mask3_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP3]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP3]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[C]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -1378,9 +1359,8 @@ define double @test_mask3_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP3]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP3]]
; CHECK-NEXT: ret double [[TMP6]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -1423,9 +1403,8 @@ define <4 x float> @test_mask3_vfmsub_ss(<4 x float> %a, <4 x float> %b, <4 x fl
; CHECK-NEXT: [[TMP4:%.*]] = fneg float [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], float [[TMP6]], float [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], float [[TMP5]], float [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x float> [[C]], float [[TMP8]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP9]]
;
@@ -1457,9 +1436,8 @@ define float @test_mask3_vfmsub_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP4:%.*]] = fneg float [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], float [[TMP6]], float [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], float [[TMP5]], float [[TMP6]]
; CHECK-NEXT: ret float [[TMP8]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -1532,9 +1510,8 @@ define <2 x double> @test_mask3_vfmsub_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP4:%.*]] = fneg double [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], double [[TMP6]], double [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], double [[TMP5]], double [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x double> [[C]], double [[TMP8]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP9]]
;
@@ -1562,9 +1539,8 @@ define double @test_mask3_vfmsub_sd_0(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP4:%.*]] = fneg double [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], double [[TMP6]], double [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], double [[TMP5]], double [[TMP6]]
; CHECK-NEXT: ret double [[TMP8]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -1632,9 +1608,8 @@ define <4 x float> @test_mask3_vfnmsub_ss(<4 x float> %a, <4 x float> %b, <4 x f
; CHECK-NEXT: [[TMP5:%.*]] = fneg float [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.fma.f32(float [[TMP2]], float [[TMP3]], float [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], float [[TMP7]], float [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], float [[TMP6]], float [[TMP7]]
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x float> [[C]], float [[TMP9]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP10]]
;
@@ -1668,9 +1643,8 @@ define float @test_mask3_vfnmsub_ss_0(<4 x float> %a, <4 x float> %b, <4 x float
; CHECK-NEXT: [[TMP5:%.*]] = fneg float [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.fma.f32(float [[TMP2]], float [[TMP3]], float [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], float [[TMP7]], float [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], float [[TMP6]], float [[TMP7]]
; CHECK-NEXT: ret float [[TMP9]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -1747,9 +1721,8 @@ define <2 x double> @test_mask3_vfnmsub_sd(<2 x double> %a, <2 x double> %b, <2
; CHECK-NEXT: [[TMP5:%.*]] = fneg double [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call double @llvm.fma.f64(double [[TMP2]], double [[TMP3]], double [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], double [[TMP7]], double [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], double [[TMP6]], double [[TMP7]]
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x double> [[C]], double [[TMP9]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP10]]
;
@@ -1779,9 +1752,8 @@ define double @test_mask3_vfnmsub_sd_0(<2 x double> %a, <2 x double> %b, <2 x do
; CHECK-NEXT: [[TMP5:%.*]] = fneg double [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call double @llvm.fma.f64(double [[TMP2]], double [[TMP3]], double [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], double [[TMP7]], double [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], double [[TMP6]], double [[TMP7]]
; CHECK-NEXT: ret double [[TMP9]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-avx512.ll b/llvm/test/Transforms/InstCombine/X86/x86-avx512.ll
index c10c922..906e84b 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-avx512.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-avx512.ll
@@ -39,10 +39,9 @@ define <4 x float> @test_add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fadd float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -117,10 +116,9 @@ define <2 x double> @test_add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fadd double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -191,10 +189,9 @@ define <4 x float> @test_sub_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fsub float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -269,10 +266,9 @@ define <2 x double> @test_sub_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fsub double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -343,10 +339,9 @@ define <4 x float> @test_mul_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fmul float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -421,10 +416,9 @@ define <2 x double> @test_mul_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fmul double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -495,10 +489,9 @@ define <4 x float> @test_div_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fdiv float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -573,10 +566,9 @@ define <2 x double> @test_div_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fdiv double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -981,9 +973,8 @@ define <4 x float> @test_mask_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x flo
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP1]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP1]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -1011,9 +1002,8 @@ define float @test_mask_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP1]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP1]]
; CHECK-NEXT: ret float [[TMP6]]
;
%1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1
@@ -1060,9 +1050,8 @@ define <2 x double> @test_mask_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP1]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP1]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -1086,9 +1075,8 @@ define double @test_mask_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x doub
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP1]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP1]]
; CHECK-NEXT: ret double [[TMP6]]
;
%1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1
@@ -1129,9 +1117,8 @@ define <4 x float> @test_maskz_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x fl
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float 0.000000e+00, float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float 0.000000e+00
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -1159,9 +1146,8 @@ define float @test_maskz_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float 0.000000e+00, float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float 0.000000e+00
; CHECK-NEXT: ret float [[TMP6]]
;
%1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1
@@ -1206,9 +1192,8 @@ define <2 x double> @test_maskz_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double 0.000000e+00, double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double 0.000000e+00
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -1232,9 +1217,8 @@ define double @test_maskz_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double 0.000000e+00, double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double 0.000000e+00
; CHECK-NEXT: ret double [[TMP6]]
;
%1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1
@@ -1275,9 +1259,8 @@ define <4 x float> @test_mask3_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x fl
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP3]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP3]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[C]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -1305,9 +1288,8 @@ define float @test_mask3_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP3]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP3]]
; CHECK-NEXT: ret float [[TMP6]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -1352,9 +1334,8 @@ define <2 x double> @test_mask3_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP3]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP3]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[C]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -1378,9 +1359,8 @@ define double @test_mask3_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP3]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP3]]
; CHECK-NEXT: ret double [[TMP6]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -1423,9 +1403,8 @@ define <4 x float> @test_mask3_vfmsub_ss(<4 x float> %a, <4 x float> %b, <4 x fl
; CHECK-NEXT: [[TMP4:%.*]] = fneg float [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], float [[TMP6]], float [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], float [[TMP5]], float [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x float> [[C]], float [[TMP8]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP9]]
;
@@ -1457,9 +1436,8 @@ define float @test_mask3_vfmsub_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP4:%.*]] = fneg float [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], float [[TMP6]], float [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], float [[TMP5]], float [[TMP6]]
; CHECK-NEXT: ret float [[TMP8]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -1532,9 +1510,8 @@ define <2 x double> @test_mask3_vfmsub_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP4:%.*]] = fneg double [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], double [[TMP6]], double [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], double [[TMP5]], double [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x double> [[C]], double [[TMP8]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP9]]
;
@@ -1562,9 +1539,8 @@ define double @test_mask3_vfmsub_sd_0(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP4:%.*]] = fneg double [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], double [[TMP6]], double [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], double [[TMP5]], double [[TMP6]]
; CHECK-NEXT: ret double [[TMP8]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -1632,9 +1608,8 @@ define <4 x float> @test_mask3_vfnmsub_ss(<4 x float> %a, <4 x float> %b, <4 x f
; CHECK-NEXT: [[TMP5:%.*]] = fneg float [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.fma.f32(float [[TMP2]], float [[TMP3]], float [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], float [[TMP7]], float [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], float [[TMP6]], float [[TMP7]]
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x float> [[C]], float [[TMP9]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP10]]
;
@@ -1668,9 +1643,8 @@ define float @test_mask3_vfnmsub_ss_0(<4 x float> %a, <4 x float> %b, <4 x float
; CHECK-NEXT: [[TMP5:%.*]] = fneg float [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.fma.f32(float [[TMP2]], float [[TMP3]], float [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], float [[TMP7]], float [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], float [[TMP6]], float [[TMP7]]
; CHECK-NEXT: ret float [[TMP9]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -1747,9 +1721,8 @@ define <2 x double> @test_mask3_vfnmsub_sd(<2 x double> %a, <2 x double> %b, <2
; CHECK-NEXT: [[TMP5:%.*]] = fneg double [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call double @llvm.fma.f64(double [[TMP2]], double [[TMP3]], double [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], double [[TMP7]], double [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], double [[TMP6]], double [[TMP7]]
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x double> [[C]], double [[TMP9]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP10]]
;
@@ -1779,9 +1752,8 @@ define double @test_mask3_vfnmsub_sd_0(<2 x double> %a, <2 x double> %b, <2 x do
; CHECK-NEXT: [[TMP5:%.*]] = fneg double [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call double @llvm.fma.f64(double [[TMP2]], double [[TMP3]], double [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], double [[TMP7]], double [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], double [[TMP6]], double [[TMP7]]
; CHECK-NEXT: ret double [[TMP9]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
diff --git a/llvm/test/Transforms/InstCombine/add.ll b/llvm/test/Transforms/InstCombine/add.ll
index 522dcf8..ec3aca2 100644
--- a/llvm/test/Transforms/InstCombine/add.ll
+++ b/llvm/test/Transforms/InstCombine/add.ll
@@ -3986,5 +3986,81 @@ define i32 @add_reduce_sqr_sum_varC_invalid2(i32 %a, i32 %b) {
ret i32 %ab2
}
+define i32 @fold_sext_addition_or_disjoint(i8 %x) {
+; CHECK-LABEL: @fold_sext_addition_or_disjoint(
+; CHECK-NEXT: [[SE:%.*]] = sext i8 [[XX:%.*]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[SE]], 1246
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %xx = or disjoint i8 %x, 12
+ %se = sext i8 %xx to i32
+ %r = add i32 %se, 1234
+ ret i32 %r
+}
+
+define i32 @fold_sext_addition_fail(i8 %x) {
+; CHECK-LABEL: @fold_sext_addition_fail(
+; CHECK-NEXT: [[XX:%.*]] = or i8 [[X:%.*]], 12
+; CHECK-NEXT: [[SE:%.*]] = sext i8 [[XX]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[SE]], 1234
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %xx = or i8 %x, 12
+ %se = sext i8 %xx to i32
+ %r = add i32 %se, 1234
+ ret i32 %r
+}
+
+define i32 @fold_zext_addition_or_disjoint(i8 %x) {
+; CHECK-LABEL: @fold_zext_addition_or_disjoint(
+; CHECK-NEXT: [[SE:%.*]] = zext i8 [[XX:%.*]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nuw nsw i32 [[SE]], 1246
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %xx = or disjoint i8 %x, 12
+ %se = zext i8 %xx to i32
+ %r = add i32 %se, 1234
+ ret i32 %r
+}
+
+define i32 @fold_zext_addition_or_disjoint2(i8 %x) {
+; CHECK-LABEL: @fold_zext_addition_or_disjoint2(
+; CHECK-NEXT: [[XX:%.*]] = add nuw i8 [[X:%.*]], 4
+; CHECK-NEXT: [[SE:%.*]] = zext i8 [[XX]] to i32
+; CHECK-NEXT: ret i32 [[SE]]
+;
+ %xx = or disjoint i8 %x, 18
+ %se = zext i8 %xx to i32
+ %r = add i32 %se, -14
+ ret i32 %r
+}
+
+define i32 @fold_zext_addition_fail(i8 %x) {
+; CHECK-LABEL: @fold_zext_addition_fail(
+; CHECK-NEXT: [[XX:%.*]] = or i8 [[X:%.*]], 12
+; CHECK-NEXT: [[SE:%.*]] = zext i8 [[XX]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nuw nsw i32 [[SE]], 1234
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %xx = or i8 %x, 12
+ %se = zext i8 %xx to i32
+ %r = add i32 %se, 1234
+ ret i32 %r
+}
+
+define i32 @fold_zext_addition_fail2(i8 %x) {
+; CHECK-LABEL: @fold_zext_addition_fail2(
+; CHECK-NEXT: [[XX:%.*]] = or i8 [[X:%.*]], 18
+; CHECK-NEXT: [[SE:%.*]] = zext i8 [[XX]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[SE]], -14
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %xx = or i8 %x, 18
+ %se = zext i8 %xx to i32
+ %r = add i32 %se, -14
+ ret i32 %r
+}
+
+
declare void @llvm.assume(i1)
declare void @fake_func(i32)
diff --git a/llvm/test/Transforms/InstCombine/allow-checks.ll b/llvm/test/Transforms/InstCombine/allow-checks.ll
new file mode 100644
index 0000000..873b7c3
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/allow-checks.ll
@@ -0,0 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s --implicit-check-not="call i1 @llvm.allow"
+
+define i1 @test_runtime() {
+; CHECK-LABEL: @test_runtime(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[HOT:%.*]] = call i1 @llvm.allow.runtime.check(metadata !"test")
+; CHECK-NEXT: ret i1 [[HOT]]
+;
+entry:
+ %allow = call i1 @llvm.allow.runtime.check(metadata !"test")
+ ret i1 %allow
+}
+
+define void @test_runtime_void() {
+; CHECK-LABEL: @test_runtime_void(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret void
+;
+entry:
+ %allow = call i1 @llvm.allow.runtime.check(metadata !"test")
+ ret void
+}
+
+define i1 @test_ubsan() {
+; CHECK-LABEL: @test_ubsan(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[HOT:%.*]] = call i1 @llvm.allow.ubsan.check(i8 11)
+; CHECK-NEXT: ret i1 [[HOT]]
+;
+entry:
+ %allow = call i1 @llvm.allow.ubsan.check(i8 11)
+ ret i1 %allow
+}
+
+define void @test_ubsan_void() {
+; CHECK-LABEL: @test_ubsan_void(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret void
+;
+entry:
+ %allow = call i1 @llvm.allow.ubsan.check(i8 11)
+ ret void
+}
diff --git a/llvm/test/Transforms/InstCombine/and-or-implied-cond-not.ll b/llvm/test/Transforms/InstCombine/and-or-implied-cond-not.ll
new file mode 100644
index 0000000..89b3164
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/and-or-implied-cond-not.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+define i1 @test_imply_not1(i32 %depth) {
+; CHECK-LABEL: define i1 @test_imply_not1(
+; CHECK-SAME: i32 [[DEPTH:%.*]]) {
+; CHECK-NEXT: [[CMP1_NOT1:%.*]] = icmp eq i32 [[DEPTH]], 16
+; CHECK-NEXT: call void @use(i1 [[CMP1_NOT1]])
+; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[DEPTH]], 8
+; CHECK-NEXT: call void @use(i1 [[CMP2]])
+; CHECK-NEXT: br i1 [[CMP1_NOT1]], label [[IF_ELSE:%.*]], label [[IF_THEN:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: call void @func1()
+; CHECK-NEXT: unreachable
+; CHECK: if.else:
+; CHECK-NEXT: call void @func2()
+; CHECK-NEXT: unreachable
+;
+ %cmp1 = icmp eq i32 %depth, 16
+ call void @use(i1 %cmp1)
+ %cmp2 = icmp slt i32 %depth, 8
+ call void @use(i1 %cmp2)
+ %cmp.not = xor i1 %cmp1, true
+ %brmerge = or i1 %cmp2, %cmp.not
+ br i1 %brmerge, label %if.then, label %if.else
+if.then:
+ call void @func1()
+ unreachable
+
+if.else:
+ call void @func2()
+ unreachable
+}
+
+define i1 @test_imply_not2(i32 %a, i1 %cmp2) {
+; CHECK-LABEL: define i1 @test_imply_not2(
+; CHECK-SAME: i32 [[A:%.*]], i1 [[CMP2:%.*]]) {
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[A]], 0
+; CHECK-NEXT: [[BRMERGE:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP2]]
+; CHECK-NEXT: ret i1 [[BRMERGE]]
+;
+ %cmp1 = icmp eq i32 %a, 0
+ %or.cond = select i1 %cmp1, i1 %cmp2, i1 false
+ %cmp.not = xor i1 %cmp1, true
+ %brmerge = or i1 %or.cond, %cmp.not
+ ret i1 %brmerge
+}
+
+define i1 @test_imply_not3(i32 %a, i32 %b, i1 %cond) {
+; CHECK-LABEL: define i1 @test_imply_not3(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i1 [[COND:%.*]]) {
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[A]], [[B]]
+; CHECK-NEXT: call void @use(i1 [[CMP1]])
+; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[A]], [[B]]
+; CHECK-NEXT: [[AND:%.*]] = select i1 [[CMP2]], i1 [[COND]], i1 false
+; CHECK-NEXT: ret i1 [[AND]]
+;
+ %cmp1 = icmp eq i32 %a, %b
+ call void @use(i1 %cmp1)
+ %cmp2 = icmp slt i32 %a, %b
+ %cmp.not = xor i1 %cmp1, true
+ %sel = select i1 %cmp.not, i1 %cond, i1 false
+ %and = and i1 %cmp2, %sel
+ ret i1 %and
+}
+
+declare void @func1()
+declare void @func2()
+declare void @use(i1)
diff --git a/llvm/test/Transforms/InstCombine/apint-shl-trunc.ll b/llvm/test/Transforms/InstCombine/apint-shl-trunc.ll
index 2d72a4f..e234698 100644
--- a/llvm/test/Transforms/InstCombine/apint-shl-trunc.ll
+++ b/llvm/test/Transforms/InstCombine/apint-shl-trunc.ll
@@ -3,9 +3,8 @@
define i1 @test0(i39 %X, i39 %A) {
; CHECK-LABEL: @test0(
-; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i39 1, [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i39 [[TMP1]], [[X:%.*]]
-; CHECK-NEXT: [[D:%.*]] = icmp ne i39 [[TMP2]], 0
+; CHECK-NEXT: [[B:%.*]] = lshr i39 [[X:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = trunc i39 [[B]] to i1
; CHECK-NEXT: ret i1 [[D]]
;
%B = lshr i39 %X, %A
@@ -15,9 +14,8 @@ define i1 @test0(i39 %X, i39 %A) {
define i1 @test1(i799 %X, i799 %A) {
; CHECK-LABEL: @test1(
-; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i799 1, [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i799 [[TMP1]], [[X:%.*]]
-; CHECK-NEXT: [[D:%.*]] = icmp ne i799 [[TMP2]], 0
+; CHECK-NEXT: [[B:%.*]] = lshr i799 [[X:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = trunc i799 [[B]] to i1
; CHECK-NEXT: ret i1 [[D]]
;
%B = lshr i799 %X, %A
diff --git a/llvm/test/Transforms/InstCombine/assume.ll b/llvm/test/Transforms/InstCombine/assume.ll
index 927f0a8..87c75fb 100644
--- a/llvm/test/Transforms/InstCombine/assume.ll
+++ b/llvm/test/Transforms/InstCombine/assume.ll
@@ -386,7 +386,7 @@ define i1 @nonnull5(ptr %a) {
define i32 @assumption_conflicts_with_known_bits(i32 %a, i32 %b) {
; CHECK-LABEL: @assumption_conflicts_with_known_bits(
; CHECK-NEXT: store i1 true, ptr poison, align 1
-; CHECK-NEXT: ret i32 1
+; CHECK-NEXT: ret i32 poison
;
%and1 = and i32 %b, 3
%B1 = lshr i32 %and1, %and1
diff --git a/llvm/test/Transforms/InstCombine/binop-itofp.ll b/llvm/test/Transforms/InstCombine/binop-itofp.ll
index 7d2b872..cd9ec1e 100644
--- a/llvm/test/Transforms/InstCombine/binop-itofp.ll
+++ b/llvm/test/Transforms/InstCombine/binop-itofp.ll
@@ -1004,3 +1004,125 @@ define float @test_ui_add_with_signed_constant(i32 %shr.i) {
%add = fadd float %sub, -16383.0
ret float %add
}
+
+
+;; Reduced form of bug noticed due to #82555
+define float @missed_nonzero_check_on_constant_for_si_fmul(i1 %c, i1 %.b, ptr %g_2345) {
+; CHECK-LABEL: @missed_nonzero_check_on_constant_for_si_fmul(
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
+; CHECK-NEXT: [[CONV_I:%.*]] = trunc i32 [[SEL]] to i16
+; CHECK-NEXT: [[CONV1_I:%.*]] = sitofp i16 [[CONV_I]] to float
+; CHECK-NEXT: [[MUL3_I_I:%.*]] = call float @llvm.copysign.f32(float 0.000000e+00, float [[CONV1_I]])
+; CHECK-NEXT: store i32 [[SEL]], ptr [[G_2345:%.*]], align 4
+; CHECK-NEXT: ret float [[MUL3_I_I]]
+;
+ %sel = select i1 %c, i32 65529, i32 53264
+ %conv.i = trunc i32 %sel to i16
+ %conv1.i = sitofp i16 %conv.i to float
+ %mul3.i.i = fmul float %conv1.i, 0.000000e+00
+ store i32 %sel, ptr %g_2345, align 4
+ ret float %mul3.i.i
+}
+
+define <2 x float> @missed_nonzero_check_on_constant_for_si_fmul_vec(i1 %c, i1 %.b, ptr %g_2345) {
+; CHECK-LABEL: @missed_nonzero_check_on_constant_for_si_fmul_vec(
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
+; CHECK-NEXT: [[CONV_I_S:%.*]] = trunc i32 [[SEL]] to i16
+; CHECK-NEXT: [[CONV_I_V:%.*]] = insertelement <2 x i16> poison, i16 [[CONV_I_S]], i64 0
+; CHECK-NEXT: [[CONV_I:%.*]] = shufflevector <2 x i16> [[CONV_I_V]], <2 x i16> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[CONV1_I:%.*]] = sitofp <2 x i16> [[CONV_I]] to <2 x float>
+; CHECK-NEXT: [[MUL3_I_I:%.*]] = call <2 x float> @llvm.copysign.v2f32(<2 x float> zeroinitializer, <2 x float> [[CONV1_I]])
+; CHECK-NEXT: store i32 [[SEL]], ptr [[G_2345:%.*]], align 4
+; CHECK-NEXT: ret <2 x float> [[MUL3_I_I]]
+;
+ %sel = select i1 %c, i32 65529, i32 53264
+ %conv.i.s = trunc i32 %sel to i16
+ %conv.i.v = insertelement <2 x i16> poison, i16 %conv.i.s, i64 0
+ %conv.i = insertelement <2 x i16> %conv.i.v, i16 %conv.i.s, i64 1
+ %conv1.i = sitofp <2 x i16> %conv.i to <2 x float>
+ %mul3.i.i = fmul <2 x float> %conv1.i, zeroinitializer
+ store i32 %sel, ptr %g_2345, align 4
+ ret <2 x float> %mul3.i.i
+}
+
+define float @negzero_check_on_constant_for_si_fmul(i1 %c, i1 %.b, ptr %g_2345) {
+; CHECK-LABEL: @negzero_check_on_constant_for_si_fmul(
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
+; CHECK-NEXT: [[CONV_I:%.*]] = trunc i32 [[SEL]] to i16
+; CHECK-NEXT: [[CONV1_I:%.*]] = sitofp i16 [[CONV_I]] to float
+; CHECK-NEXT: [[TMP1:%.*]] = fneg float [[CONV1_I]]
+; CHECK-NEXT: [[MUL3_I_I:%.*]] = call float @llvm.copysign.f32(float 0.000000e+00, float [[TMP1]])
+; CHECK-NEXT: store i32 [[SEL]], ptr [[G_2345:%.*]], align 4
+; CHECK-NEXT: ret float [[MUL3_I_I]]
+;
+ %sel = select i1 %c, i32 65529, i32 53264
+ %conv.i = trunc i32 %sel to i16
+ %conv1.i = sitofp i16 %conv.i to float
+ %mul3.i.i = fmul float %conv1.i, -0.000000e+00
+ store i32 %sel, ptr %g_2345, align 4
+ ret float %mul3.i.i
+}
+
+define <2 x float> @nonzero_check_on_constant_for_si_fmul_vec_w_undef(i1 %c, i1 %.b, ptr %g_2345) {
+; CHECK-LABEL: @nonzero_check_on_constant_for_si_fmul_vec_w_undef(
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
+; CHECK-NEXT: [[CONV_I_S:%.*]] = trunc i32 [[SEL]] to i16
+; CHECK-NEXT: [[CONV_I_V:%.*]] = insertelement <2 x i16> poison, i16 [[CONV_I_S]], i64 0
+; CHECK-NEXT: [[CONV_I:%.*]] = shufflevector <2 x i16> [[CONV_I_V]], <2 x i16> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[CONV1_I:%.*]] = sitofp <2 x i16> [[CONV_I]] to <2 x float>
+; CHECK-NEXT: [[MUL3_I_I:%.*]] = call <2 x float> @llvm.copysign.v2f32(<2 x float> zeroinitializer, <2 x float> [[CONV1_I]])
+; CHECK-NEXT: store i32 [[SEL]], ptr [[G_2345:%.*]], align 4
+; CHECK-NEXT: ret <2 x float> [[MUL3_I_I]]
+;
+ %sel = select i1 %c, i32 65529, i32 53264
+ %conv.i.s = trunc i32 %sel to i16
+ %conv.i.v = insertelement <2 x i16> poison, i16 %conv.i.s, i64 0
+ %conv.i = insertelement <2 x i16> %conv.i.v, i16 %conv.i.s, i64 1
+ %conv1.i = sitofp <2 x i16> %conv.i to <2 x float>
+ %mul3.i.i = fmul <2 x float> %conv1.i, <float undef, float 0.000000e+00>
+ store i32 %sel, ptr %g_2345, align 4
+ ret <2 x float> %mul3.i.i
+}
+
+define <2 x float> @nonzero_check_on_constant_for_si_fmul_nz_vec_w_undef(i1 %c, i1 %.b, ptr %g_2345) {
+; CHECK-LABEL: @nonzero_check_on_constant_for_si_fmul_nz_vec_w_undef(
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
+; CHECK-NEXT: [[CONV_I_S:%.*]] = trunc i32 [[SEL]] to i16
+; CHECK-NEXT: [[CONV_I_V:%.*]] = insertelement <2 x i16> poison, i16 [[CONV_I_S]], i64 0
+; CHECK-NEXT: [[CONV_I:%.*]] = shufflevector <2 x i16> [[CONV_I_V]], <2 x i16> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[CONV1_I:%.*]] = sitofp <2 x i16> [[CONV_I]] to <2 x float>
+; CHECK-NEXT: [[MUL3_I_I:%.*]] = fmul <2 x float> [[CONV1_I]], <float undef, float 1.000000e+00>
+; CHECK-NEXT: store i32 [[SEL]], ptr [[G_2345:%.*]], align 4
+; CHECK-NEXT: ret <2 x float> [[MUL3_I_I]]
+;
+ %sel = select i1 %c, i32 65529, i32 53264
+ %conv.i.s = trunc i32 %sel to i16
+ %conv.i.v = insertelement <2 x i16> poison, i16 %conv.i.s, i64 0
+ %conv.i = insertelement <2 x i16> %conv.i.v, i16 %conv.i.s, i64 1
+ %conv1.i = sitofp <2 x i16> %conv.i to <2 x float>
+ %mul3.i.i = fmul <2 x float> %conv1.i, <float undef, float 1.000000e+00>
+ store i32 %sel, ptr %g_2345, align 4
+ ret <2 x float> %mul3.i.i
+}
+
+define <2 x float> @nonzero_check_on_constant_for_si_fmul_negz_vec_w_undef(i1 %c, i1 %.b, ptr %g_2345) {
+; CHECK-LABEL: @nonzero_check_on_constant_for_si_fmul_negz_vec_w_undef(
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
+; CHECK-NEXT: [[CONV_I_S:%.*]] = trunc i32 [[SEL]] to i16
+; CHECK-NEXT: [[CONV_I_V:%.*]] = insertelement <2 x i16> poison, i16 [[CONV_I_S]], i64 0
+; CHECK-NEXT: [[CONV_I:%.*]] = shufflevector <2 x i16> [[CONV_I_V]], <2 x i16> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[CONV1_I:%.*]] = sitofp <2 x i16> [[CONV_I]] to <2 x float>
+; CHECK-NEXT: [[TMP1:%.*]] = fneg <2 x float> [[CONV1_I]]
+; CHECK-NEXT: [[MUL3_I_I:%.*]] = call <2 x float> @llvm.copysign.v2f32(<2 x float> zeroinitializer, <2 x float> [[TMP1]])
+; CHECK-NEXT: store i32 [[SEL]], ptr [[G_2345:%.*]], align 4
+; CHECK-NEXT: ret <2 x float> [[MUL3_I_I]]
+;
+ %sel = select i1 %c, i32 65529, i32 53264
+ %conv.i.s = trunc i32 %sel to i16
+ %conv.i.v = insertelement <2 x i16> poison, i16 %conv.i.s, i64 0
+ %conv.i = insertelement <2 x i16> %conv.i.v, i16 %conv.i.s, i64 1
+ %conv1.i = sitofp <2 x i16> %conv.i to <2 x float>
+ %mul3.i.i = fmul <2 x float> %conv1.i, <float undef, float -0.000000e+00>
+ store i32 %sel, ptr %g_2345, align 4
+ ret <2 x float> %mul3.i.i
+}
diff --git a/llvm/test/Transforms/InstCombine/builtin-hot.ll b/llvm/test/Transforms/InstCombine/builtin-hot.ll
deleted file mode 100644
index 8d33887..0000000
--- a/llvm/test/Transforms/InstCombine/builtin-hot.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-
-define i1 @test() {
-; CHECK-LABEL: @test(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[HOT:%.*]] = call i1 @llvm.experimental.hot()
-; CHECK-NEXT: ret i1 [[HOT]]
-;
-entry:
- %hot = call i1 @llvm.experimental.hot()
- ret i1 %hot
-}
-
-define void @test_void() {
-; CHECK-LABEL: @test_void(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: ret void
-;
-entry:
- %hot = call i1 @llvm.experimental.hot()
- ret void
-}
-
-declare i1 @llvm.expect.hot() nounwind
diff --git a/llvm/test/Transforms/InstCombine/cast.ll b/llvm/test/Transforms/InstCombine/cast.ll
index 85433a9..97554e9 100644
--- a/llvm/test/Transforms/InstCombine/cast.ll
+++ b/llvm/test/Transforms/InstCombine/cast.ll
@@ -1399,8 +1399,7 @@ define float @sitofp_zext(i16 %a) {
define i1 @PR23309(i32 %A, i32 %B) {
; ALL-LABEL: @PR23309(
; ALL-NEXT: [[SUB:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
-; ALL-NEXT: [[TMP1:%.*]] = and i32 [[SUB]], 1
-; ALL-NEXT: [[TRUNC:%.*]] = icmp ne i32 [[TMP1]], 0
+; ALL-NEXT: [[TRUNC:%.*]] = trunc i32 [[SUB]] to i1
; ALL-NEXT: ret i1 [[TRUNC]]
;
%add = add i32 %A, -4
@@ -1412,8 +1411,7 @@ define i1 @PR23309(i32 %A, i32 %B) {
define i1 @PR23309v2(i32 %A, i32 %B) {
; ALL-LABEL: @PR23309v2(
; ALL-NEXT: [[SUB:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
-; ALL-NEXT: [[TMP1:%.*]] = and i32 [[SUB]], 1
-; ALL-NEXT: [[TRUNC:%.*]] = icmp ne i32 [[TMP1]], 0
+; ALL-NEXT: [[TRUNC:%.*]] = trunc i32 [[SUB]] to i1
; ALL-NEXT: ret i1 [[TRUNC]]
;
%add = add i32 %A, -4
diff --git a/llvm/test/Transforms/InstCombine/catchswitch-phi.ll b/llvm/test/Transforms/InstCombine/catchswitch-phi.ll
index 0388476..cb87ee6 100644
--- a/llvm/test/Transforms/InstCombine/catchswitch-phi.ll
+++ b/llvm/test/Transforms/InstCombine/catchswitch-phi.ll
@@ -24,11 +24,11 @@ define void @test0(i1 %c1) personality ptr @__gxx_wasm_personality_v0 {
; CHECK: bb1:
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i32 4
; CHECK-NEXT: invoke void @foo()
-; CHECK-NEXT: to label [[BB3:%.*]] unwind label [[BB4:%.*]]
+; CHECK-NEXT: to label [[BB3:%.*]] unwind label [[BB4:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i32 4
; CHECK-NEXT: invoke void @foo()
-; CHECK-NEXT: to label [[BB3]] unwind label [[BB4]]
+; CHECK-NEXT: to label [[BB3]] unwind label [[BB4]]
; CHECK: bb3:
; CHECK-NEXT: unreachable
; CHECK: bb4:
@@ -37,7 +37,7 @@ define void @test0(i1 %c1) personality ptr @__gxx_wasm_personality_v0 {
; CHECK: bb5:
; CHECK-NEXT: [[TMP5:%.*]] = catchpad within [[TMP4]] [ptr null]
; CHECK-NEXT: invoke void @foo() [ "funclet"(token [[TMP5]]) ]
-; CHECK-NEXT: to label [[BB6:%.*]] unwind label [[BB7]]
+; CHECK-NEXT: to label [[BB6:%.*]] unwind label [[BB7]]
; CHECK: bb6:
; CHECK-NEXT: unreachable
; CHECK: bb7:
@@ -89,10 +89,10 @@ define void @test1() personality ptr @__gxx_wasm_personality_v0 {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: invoke void @foo()
-; CHECK-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[CATCH_DISPATCH1:%.*]]
+; CHECK-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[CATCH_DISPATCH1:%.*]]
; CHECK: invoke.cont:
; CHECK-NEXT: [[CALL:%.*]] = invoke i32 @baz()
-; CHECK-NEXT: to label [[INVOKE_CONT1:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
+; CHECK-NEXT: to label [[INVOKE_CONT1:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
; CHECK: invoke.cont1:
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CALL]], 0
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
@@ -101,7 +101,7 @@ define void @test1() personality ptr @__gxx_wasm_personality_v0 {
; CHECK: if.end:
; CHECK-NEXT: [[AP_0:%.*]] = phi i8 [ 1, [[IF_THEN]] ], [ 0, [[INVOKE_CONT1]] ]
; CHECK-NEXT: invoke void @foo()
-; CHECK-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[CATCH_DISPATCH]]
+; CHECK-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[CATCH_DISPATCH]]
; CHECK: invoke.cont2:
; CHECK-NEXT: br label [[TRY_CONT:%.*]]
; CHECK: catch.dispatch:
@@ -114,17 +114,16 @@ define void @test1() personality ptr @__gxx_wasm_personality_v0 {
; CHECK-NEXT: catchret from [[TMP1]] to label [[TRY_CONT]]
; CHECK: rethrow:
; CHECK-NEXT: invoke void @llvm.wasm.rethrow() #[[ATTR0:[0-9]+]] [ "funclet"(token [[TMP1]]) ]
-; CHECK-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CATCH_DISPATCH1]]
+; CHECK-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CATCH_DISPATCH1]]
; CHECK: catch.dispatch1:
; CHECK-NEXT: [[AP_2:%.*]] = phi i8 [ [[AP_1]], [[CATCH_DISPATCH]] ], [ [[AP_1]], [[RETHROW]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = catchswitch within none [label %catch.start1] unwind to caller
; CHECK: catch.start1:
; CHECK-NEXT: [[TMP3:%.*]] = catchpad within [[TMP2]] [ptr null]
-; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[AP_2]], 1
-; CHECK-NEXT: [[TOBOOL1_NOT:%.*]] = icmp eq i8 [[TMP0]], 0
+; CHECK-NEXT: [[TOBOOL1_NOT:%.*]] = trunc i8 [[AP_2]] to i1
; CHECK-NEXT: br i1 [[TOBOOL1_NOT]], label [[IF_END1:%.*]], label [[IF_THEN1:%.*]]
; CHECK: if.then1:
-; CHECK-NEXT: br label [[IF_END1]]
+; CHECK-NEXT: br label [[IF_THEN1]]
; CHECK: if.end1:
; CHECK-NEXT: catchret from [[TMP3]] to label [[TRY_CONT]]
; CHECK: try.cont:
diff --git a/llvm/test/Transforms/InstCombine/copysign-fneg-fabs.ll b/llvm/test/Transforms/InstCombine/copysign-fneg-fabs.ll
index af939cf..a63eeab 100644
--- a/llvm/test/Transforms/InstCombine/copysign-fneg-fabs.ll
+++ b/llvm/test/Transforms/InstCombine/copysign-fneg-fabs.ll
@@ -275,6 +275,85 @@ define half @fneg_fabs_copysign_multi_use_fabs(half %x, half %y, ptr %ptr) {
ret half %fabs.copysign
}
+define half @copysign_pos(half %a) {
+; CHECK-LABEL: @copysign_pos(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = call half @llvm.copysign.f16(half 0xH3C00, half [[A:%.*]])
+; CHECK-NEXT: ret half [[RET]]
+;
+entry:
+ %ret = call half @llvm.copysign.f16(half 0xH3C00, half %a)
+ ret half %ret
+}
+
+define half @copysign_neg(half %a) {
+; CHECK-LABEL: @copysign_neg(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = call half @llvm.copysign.f16(half 0xH3C00, half [[A:%.*]])
+; CHECK-NEXT: ret half [[RET]]
+;
+entry:
+ %ret = call half @llvm.copysign.f16(half 0xHBC00, half %a)
+ ret half %ret
+}
+
+define half @copysign_negzero(half %a) {
+; CHECK-LABEL: @copysign_negzero(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = call half @llvm.copysign.f16(half 0xH0000, half [[A:%.*]])
+; CHECK-NEXT: ret half [[RET]]
+;
+entry:
+ %ret = call half @llvm.copysign.f16(half 0xH8000, half %a)
+ ret half %ret
+}
+
+define half @copysign_negnan(half %a) {
+; CHECK-LABEL: @copysign_negnan(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = call half @llvm.copysign.f16(half 0xH7E00, half [[A:%.*]])
+; CHECK-NEXT: ret half [[RET]]
+;
+entry:
+ %ret = call half @llvm.copysign.f16(half 0xHFE00, half %a)
+ ret half %ret
+}
+
+define half @copysign_neginf(half %a) {
+; CHECK-LABEL: @copysign_neginf(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = call half @llvm.copysign.f16(half 0xH7C00, half [[A:%.*]])
+; CHECK-NEXT: ret half [[RET]]
+;
+entry:
+ %ret = call half @llvm.copysign.f16(half 0xHFC00, half %a)
+ ret half %ret
+}
+
+define <4 x half> @copysign_splat(<4 x half> %a) {
+; CHECK-LABEL: @copysign_splat(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = call <4 x half> @llvm.copysign.v4f16(<4 x half> <half 0xH3C00, half 0xH3C00, half 0xH3C00, half 0xH3C00>, <4 x half> [[A:%.*]])
+; CHECK-NEXT: ret <4 x half> [[RET]]
+;
+entry:
+ %ret = call <4 x half> @llvm.copysign.v4f16(<4 x half> splat(half 0xHBC00), <4 x half> %a)
+ ret <4 x half> %ret
+}
+
+; TODO: Support constant folding of fabs
+
+define <4 x half> @copysign_vec4(<4 x half> %a) {
+; CHECK-LABEL: @copysign_vec4(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = call <4 x half> @llvm.copysign.v4f16(<4 x half> <half 0xH3C00, half 0xHBC00, half undef, half poison>, <4 x half> [[A:%.*]])
+; CHECK-NEXT: ret <4 x half> [[RET]]
+;
+entry:
+ %ret = call <4 x half> @llvm.copysign.v4f16(<4 x half> <half 0xH3C00, half 0xHBC00, half undef, half poison>, <4 x half> %a)
+ ret <4 x half> %ret
+}
+
declare half @llvm.fabs.f16(half)
declare <2 x half> @llvm.fabs.v2f16(<2 x half>)
declare half @llvm.copysign.f16(half, half)
diff --git a/llvm/test/Transforms/InstCombine/div.ll b/llvm/test/Transforms/InstCombine/div.ll
index 1309dee..e8a25ff 100644
--- a/llvm/test/Transforms/InstCombine/div.ll
+++ b/llvm/test/Transforms/InstCombine/div.ll
@@ -1810,3 +1810,25 @@ define i6 @udiv_distribute_mul_nsw_add_nuw(i6 %x) {
%div = udiv i6 %add, 3
ret i6 %div
}
+
+define i32 @fold_disjoint_or_over_sdiv(i32 %x) {
+; CHECK-LABEL: @fold_disjoint_or_over_sdiv(
+; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[X:%.*]], 9
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %mul = mul nsw i32 %x, 9
+ %or = or disjoint i32 %mul, 81
+ %r = sdiv i32 %or, 9
+ ret i32 %r
+}
+
+define i32 @fold_disjoint_or_over_udiv(i32 %x) {
+; CHECK-LABEL: @fold_disjoint_or_over_udiv(
+; CHECK-NEXT: [[R:%.*]] = add nuw i32 [[X:%.*]], 9
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %mul = mul nuw i32 %x, 9
+ %or = or disjoint i32 %mul, 81
+ %r = udiv i32 %or, 9
+ ret i32 %r
+}
diff --git a/llvm/test/Transforms/InstCombine/extract-select-agg.ll b/llvm/test/Transforms/InstCombine/extract-select-agg.ll
new file mode 100644
index 0000000..6ba6b1a
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/extract-select-agg.ll
@@ -0,0 +1,83 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+define i64 @test_select_agg_constant_agg(i64 %val, i1 %cond) {
+; CHECK-LABEL: define i64 @test_select_agg_constant_agg(
+; CHECK-SAME: i64 [[VAL:%.*]], i1 [[COND:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = zext i1 [[COND]] to i64
+; CHECK-NEXT: ret i64 [[RET]]
+;
+entry:
+ %sel = select i1 %cond, { i64, i64 } {i64 1, i64 2}, { i64, i64 } {i64 0, i64 3}
+ %ret = extractvalue { i64, i64 } %sel, 0
+ ret i64 %ret
+}
+
+define void @test_select_agg_constant_agg_multiuse(i64 %val, i1 %cond) {
+; CHECK-LABEL: define void @test_select_agg_constant_agg_multiuse(
+; CHECK-SAME: i64 [[VAL:%.*]], i1 [[COND:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = zext i1 [[COND]] to i64
+; CHECK-NEXT: call void @use(i64 [[RET]])
+; CHECK-NEXT: [[V1:%.*]] = select i1 [[COND]], i64 2, i64 3
+; CHECK-NEXT: call void @use(i64 [[V1]])
+; CHECK-NEXT: ret void
+;
+entry:
+ %sel = select i1 %cond, { i64, i64 } {i64 1, i64 2}, { i64, i64 } {i64 0, i64 3}
+ %v0 = extractvalue { i64, i64 } %sel, 0
+ call void @use(i64 %v0)
+ %v1 = extractvalue { i64, i64 } %sel, 1
+ call void @use(i64 %v1)
+ ret void
+}
+
+; TODO: it can be folded to zext i1 %cond to i64
+define i64 @test_select_agg_constant(i64 %val, i1 %cond) {
+; CHECK-LABEL: define i64 @test_select_agg_constant(
+; CHECK-SAME: i64 [[VAL:%.*]], i1 [[COND:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A:%.*]] = insertvalue { i64, i64 } { i64 1, i64 poison }, i64 [[VAL]], 1
+; CHECK-NEXT: [[B:%.*]] = insertvalue { i64, i64 } { i64 0, i64 poison }, i64 [[VAL]], 1
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND]], { i64, i64 } [[A]], { i64, i64 } [[B]]
+; CHECK-NEXT: [[RET:%.*]] = extractvalue { i64, i64 } [[SEL]], 0
+; CHECK-NEXT: ret i64 [[RET]]
+;
+entry:
+ %a = insertvalue { i64, i64 } { i64 1, i64 poison }, i64 %val, 1
+ %b = insertvalue { i64, i64 } { i64 0, i64 poison }, i64 %val, 1
+ %sel = select i1 %cond, { i64, i64 } %a, { i64, i64 } %b
+ %ret = extractvalue { i64, i64 } %sel, 0
+ ret i64 %ret
+}
+
+define void @test_select_agg_multiuse(i1 %cond, i64 %v1, i64 %v2, i64 %v3, i64 %v4) {
+; CHECK-LABEL: define void @test_select_agg_multiuse(
+; CHECK-SAME: i1 [[COND:%.*]], i64 [[V1:%.*]], i64 [[V2:%.*]], i64 [[V3:%.*]], i64 [[V4:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A0:%.*]] = insertvalue { i64, i64 } poison, i64 [[V1]], 0
+; CHECK-NEXT: [[A1:%.*]] = insertvalue { i64, i64 } [[A0]], i64 [[V2]], 1
+; CHECK-NEXT: [[B0:%.*]] = insertvalue { i64, i64 } poison, i64 [[V3]], 0
+; CHECK-NEXT: [[B1:%.*]] = insertvalue { i64, i64 } [[B0]], i64 [[V4]], 1
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND]], { i64, i64 } [[A1]], { i64, i64 } [[B1]]
+; CHECK-NEXT: [[X:%.*]] = extractvalue { i64, i64 } [[SEL]], 0
+; CHECK-NEXT: call void @use(i64 [[X]])
+; CHECK-NEXT: [[Y:%.*]] = extractvalue { i64, i64 } [[SEL]], 1
+; CHECK-NEXT: call void @use(i64 [[Y]])
+; CHECK-NEXT: ret void
+;
+entry:
+ %a0 = insertvalue { i64, i64 } poison, i64 %v1, 0
+ %a1 = insertvalue { i64, i64 } %a0, i64 %v2, 1
+ %b0 = insertvalue { i64, i64 } poison, i64 %v3, 0
+ %b1 = insertvalue { i64, i64 } %b0, i64 %v4, 1
+ %sel = select i1 %cond, { i64, i64 } %a1, { i64, i64 } %b1
+ %x = extractvalue { i64, i64 } %sel, 0
+ call void @use(i64 %x)
+ %y = extractvalue { i64, i64 } %sel, 1
+ call void @use(i64 %y)
+ ret void
+}
+
+declare void @use(i64)
diff --git a/llvm/test/Transforms/InstCombine/fcmp.ll b/llvm/test/Transforms/InstCombine/fcmp.ll
index 159c84d..f2701d1 100644
--- a/llvm/test/Transforms/InstCombine/fcmp.ll
+++ b/llvm/test/Transforms/InstCombine/fcmp.ll
@@ -644,7 +644,7 @@ define <2 x i1> @is_signbit_set_anyzero(<2 x double> %x) {
define i1 @is_signbit_clear(double %x) {
; CHECK-LABEL: @is_signbit_clear(
-; CHECK-NEXT: [[S:%.*]] = call double @llvm.copysign.f64(double -4.200000e+01, double [[X:%.*]])
+; CHECK-NEXT: [[S:%.*]] = call double @llvm.copysign.f64(double 4.200000e+01, double [[X:%.*]])
; CHECK-NEXT: [[R:%.*]] = fcmp ogt double [[S]], 0.000000e+00
; CHECK-NEXT: ret i1 [[R]]
;
@@ -655,7 +655,7 @@ define i1 @is_signbit_clear(double %x) {
define i1 @is_signbit_clear_1(double %x) {
; CHECK-LABEL: @is_signbit_clear_1(
-; CHECK-NEXT: [[S:%.*]] = call double @llvm.copysign.f64(double -4.200000e+01, double [[X:%.*]])
+; CHECK-NEXT: [[S:%.*]] = call double @llvm.copysign.f64(double 4.200000e+01, double [[X:%.*]])
; CHECK-NEXT: [[R:%.*]] = fcmp ugt double [[S]], 0.000000e+00
; CHECK-NEXT: ret i1 [[R]]
;
@@ -666,7 +666,7 @@ define i1 @is_signbit_clear_1(double %x) {
define i1 @is_signbit_clear_2(double %x) {
; CHECK-LABEL: @is_signbit_clear_2(
-; CHECK-NEXT: [[S:%.*]] = call double @llvm.copysign.f64(double -4.200000e+01, double [[X:%.*]])
+; CHECK-NEXT: [[S:%.*]] = call double @llvm.copysign.f64(double 4.200000e+01, double [[X:%.*]])
; CHECK-NEXT: [[R:%.*]] = fcmp oge double [[S]], 0.000000e+00
; CHECK-NEXT: ret i1 [[R]]
;
@@ -677,7 +677,7 @@ define i1 @is_signbit_clear_2(double %x) {
define i1 @is_signbit_clear_3(double %x) {
; CHECK-LABEL: @is_signbit_clear_3(
-; CHECK-NEXT: [[S:%.*]] = call double @llvm.copysign.f64(double -4.200000e+01, double [[X:%.*]])
+; CHECK-NEXT: [[S:%.*]] = call double @llvm.copysign.f64(double 4.200000e+01, double [[X:%.*]])
; CHECK-NEXT: [[R:%.*]] = fcmp uge double [[S]], 0.000000e+00
; CHECK-NEXT: ret i1 [[R]]
;
@@ -705,7 +705,7 @@ define i1 @is_signbit_set_extra_use(double %x, ptr %p) {
define i1 @is_signbit_clear_nonzero(double %x) {
; CHECK-LABEL: @is_signbit_clear_nonzero(
-; CHECK-NEXT: [[S:%.*]] = call double @llvm.copysign.f64(double -4.200000e+01, double [[X:%.*]])
+; CHECK-NEXT: [[S:%.*]] = call double @llvm.copysign.f64(double 4.200000e+01, double [[X:%.*]])
; CHECK-NEXT: [[R:%.*]] = fcmp ogt double [[S]], 1.000000e+00
; CHECK-NEXT: ret i1 [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/fmul.ll b/llvm/test/Transforms/InstCombine/fmul.ll
index 7e7373e..f6435f0 100644
--- a/llvm/test/Transforms/InstCombine/fmul.ll
+++ b/llvm/test/Transforms/InstCombine/fmul.ll
@@ -1093,11 +1093,11 @@ for.body:
define double @fmul_negated_constant_expression(double %x) {
; CHECK-LABEL: @fmul_negated_constant_expression(
-; CHECK-NEXT: [[FSUB:%.*]] = fneg double bitcast (i64 ptrtoint (ptr getelementptr inbounds ({ [2 x ptr] }, ptr @g, i64 0, inrange i32 0, i64 2) to i64) to double)
+; CHECK-NEXT: [[FSUB:%.*]] = fneg double bitcast (i64 ptrtoint (ptr getelementptr inbounds ({ [2 x ptr] }, ptr @g, i64 1, i32 0, i64 0) to i64) to double)
; CHECK-NEXT: [[R:%.*]] = fmul double [[FSUB]], [[X:%.*]]
; CHECK-NEXT: ret double [[R]]
;
- %fsub = fsub double -0.000000e+00, bitcast (i64 ptrtoint (ptr getelementptr inbounds ({ [2 x ptr] }, ptr @g, i64 0, inrange i32 0, i64 2) to i64) to double)
+ %fsub = fsub double -0.000000e+00, bitcast (i64 ptrtoint (ptr getelementptr inbounds ({ [2 x ptr] }, ptr @g, i64 0, i32 0, i64 2) to i64) to double)
%r = fmul double %x, %fsub
ret double %r
}
@@ -1250,7 +1250,7 @@ define half @mul_zero_nnan(half %x) {
define <2 x float> @mul_zero_nnan_vec_poison(<2 x float> %x) {
; CHECK-LABEL: @mul_zero_nnan_vec_poison(
-; CHECK-NEXT: [[R:%.*]] = call nnan <2 x float> @llvm.copysign.v2f32(<2 x float> <float 0.000000e+00, float poison>, <2 x float> [[X:%.*]])
+; CHECK-NEXT: [[R:%.*]] = call nnan <2 x float> @llvm.copysign.v2f32(<2 x float> zeroinitializer, <2 x float> [[X:%.*]])
; CHECK-NEXT: ret <2 x float> [[R]]
;
%r = fmul nnan <2 x float> %x, <float 0.0, float poison>
@@ -1268,13 +1268,104 @@ define half @mul_zero(half %x) {
ret half %r
}
-; TODO: This could be fneg+copysign.
-
define half @mul_negzero_nnan(half %x) {
; CHECK-LABEL: @mul_negzero_nnan(
-; CHECK-NEXT: [[R:%.*]] = fmul nnan half [[X:%.*]], 0xH8000
+; CHECK-NEXT: [[TMP1:%.*]] = fneg nnan half [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = call nnan half @llvm.copysign.f16(half 0xH0000, half [[TMP1]])
; CHECK-NEXT: ret half [[R]]
;
%r = fmul nnan half %x, -0.0
ret half %r
}
+
+define float @mul_pos_zero_nnan_ninf(float nofpclass(inf nan) %a) {
+; CHECK-LABEL: @mul_pos_zero_nnan_ninf(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = call float @llvm.copysign.f32(float 0.000000e+00, float [[A:%.*]])
+; CHECK-NEXT: ret float [[RET]]
+;
+entry:
+ %ret = fmul float %a, 0.000000e+00
+ ret float %ret
+}
+
+define float @mul_pos_zero_nnan(float nofpclass(nan) %a) {
+; CHECK-LABEL: @mul_pos_zero_nnan(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = fmul float [[A:%.*]], 0.000000e+00
+; CHECK-NEXT: ret float [[RET]]
+;
+entry:
+ %ret = fmul float %a, 0.000000e+00
+ ret float %ret
+}
+
+define float @mul_pos_zero_nnan_ninf_fmf(float nofpclass(nan) %a) {
+; CHECK-LABEL: @mul_pos_zero_nnan_ninf_fmf(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = call ninf float @llvm.copysign.f32(float 0.000000e+00, float [[A:%.*]])
+; CHECK-NEXT: ret float [[RET]]
+;
+entry:
+ %ret = fmul ninf float %a, 0.000000e+00
+ ret float %ret
+}
+
+define float @mul_neg_zero_nnan_ninf(float nofpclass(inf nan) %a) {
+; CHECK-LABEL: @mul_neg_zero_nnan_ninf(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = fneg float [[A:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = call float @llvm.copysign.f32(float 0.000000e+00, float [[TMP0]])
+; CHECK-NEXT: ret float [[RET]]
+;
+entry:
+ %ret = fmul float %a, -0.000000e+00
+ ret float %ret
+}
+
+define float @mul_neg_zero_nnan_fmf(float %a) {
+; CHECK-LABEL: @mul_neg_zero_nnan_fmf(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = fneg nnan float [[A:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = call nnan float @llvm.copysign.f32(float 0.000000e+00, float [[TMP0]])
+; CHECK-NEXT: ret float [[RET]]
+;
+entry:
+ %ret = fmul nnan float %a, -0.000000e+00
+ ret float %ret
+}
+
+define float @mul_neg_zero_nnan_ninf_fmf(float nofpclass(inf nan) %a) {
+; CHECK-LABEL: @mul_neg_zero_nnan_ninf_fmf(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = fneg nnan ninf float [[A:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = call nnan ninf float @llvm.copysign.f32(float 0.000000e+00, float [[TMP0]])
+; CHECK-NEXT: ret float [[RET]]
+;
+entry:
+ %ret = fmul nnan ninf float %a, -0.000000e+00
+ ret float %ret
+}
+
+define <3 x float> @mul_neg_zero_nnan_ninf_vec(<3 x float> nofpclass(inf nan) %a) {
+; CHECK-LABEL: @mul_neg_zero_nnan_ninf_vec(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = fneg <3 x float> [[A:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = call <3 x float> @llvm.copysign.v3f32(<3 x float> zeroinitializer, <3 x float> [[TMP0]])
+; CHECK-NEXT: ret <3 x float> [[RET]]
+;
+entry:
+ %ret = fmul <3 x float> %a, <float -0.0, float undef, float poison>
+ ret <3 x float> %ret
+}
+
+define <3 x float> @mul_mixed_zero_nnan_ninf_vec(<3 x float> nofpclass(inf nan) %a) {
+; CHECK-LABEL: @mul_mixed_zero_nnan_ninf_vec(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = fmul <3 x float> [[A:%.*]], <float -0.000000e+00, float 0.000000e+00, float poison>
+; CHECK-NEXT: ret <3 x float> [[RET]]
+;
+entry:
+ %ret = fmul <3 x float> %a, <float -0.0, float 0.0, float poison>
+ ret <3 x float> %ret
+}
diff --git a/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll b/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll
index a7adcd1..dedd12f 100644
--- a/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll
+++ b/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll
@@ -427,13 +427,13 @@ define float @fmul_by_snan_if_0_oeq_zero_f32(float %x) {
define float @fmul_by_var_if_0_oeq_zero_f32(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
%scaled.x = fmul float %x, %y
- %scaled.if.denormal = select i1 %x.is.zero, float %scaled.x, float %x
+ %scaled.if.denormal = select nnan i1 %x.is.zero, float %scaled.x, float %x
ret float %scaled.if.denormal
}
@@ -441,14 +441,14 @@ define float @fmul_by_fabs_var_if_0_oeq_zero_f32(float %x, float %y) {
; CHECK-LABEL: @fmul_by_fabs_var_if_0_oeq_zero_f32(
; CHECK-NEXT: [[Y_FABS:%.*]] = call float @llvm.fabs.f32(float [[Y:%.*]])
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select i1 [[X_IS_ZERO]], float [[Y_FABS]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y_FABS]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%y.fabs = call float @llvm.fabs.f32(float %y)
%x.is.zero = fcmp oeq float %x, 0.0
%scaled.x = fmul float %x, %y.fabs
- %scaled.if.denormal = select i1 %x.is.zero, float %scaled.x, float %x
+ %scaled.if.denormal = select nnan i1 %x.is.zero, float %scaled.x, float %x
ret float %scaled.if.denormal
}
@@ -467,13 +467,13 @@ define float @fmul_by_fabs_nnan_ninf_var_if_0_oeq_zero_f32(float %x, float %y) {
define float @fmul_by_var_if_0_oeq_zero_f32_nsz_fmul(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nsz_fmul(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nsz float [[SCALED_X]], [[X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
%scaled.x = fmul nsz float %x, %y
- %scaled.if.denormal = select i1 %x.is.zero, float %scaled.x, float %x
+ %scaled.if.denormal = select nnan i1 %x.is.zero, float %scaled.x, float %x
ret float %scaled.if.denormal
}
@@ -481,13 +481,13 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nsz_fmul(float %x, float %y) {
define float @fmul_by_var_if_0_oeq_zero_f32_nsz_ninf_fmul(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nsz_ninf_fmul(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul ninf nsz float [[SCALED_X]], [[X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
%scaled.x = fmul nsz ninf float %x, %y
- %scaled.if.denormal = select i1 %x.is.zero, float %scaled.x, float %x
+ %scaled.if.denormal = select nnan i1 %x.is.zero, float %scaled.x, float %x
ret float %scaled.if.denormal
}
@@ -495,13 +495,13 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nsz_ninf_fmul(float %x, float %y) {
define float @fmul_by_var_if_0_oeq_zero_f32_nsz_nnan_fmul(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nsz_nnan_fmul(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan nsz float [[SCALED_X]], [[X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
%scaled.x = fmul nsz nnan float %x, %y
- %scaled.if.denormal = select i1 %x.is.zero, float %scaled.x, float %x
+ %scaled.if.denormal = select nnan i1 %x.is.zero, float %scaled.x, float %x
ret float %scaled.if.denormal
}
@@ -509,13 +509,13 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nsz_nnan_fmul(float %x, float %y) {
define float @fmul_by_var_if_0_oeq_zero_f32_nnan_ninf_fmul(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nnan_ninf_fmul(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf float [[SCALED_X]], [[X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
%scaled.x = fmul nnan ninf float %x, %y
- %scaled.if.denormal = select i1 %x.is.zero, float %scaled.x, float %x
+ %scaled.if.denormal = select nnan i1 %x.is.zero, float %scaled.x, float %x
ret float %scaled.if.denormal
}
@@ -558,26 +558,26 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_nsz_inverted(f
define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf nsz float [[SCALED_X]], [[X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
%scaled.x = fmul nnan ninf nsz float %x, %y
- %scaled.if.denormal = select i1 %x.is.zero, float %scaled.x, float %x
+ %scaled.if.denormal = select nnan i1 %x.is.zero, float %scaled.x, float %x
ret float %scaled.if.denormal
}
define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz_commuted(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz_commuted(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf nsz float [[SCALED_X]], [[X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
%scaled.x = fmul nnan ninf nsz float %y, %x
- %scaled.if.denormal = select i1 %x.is.zero, float %scaled.x, float %x
+ %scaled.if.denormal = select nnan i1 %x.is.zero, float %scaled.x, float %x
ret float %scaled.if.denormal
}
@@ -585,26 +585,26 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz_commuted(float %x
define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero(float %x, float nofpclass(nzero) %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf float [[SCALED_X]], [[X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
%scaled.x = fmul nnan ninf float %x, %y
- %scaled.if.denormal = select i1 %x.is.zero, float %scaled.x, float %x
+ %scaled.if.denormal = select nnan i1 %x.is.zero, float %scaled.x, float %x
ret float %scaled.if.denormal
}
define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero_negsub(float %x, float nofpclass(nzero nsub) %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero_negsub(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf float [[SCALED_X]], [[X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
%scaled.x = fmul nnan ninf float %x, %y
- %scaled.if.denormal = select i1 %x.is.zero, float %scaled.x, float %x
+ %scaled.if.denormal = select nnan i1 %x.is.zero, float %scaled.x, float %x
ret float %scaled.if.denormal
}
@@ -622,26 +622,26 @@ define float @fmul_by_var_if_0_oeq_zero_f32_known_never_nan_inf_select_nsz(float
define float @fmul_by_var_if_0_oeq_zero_f32_fmul_known_never_nan_inf_negzero(float %x, float nofpclass(nan inf nzero) %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_known_never_nan_inf_negzero(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
%scaled.x = fmul float %x, %y
- %scaled.if.denormal = select i1 %x.is.zero, float %scaled.x, float %x
+ %scaled.if.denormal = select nnan i1 %x.is.zero, float %scaled.x, float %x
ret float %scaled.if.denormal
}
define float @fmul_by_var_if_0_oeq_zero_f32_fmul_known_never_nan_inf_negzero_nsub(float %x, float nofpclass(nan inf nzero nsub) %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_known_never_nan_inf_negzero_nsub(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
%scaled.x = fmul float %x, %y
- %scaled.if.denormal = select i1 %x.is.zero, float %scaled.x, float %x
+ %scaled.if.denormal = select nnan i1 %x.is.zero, float %scaled.x, float %x
ret float %scaled.if.denormal
}
@@ -692,26 +692,26 @@ define float @fmul_by_var_if_not_one_0_zero_f32_assume_finite_fmul_nsz(float %x,
define float @fmul_by_self_if_0_oeq_zero_f32(float %x) {
; CHECK-LABEL: @fmul_by_self_if_0_oeq_zero_f32(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select i1 [[X_IS_ZERO]], float [[X]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[X]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
%scaled.x = fmul float %x, %x
- %scaled.if.denormal = select i1 %x.is.zero, float %scaled.x, float %x
+ %scaled.if.denormal = select nnan i1 %x.is.zero, float %scaled.x, float %x
ret float %scaled.if.denormal
}
define float @fmul_by_self_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(float %x) {
; CHECK-LABEL: @fmul_by_self_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select i1 [[X_IS_ZERO]], float [[X]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[X]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf nsz float [[SCALED_X]], [[X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
%scaled.x = fmul nnan ninf nsz float %x, %x
- %scaled.if.denormal = select i1 %x.is.zero, float %scaled.x, float %x
+ %scaled.if.denormal = select nnan i1 %x.is.zero, float %scaled.x, float %x
ret float %scaled.if.denormal
}
diff --git a/llvm/test/Transforms/InstCombine/fpcast.ll b/llvm/test/Transforms/InstCombine/fpcast.ll
index 3e5c6fd..ac4b88f 100644
--- a/llvm/test/Transforms/InstCombine/fpcast.ll
+++ b/llvm/test/Transforms/InstCombine/fpcast.ll
@@ -347,3 +347,88 @@ define double @masked_uint_to_fpext3(i32 %x) {
%r = fpext float %f to double
ret double %r
}
+
+define i32 @fptosi_nonnorm(float nofpclass(norm) %x) {
+; CHECK-LABEL: @fptosi_nonnorm(
+; CHECK-NEXT: ret i32 0
+;
+ %ret = fptosi float %x to i32
+ ret i32 %ret
+}
+
+define i32 @fptoui_nonnorm(float nofpclass(pnorm) %x) {
+; CHECK-LABEL: @fptoui_nonnorm(
+; CHECK-NEXT: ret i32 0
+;
+ %ret = fptoui float %x to i32
+ ret i32 %ret
+}
+
+define i32 @fptosi_nonnnorm(float nofpclass(nnorm) %x) {
+; CHECK-LABEL: @fptosi_nonnnorm(
+; CHECK-NEXT: [[RET:%.*]] = fptosi float [[X:%.*]] to i32
+; CHECK-NEXT: ret i32 [[RET]]
+;
+ %ret = fptosi float %x to i32
+ ret i32 %ret
+}
+
+define i32 @fptoui_nonnnorm(float nofpclass(nnorm) %x) {
+; CHECK-LABEL: @fptoui_nonnnorm(
+; CHECK-NEXT: [[RET:%.*]] = fptoui float [[X:%.*]] to i32
+; CHECK-NEXT: ret i32 [[RET]]
+;
+ %ret = fptoui float %x to i32
+ ret i32 %ret
+}
+
+define i32 @fptosi_nonnorm_copysign(float %x) {
+; CHECK-LABEL: @fptosi_nonnorm_copysign(
+; CHECK-NEXT: ret i32 0
+;
+ %val = call float @llvm.copysign.f32(float 0.0, float %x)
+ %ret = fptosi float %val to i32
+ ret i32 %ret
+}
+
+define <2 x i32> @fptosi_nonnorm_copysign_vec(<2 x float> %x) {
+; CHECK-LABEL: @fptosi_nonnorm_copysign_vec(
+; CHECK-NEXT: ret <2 x i32> zeroinitializer
+;
+ %val = call <2 x float> @llvm.copysign.v2f32(<2 x float> zeroinitializer, <2 x float> %x)
+ %ret = fptosi <2 x float> %val to <2 x i32>
+ ret <2 x i32> %ret
+}
+
+define i32 @fptosi_nonnorm_fmul(float %x) {
+; CHECK-LABEL: @fptosi_nonnorm_fmul(
+; CHECK-NEXT: [[SEL:%.*]] = fmul float [[X:%.*]], 0.000000e+00
+; CHECK-NEXT: [[RET:%.*]] = fptosi float [[SEL]] to i32
+; CHECK-NEXT: ret i32 [[RET]]
+;
+ %sel = fmul float %x, 0.000000e+00
+ %ret = fptosi float %sel to i32
+ ret i32 %ret
+}
+
+define i32 @fptosi_select(i1 %cond) {
+; CHECK-LABEL: @fptosi_select(
+; CHECK-NEXT: [[RET:%.*]] = select i1 [[COND:%.*]], i32 1, i32 -1
+; CHECK-NEXT: ret i32 [[RET]]
+;
+ %sel = select i1 %cond, float 1.0, float -1.0
+ %ret = fptosi float %sel to i32
+ ret i32 %ret
+}
+
+define i32 @mul_pos_zero_convert(i32 %a) {
+; CHECK-LABEL: @mul_pos_zero_convert(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %fp = sitofp i32 %a to float
+ %ret = fmul float %fp, 0.000000e+00
+ %conv = fptosi float %ret to i32
+ ret i32 %conv
+}
diff --git a/llvm/test/Transforms/InstCombine/freeze.ll b/llvm/test/Transforms/InstCombine/freeze.ll
index da59101..e8105b62 100644
--- a/llvm/test/Transforms/InstCombine/freeze.ll
+++ b/llvm/test/Transforms/InstCombine/freeze.ll
@@ -1049,7 +1049,7 @@ exit:
define ptr @freeze_load_noundef(ptr %ptr) {
; CHECK-LABEL: @freeze_load_noundef(
-; CHECK-NEXT: [[P:%.*]] = load ptr, ptr [[PTR:%.*]], align 8, !noundef !0
+; CHECK-NEXT: [[P:%.*]] = load ptr, ptr [[PTR:%.*]], align 8, !noundef [[META0:![0-9]+]]
; CHECK-NEXT: ret ptr [[P]]
;
%p = load ptr, ptr %ptr, !noundef !0
@@ -1059,7 +1059,7 @@ define ptr @freeze_load_noundef(ptr %ptr) {
define ptr @freeze_load_dereferenceable(ptr %ptr) {
; CHECK-LABEL: @freeze_load_dereferenceable(
-; CHECK-NEXT: [[P:%.*]] = load ptr, ptr [[PTR:%.*]], align 8, !dereferenceable !1
+; CHECK-NEXT: [[P:%.*]] = load ptr, ptr [[PTR:%.*]], align 8, !dereferenceable [[META1:![0-9]+]]
; CHECK-NEXT: ret ptr [[P]]
;
%p = load ptr, ptr %ptr, !dereferenceable !1
@@ -1138,6 +1138,17 @@ define i32 @propagate_drop_flags_or(i32 %arg) {
ret i32 %v1.fr
}
+define i32 @propagate_drop_flags_trunc(i64 %arg) {
+; CHECK-LABEL: @propagate_drop_flags_trunc(
+; CHECK-NEXT: [[ARG_FR:%.*]] = freeze i64 [[ARG:%.*]]
+; CHECK-NEXT: [[V1:%.*]] = trunc i64 [[ARG_FR]] to i32
+; CHECK-NEXT: ret i32 [[V1]]
+;
+ %v1 = trunc nsw nuw i64 %arg to i32
+ %v1.fr = freeze i32 %v1
+ ret i32 %v1.fr
+}
+
!0 = !{}
!1 = !{i64 4}
!2 = !{i32 0, i32 100}
@@ -1145,8 +1156,8 @@ define i32 @propagate_drop_flags_or(i32 %arg) {
; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
; CHECK: attributes #[[ATTR1]] = { nounwind }
;.
-; CHECK: [[META0:![0-9]+]] = !{}
-; CHECK: [[META1:![0-9]+]] = !{i64 4}
+; CHECK: [[META0]] = !{}
+; CHECK: [[META1]] = !{i64 4}
; CHECK: [[RNG2]] = !{i32 0, i32 100}
; CHECK: [[RNG3]] = !{i32 0, i32 33}
;.
diff --git a/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll b/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll
index 0706092..5de3e89 100644
--- a/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll
@@ -226,12 +226,11 @@ define i1 @src_is_mask_shl_lshr(i8 %x_in, i8 %y, i1 %cond) {
define i1 @src_is_mask_shl_lshr_fail_not_allones(i8 %x_in, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_is_mask_shl_lshr_fail_not_allones(
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[MASK:%.*]] = and i8 [[TMP1]], -2
-; CHECK-NEXT: [[NOTMASK:%.*]] = xor i8 [[MASK]], -1
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[X]], [[NOTMASK]]
-; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[AND]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[X_IN:%.*]], -124
+; CHECK-NEXT: [[TMP3:%.*]] = or i8 [[TMP2]], [[MASK]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[TMP3]], -1
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
@@ -471,6 +470,24 @@ define i1 @src_is_notmask_x_xor_neg_x(i8 %x_in, i8 %y, i1 %cond) {
ret i1 %r
}
+define i1 @src_is_notmask_x_xor_neg_x_inv(i8 %x_in, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_is_notmask_x_xor_neg_x_inv(
+; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT: [[NEG_Y:%.*]] = add i8 [[Y:%.*]], -1
+; CHECK-NEXT: [[NOTMASK0:%.*]] = xor i8 [[NEG_Y]], [[Y]]
+; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[COND:%.*]], i8 [[NOTMASK0]], i8 7
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[TMP3]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %x = xor i8 %x_in, 123
+ %neg_y = sub i8 0, %y
+ %nmask0 = xor i8 %y, %neg_y
+ %notmask = select i1 %cond, i8 %nmask0, i8 -8
+ %and = and i8 %notmask, %x
+ %r = icmp eq i8 %and, 0
+ ret i1 %r
+}
+
define i1 @src_is_notmask_shl_fail_multiuse_invert(i8 %x_in, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_is_notmask_shl_fail_multiuse_invert(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 122
@@ -572,11 +589,10 @@ define i1 @src_is_notmask_neg_p2(i8 %x_in, i8 %y) {
define i1 @src_is_notmask_neg_p2_fail_not_invertable(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_notmask_neg_p2_fail_not_invertable(
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[Y]], -1
-; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[TMP3]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], -124
+; CHECK-NEXT: [[TMP2:%.*]] = sub i8 0, [[Y:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = or i8 [[TMP2]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[TMP3]]
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
@@ -657,3 +673,238 @@ define i1 @src_is_mask_const_sge(i8 %x_in) {
%r = icmp sge i8 %and, %x
ret i1 %r
}
+
+define i1 @src_x_and_mask_slt(i8 %x, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_x_and_mask_slt(
+; CHECK-NEXT: [[MASK0:%.*]] = lshr i8 -1, [[Y:%.*]]
+; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
+; CHECK-NEXT: [[MASK_POS:%.*]] = icmp sgt i8 [[MASK]], -1
+; CHECK-NEXT: call void @llvm.assume(i1 [[MASK_POS]])
+; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[MASK]], [[X:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %mask0 = lshr i8 -1, %y
+ %mask = select i1 %cond, i8 %mask0, i8 0
+ %mask_pos = icmp sge i8 %mask, 0
+ call void @llvm.assume(i1 %mask_pos)
+ %and = and i8 %x, %mask
+ %r = icmp slt i8 %and, %x
+ ret i1 %r
+}
+
+define i1 @src_x_and_mask_sge(i8 %x, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_x_and_mask_sge(
+; CHECK-NEXT: [[MASK0:%.*]] = lshr i8 -1, [[Y:%.*]]
+; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
+; CHECK-NEXT: [[MASK_POS:%.*]] = icmp sgt i8 [[MASK]], -1
+; CHECK-NEXT: call void @llvm.assume(i1 [[MASK_POS]])
+; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[MASK]], [[X:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %mask0 = lshr i8 -1, %y
+ %mask = select i1 %cond, i8 %mask0, i8 0
+ %mask_pos = icmp sge i8 %mask, 0
+ call void @llvm.assume(i1 %mask_pos)
+ %and = and i8 %x, %mask
+ %r = icmp sge i8 %and, %x
+ ret i1 %r
+}
+
+define i1 @src_x_and_mask_slt_fail_maybe_neg(i8 %x, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_x_and_mask_slt_fail_maybe_neg(
+; CHECK-NEXT: [[MASK0:%.*]] = lshr i8 -1, [[Y:%.*]]
+; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[MASK]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[AND]], [[X]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %mask0 = lshr i8 -1, %y
+ %mask = select i1 %cond, i8 %mask0, i8 0
+ %and = and i8 %x, %mask
+ %r = icmp slt i8 %and, %x
+ ret i1 %r
+}
+
+define i1 @src_x_and_mask_sge_fail_maybe_neg(i8 %x, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_x_and_mask_sge_fail_maybe_neg(
+; CHECK-NEXT: [[MASK0:%.*]] = lshr i8 -1, [[Y:%.*]]
+; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[MASK]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[AND]], [[X]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %mask0 = lshr i8 -1, %y
+ %mask = select i1 %cond, i8 %mask0, i8 0
+ %and = and i8 %x, %mask
+ %r = icmp sge i8 %and, %x
+ ret i1 %r
+}
+
+define i1 @src_x_and_nmask_eq(i8 %x, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_x_and_nmask_eq(
+; CHECK-NEXT: [[NOT_MASK0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
+; CHECK-NEXT: [[R1:%.*]] = icmp ule i8 [[NOT_MASK0]], [[X:%.*]]
+; CHECK-NEXT: [[NOT_COND:%.*]] = xor i1 [[COND:%.*]], true
+; CHECK-NEXT: [[R:%.*]] = select i1 [[NOT_COND]], i1 true, i1 [[R1]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %not_mask0 = shl i8 -1, %y
+ %not_mask = select i1 %cond, i8 %not_mask0, i8 0
+ %and = and i8 %x, %not_mask
+ %r = icmp eq i8 %not_mask, %and
+ ret i1 %r
+}
+
+define i1 @src_x_and_nmask_ne(i8 %x, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_x_and_nmask_ne(
+; CHECK-NEXT: [[NOT_MASK0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
+; CHECK-NEXT: [[R1:%.*]] = icmp ugt i8 [[NOT_MASK0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = select i1 [[COND:%.*]], i1 [[R1]], i1 false
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %not_mask0 = shl i8 -1, %y
+ %not_mask = select i1 %cond, i8 %not_mask0, i8 0
+ %and = and i8 %x, %not_mask
+ %r = icmp ne i8 %and, %not_mask
+ ret i1 %r
+}
+
+define i1 @src_x_and_nmask_ult(i8 %x, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_x_and_nmask_ult(
+; CHECK-NEXT: [[NOT_MASK0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
+; CHECK-NEXT: [[R1:%.*]] = icmp ugt i8 [[NOT_MASK0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = select i1 [[COND:%.*]], i1 [[R1]], i1 false
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %not_mask0 = shl i8 -1, %y
+ %not_mask = select i1 %cond, i8 %not_mask0, i8 0
+ %and = and i8 %x, %not_mask
+ %r = icmp ult i8 %and, %not_mask
+ ret i1 %r
+}
+
+define i1 @src_x_and_nmask_uge(i8 %x, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_x_and_nmask_uge(
+; CHECK-NEXT: [[NOT_MASK0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
+; CHECK-NEXT: [[R1:%.*]] = icmp ule i8 [[NOT_MASK0]], [[X:%.*]]
+; CHECK-NEXT: [[NOT_COND:%.*]] = xor i1 [[COND:%.*]], true
+; CHECK-NEXT: [[R:%.*]] = select i1 [[NOT_COND]], i1 true, i1 [[R1]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %not_mask0 = shl i8 -1, %y
+ %not_mask = select i1 %cond, i8 %not_mask0, i8 0
+ %and = and i8 %x, %not_mask
+ %r = icmp uge i8 %and, %not_mask
+ ret i1 %r
+}
+
+define i1 @src_x_and_nmask_slt(i8 %x, i8 %y) {
+; CHECK-LABEL: @src_x_and_nmask_slt(
+; CHECK-NEXT: [[NOT_MASK:%.*]] = shl nsw i8 -1, [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[NOT_MASK]], [[X:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %not_mask = shl i8 -1, %y
+ %and = and i8 %x, %not_mask
+ %r = icmp slt i8 %and, %not_mask
+ ret i1 %r
+}
+
+define i1 @src_x_and_nmask_sge(i8 %x, i8 %y) {
+; CHECK-LABEL: @src_x_and_nmask_sge(
+; CHECK-NEXT: [[NOT_MASK:%.*]] = shl nsw i8 -1, [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[NOT_MASK]], [[X:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %not_mask = shl i8 -1, %y
+ %and = and i8 %x, %not_mask
+ %r = icmp sge i8 %and, %not_mask
+ ret i1 %r
+}
+
+define i1 @src_x_and_nmask_slt_fail_maybe_z(i8 %x, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_x_and_nmask_slt_fail_maybe_z(
+; CHECK-NEXT: [[NOT_MASK0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
+; CHECK-NEXT: [[NOT_MASK:%.*]] = select i1 [[COND:%.*]], i8 [[NOT_MASK0]], i8 0
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[NOT_MASK]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[AND]], [[NOT_MASK]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %not_mask0 = shl i8 -1, %y
+ %not_mask = select i1 %cond, i8 %not_mask0, i8 0
+ %and = and i8 %x, %not_mask
+ %r = icmp slt i8 %and, %not_mask
+ ret i1 %r
+}
+
+define i1 @src_x_and_nmask_sge_fail_maybe_z(i8 %x, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_x_and_nmask_sge_fail_maybe_z(
+; CHECK-NEXT: [[NOT_MASK0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
+; CHECK-NEXT: [[NOT_MASK:%.*]] = select i1 [[COND:%.*]], i8 [[NOT_MASK0]], i8 0
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[NOT_MASK]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[AND]], [[NOT_MASK]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %not_mask0 = shl i8 -1, %y
+ %not_mask = select i1 %cond, i8 %not_mask0, i8 0
+ %and = and i8 %x, %not_mask
+ %r = icmp sge i8 %and, %not_mask
+ ret i1 %r
+}
+
+define i1 @src_x_or_mask_eq(i8 %x, i8 %y, i8 %z, i1 %c2, i1 %cond) {
+; CHECK-LABEL: @src_x_or_mask_eq(
+; CHECK-NEXT: [[MASK0:%.*]] = lshr i8 -1, [[Y:%.*]]
+; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X:%.*]], -124
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[C2:%.*]], i8 [[TMP1]], i8 -46
+; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.umax.i8(i8 [[Z:%.*]], i8 [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = add i8 [[TMP3]], -12
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[TMP4]], [[MASK]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %mask0 = lshr i8 -1, %y
+ %mask = select i1 %cond, i8 %mask0, i8 0
+ %nx = xor i8 %x, 123
+ %nx_c = select i1 %c2, i8 %nx, i8 45
+ %nz = xor i8 %z, -1
+ %nx_cc = call i8 @llvm.umin.i8(i8 %nz, i8 %nx_c)
+ %nx_ccc = add i8 %nx_cc, 12
+ %or = or i8 %nx_ccc, %mask
+ %r = icmp eq i8 %or, -1
+ ret i1 %r
+}
+
+define i1 @src_x_or_mask_ne(i8 %x, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_x_or_mask_ne(
+; CHECK-NEXT: [[MASK0:%.*]] = lshr i8 -1, [[Y:%.*]]
+; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
+; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[MASK]], [[X:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %mask0 = lshr i8 -1, %y
+ %mask = select i1 %cond, i8 %mask0, i8 0
+ %nx = xor i8 %x, -1
+ %or = or i8 %mask, %nx
+ %r = icmp ne i8 %or, -1
+ ret i1 %r
+}
+
+define i1 @src_x_or_mask_ne_fail_multiuse(i8 %x, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_x_or_mask_ne_fail_multiuse(
+; CHECK-NEXT: [[MASK0:%.*]] = lshr i8 -1, [[Y:%.*]]
+; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
+; CHECK-NEXT: [[NX:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT: [[OR:%.*]] = or i8 [[MASK]], [[NX]]
+; CHECK-NEXT: call void @use.i8(i8 [[OR]])
+; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[OR]], -1
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %mask0 = lshr i8 -1, %y
+ %mask = select i1 %cond, i8 %mask0, i8 0
+ %nx = xor i8 %x, -1
+ %or = or i8 %mask, %nx
+ call void @use.i8(i8 %or)
+ %r = icmp ne i8 %or, -1
+ ret i1 %r
+}
diff --git a/llvm/test/Transforms/InstCombine/icmp-mul-and.ll b/llvm/test/Transforms/InstCombine/icmp-mul-and.ll
index d5f5641..7e7f087 100644
--- a/llvm/test/Transforms/InstCombine/icmp-mul-and.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-mul-and.ll
@@ -267,10 +267,10 @@ define i1 @pr51551_neg1(i32 %x, i32 %y) {
define i1 @pr51551_neg2(i32 %x, i32 %y) {
; CHECK-LABEL: @pr51551_neg2(
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[Y:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i32 [[TMP1]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y:%.*]] to i1
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[X:%.*]], 7
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[TMP2]], 0
+; CHECK-NEXT: [[DOTNOT:%.*]] = xor i1 [[TMP1]], true
; CHECK-NEXT: [[CMP:%.*]] = select i1 [[DOTNOT]], i1 true, i1 [[CMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll b/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
index adf7872..d858c91 100644
--- a/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
@@ -128,12 +128,12 @@ define i1 @PR46561(i1 %a, i1 %x, i1 %y, i8 %z) {
; CHECK-NEXT: br i1 [[A:%.*]], label [[COND_TRUE:%.*]], label [[END:%.*]]
; CHECK: cond.true:
; CHECK-NEXT: [[MULBOOL:%.*]] = and i1 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[Z:%.*]], 1
-; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i8 [[Z:%.*]] to i1
; CHECK-NEXT: [[TMP2:%.*]] = xor i1 [[MULBOOL]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[P:%.*]] = phi i1 [ [[TMP2]], [[COND_TRUE]] ], [ false, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[P:%.*]] = phi i1 [ [[TMP3]], [[COND_TRUE]] ], [ false, [[ENTRY:%.*]] ]
; CHECK-NEXT: ret i1 [[P]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll b/llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll
new file mode 100644
index 0000000..bacdb54
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll
@@ -0,0 +1,93 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes=instcombine < %s | FileCheck %s
+
+define i1 @sgt_3_impliesF_eq_2(i8 %x, i8 %y) {
+; CHECK-LABEL: @sgt_3_impliesF_eq_2(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 4
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[SEL:%.*]], [[X]]
+; CHECK-NEXT: [[CMP3:%.*]] = select i1 [[CMP]], i1 [[CMP2]], i1 false
+; CHECK-NEXT: ret i1 [[CMP3]]
+;
+ %cmp = icmp sgt i8 %x, 3
+ %sel = select i1 %cmp, i8 2, i8 %y
+ %cmp2 = icmp eq i8 %sel, %x
+ ret i1 %cmp2
+}
+
+define i1 @sgt_3_impliesT_sgt_2(i8 %x, i8 %y) {
+; CHECK-LABEL: @sgt_3_impliesT_sgt_2(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 4
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i8 [[SEL:%.*]], [[X]]
+; CHECK-NEXT: [[CMP3:%.*]] = select i1 [[CMP]], i1 [[CMP2]], i1 false
+; CHECK-NEXT: ret i1 [[CMP3]]
+;
+ %cmp = icmp sgt i8 %x, 3
+ %sel = select i1 %cmp, i8 2, i8 %y
+ %cmp2 = icmp sgt i8 %sel, %x
+ ret i1 %cmp2
+}
+
+define i1 @sgt_x_impliesF_eq_smin_todo(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @sgt_x_impliesF_eq_smin_todo(
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i8 -128, i8 [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[SEL]], [[X]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+;
+ %cmp = icmp sgt i8 %x, %z
+ %sel = select i1 %cmp, i8 -128, i8 %y
+ %cmp2 = icmp eq i8 %sel, %x
+ ret i1 %cmp2
+}
+
+define i1 @slt_x_impliesT_ne_smin_todo(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @slt_x_impliesT_ne_smin_todo(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i8 127, i8 [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i8 [[SEL]], [[X]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+;
+ %cmp = icmp slt i8 %x, %z
+ %sel = select i1 %cmp, i8 127, i8 %y
+ %cmp2 = icmp ne i8 %x, %sel
+ ret i1 %cmp2
+}
+
+define i1 @ult_x_impliesT_eq_umax_todo(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @ult_x_impliesT_eq_umax_todo(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 [[Z:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i8 -1, i8 [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i8 [[SEL]], [[X]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+;
+ %cmp = icmp ugt i8 %z, %x
+ %sel = select i1 %cmp, i8 255, i8 %y
+ %cmp2 = icmp ne i8 %sel, %x
+ ret i1 %cmp2
+}
+
+define i1 @ult_1_impliesF_eq_1(i8 %x, i8 %y) {
+; CHECK-LABEL: @ult_1_impliesF_eq_1(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 [[SEL:%.*]], 0
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[X:%.*]], [[SEL]]
+; CHECK-NEXT: [[CMP3:%.*]] = select i1 [[CMP]], i1 [[CMP2]], i1 false
+; CHECK-NEXT: ret i1 [[CMP3]]
+;
+ %cmp = icmp ult i8 %x, 1
+ %sel = select i1 %cmp, i8 1, i8 %y
+ %cmp2 = icmp eq i8 %x, %sel
+ ret i1 %cmp2
+}
+
+define i1 @ugt_x_impliesF_eq_umin_todo(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @ugt_x_impliesF_eq_umin_todo(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 [[Z:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i8 0, i8 [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[SEL]], [[X]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+;
+ %cmp = icmp ugt i8 %z, %x
+ %sel = select i1 %cmp, i8 0, i8 %y
+ %cmp2 = icmp eq i8 %x, %sel
+ ret i1 %cmp2
+}
diff --git a/llvm/test/Transforms/InstCombine/intrinsic-select.ll b/llvm/test/Transforms/InstCombine/intrinsic-select.ll
index a203b28..f37226b 100644
--- a/llvm/test/Transforms/InstCombine/intrinsic-select.ll
+++ b/llvm/test/Transforms/InstCombine/intrinsic-select.ll
@@ -240,3 +240,43 @@ define i32 @vec_to_scalar_select_vector(<2 x i1> %b) {
%c = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %s)
ret i32 %c
}
+
+define i8 @test_drop_noundef(i1 %cond, i8 %val) {
+; CHECK-LABEL: @test_drop_noundef(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i8 @llvm.smin.i8(i8 [[VAL:%.*]], i8 0)
+; CHECK-NEXT: [[RET:%.*]] = select i1 [[COND:%.*]], i8 -1, i8 [[TMP0]]
+; CHECK-NEXT: ret i8 [[RET]]
+;
+entry:
+ %sel = select i1 %cond, i8 -1, i8 %val
+ %ret = call noundef i8 @llvm.smin.i8(i8 %sel, i8 0)
+ ret i8 %ret
+}
+
+define i1 @pr85536(i32 %a) {
+; CHECK-LABEL: @pr85536(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i32 [[A:%.*]], 31
+; CHECK-NEXT: [[SHL1:%.*]] = shl nsw i32 -1, [[A]]
+; CHECK-NEXT: [[ZEXT:%.*]] = zext i32 [[SHL1]] to i64
+; CHECK-NEXT: [[SHL2:%.*]] = shl i64 [[ZEXT]], 48
+; CHECK-NEXT: [[SHR:%.*]] = ashr exact i64 [[SHL2]], 48
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.smin.i64(i64 [[SHR]], i64 0)
+; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 65535
+; CHECK-NEXT: [[RET1:%.*]] = icmp eq i64 [[TMP1]], 0
+; CHECK-NEXT: [[RET:%.*]] = select i1 [[CMP1]], i1 [[RET1]], i1 false
+; CHECK-NEXT: ret i1 [[RET]]
+;
+entry:
+ %cmp1 = icmp ugt i32 %a, 30
+ %shl1 = shl nsw i32 -1, %a
+ %zext = zext i32 %shl1 to i64
+ %shl2 = shl i64 %zext, 48
+ %shr = ashr exact i64 %shl2, 48
+ %sel = select i1 %cmp1, i64 -1, i64 %shr
+ %smin = call noundef i64 @llvm.smin.i64(i64 %sel, i64 0)
+ %masked = and i64 %smin, 65535
+ %ret = icmp eq i64 %masked, 0
+ ret i1 %ret
+}
diff --git a/llvm/test/Transforms/InstCombine/known-bits.ll b/llvm/test/Transforms/InstCombine/known-bits.ll
index 58c2838..5305c78 100644
--- a/llvm/test/Transforms/InstCombine/known-bits.ll
+++ b/llvm/test/Transforms/InstCombine/known-bits.ll
@@ -483,5 +483,56 @@ if.else:
ret i64 13
}
+define i1 @test_icmp_or_disjoint(i8 %n, i1 %other) {
+; CHECK-LABEL: @test_icmp_or_disjoint(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[N_OR:%.*]] = or disjoint i8 [[N:%.*]], 16
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 [[N_OR]], -111
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i1 true
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 [[OTHER:%.*]]
+;
+entry:
+ %n_or = or disjoint i8 %n, 16
+ %cmp = icmp ugt i8 %n_or, 145
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %r = icmp slt i8 %n, 0
+ ret i1 %r
+
+if.else:
+ ret i1 %other
+}
+
+define i1 @test_icmp_or_fail_missing_disjoint(i8 %n, i1 %other) {
+; CHECK-LABEL: @test_icmp_or_fail_missing_disjoint(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[N_OR:%.*]] = or i8 [[N:%.*]], 16
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 [[N_OR]], -111
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[N]], 0
+; CHECK-NEXT: ret i1 [[R]]
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 [[OTHER:%.*]]
+;
+entry:
+ %n_or = or i8 %n, 16
+ %cmp = icmp ugt i8 %n_or, 145
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %r = icmp slt i8 %n, 0
+ ret i1 %r
+
+if.else:
+ ret i1 %other
+}
+
+
+
declare void @use(i1)
declare void @sink(i8)
diff --git a/llvm/test/Transforms/InstCombine/mul-masked-bits.ll b/llvm/test/Transforms/InstCombine/mul-masked-bits.ll
index da7cc2d..e940ae3 100644
--- a/llvm/test/Transforms/InstCombine/mul-masked-bits.ll
+++ b/llvm/test/Transforms/InstCombine/mul-masked-bits.ll
@@ -214,9 +214,8 @@ define i64 @scalar_mul_bit_x0_y0_uses(i64 %x, i64 %y) {
define i64 @scalar_mul_bit_x0_y1(i64 %x, i64 %y) {
; CHECK-LABEL: @scalar_mul_bit_x0_y1(
; CHECK-NEXT: [[AND2:%.*]] = and i64 [[Y:%.*]], 2
-; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[X:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[TMP1]], 0
-; CHECK-NEXT: [[MUL:%.*]] = select i1 [[DOTNOT]], i64 0, i64 [[AND2]]
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i1
+; CHECK-NEXT: [[MUL:%.*]] = select i1 [[TMP1]], i64 [[AND2]], i64 0
; CHECK-NEXT: ret i64 [[MUL]]
;
%and1 = and i64 %x, 1
@@ -228,9 +227,8 @@ define i64 @scalar_mul_bit_x0_y1(i64 %x, i64 %y) {
define i64 @scalar_mul_bit_x0_yC(i64 %x, i64 %y, i64 %c) {
; CHECK-LABEL: @scalar_mul_bit_x0_yC(
; CHECK-NEXT: [[AND2:%.*]] = and i64 [[Y:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[X:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[TMP1]], 0
-; CHECK-NEXT: [[MUL:%.*]] = select i1 [[DOTNOT]], i64 0, i64 [[AND2]]
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i1
+; CHECK-NEXT: [[MUL:%.*]] = select i1 [[TMP1]], i64 [[AND2]], i64 0
; CHECK-NEXT: ret i64 [[MUL]]
;
%and1 = and i64 %x, 1
diff --git a/llvm/test/Transforms/InstCombine/mul.ll b/llvm/test/Transforms/InstCombine/mul.ll
index e7141d7..d4a689c6 100644
--- a/llvm/test/Transforms/InstCombine/mul.ll
+++ b/llvm/test/Transforms/InstCombine/mul.ll
@@ -684,9 +684,8 @@ define <2 x i32> @signbit_mul_vec_commute(<2 x i32> %a, <2 x i32> %b) {
define i32 @lowbit_mul(i32 %a, i32 %b) {
; CHECK-LABEL: @lowbit_mul(
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[A:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT: [[E:%.*]] = select i1 [[DOTNOT]], i32 0, i32 [[B:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[A:%.*]] to i1
+; CHECK-NEXT: [[E:%.*]] = select i1 [[TMP1]], i32 [[B:%.*]], i32 0
; CHECK-NEXT: ret i32 [[E]]
;
%d = and i32 %a, 1
@@ -2049,3 +2048,94 @@ define i32 @zext_negpow2_use(i8 %x) {
%r = mul i32 %zx, -16777216 ; -1 << 24
ret i32 %r
}
+
+define i32 @mul_sext_icmp_with_zero(i32 %x) {
+; CHECK-LABEL: @mul_sext_icmp_with_zero(
+; CHECK-NEXT: ret i32 0
+;
+ %cmp = icmp eq i32 %x, 0
+ %sext = sext i1 %cmp to i32
+ %mul = mul i32 %sext, %x
+ ret i32 %mul
+}
+
+define i32 @test_mul_sext_bool(i1 %x, i32 %y) {
+; CHECK-LABEL: @test_mul_sext_bool(
+; CHECK-NEXT: [[Y_NEG:%.*]] = sub i32 0, [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = select i1 [[X:%.*]], i32 [[Y_NEG]], i32 0
+; CHECK-NEXT: ret i32 [[MUL]]
+;
+ %sext = sext i1 %x to i32
+ %mul = mul i32 %sext, %y
+ ret i32 %mul
+}
+
+define i32 @test_mul_sext_bool_nuw(i1 %x, i32 %y) {
+; CHECK-LABEL: @test_mul_sext_bool_nuw(
+; CHECK-NEXT: [[Y_NEG:%.*]] = sub i32 0, [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = select i1 [[X:%.*]], i32 [[Y_NEG]], i32 0
+; CHECK-NEXT: ret i32 [[MUL]]
+;
+ %sext = sext i1 %x to i32
+ %mul = mul nuw i32 %sext, %y
+ ret i32 %mul
+}
+
+define i32 @test_mul_sext_bool_nsw(i1 %x, i32 %y) {
+; CHECK-LABEL: @test_mul_sext_bool_nsw(
+; CHECK-NEXT: [[Y_NEG:%.*]] = sub nsw i32 0, [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = select i1 [[X:%.*]], i32 [[Y_NEG]], i32 0
+; CHECK-NEXT: ret i32 [[MUL]]
+;
+ %sext = sext i1 %x to i32
+ %mul = mul nsw i32 %sext, %y
+ ret i32 %mul
+}
+
+define i32 @test_mul_sext_bool_nuw_nsw(i1 %x, i32 %y) {
+; CHECK-LABEL: @test_mul_sext_bool_nuw_nsw(
+; CHECK-NEXT: [[Y_NEG:%.*]] = sub nsw i32 0, [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = select i1 [[X:%.*]], i32 [[Y_NEG]], i32 0
+; CHECK-NEXT: ret i32 [[MUL]]
+;
+ %sext = sext i1 %x to i32
+ %mul = mul nuw nsw i32 %sext, %y
+ ret i32 %mul
+}
+
+define i32 @test_mul_sext_bool_commuted(i1 %x, i32 %y) {
+; CHECK-LABEL: @test_mul_sext_bool_commuted(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], -2
+; CHECK-NEXT: [[YY_NEG1:%.*]] = add i32 [[TMP1]], 1
+; CHECK-NEXT: [[MUL:%.*]] = select i1 [[X:%.*]], i32 [[YY_NEG1]], i32 0
+; CHECK-NEXT: ret i32 [[MUL]]
+;
+ %yy = xor i32 %y, 1
+ %sext = sext i1 %x to i32
+ %mul = mul i32 %yy, %sext
+ ret i32 %mul
+}
+
+define i32 @test_mul_sext_nonbool(i2 %x, i32 %y) {
+; CHECK-LABEL: @test_mul_sext_nonbool(
+; CHECK-NEXT: [[SEXT:%.*]] = sext i2 [[X:%.*]] to i32
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[SEXT]], [[Y:%.*]]
+; CHECK-NEXT: ret i32 [[MUL]]
+;
+ %sext = sext i2 %x to i32
+ %mul = mul i32 %sext, %y
+ ret i32 %mul
+}
+
+define i32 @test_mul_sext_multiuse(i1 %x, i32 %y) {
+; CHECK-LABEL: @test_mul_sext_multiuse(
+; CHECK-NEXT: [[SEXT:%.*]] = sext i1 [[X:%.*]] to i32
+; CHECK-NEXT: tail call void @use(i32 [[SEXT]])
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[SEXT]], [[Y:%.*]]
+; CHECK-NEXT: ret i32 [[MUL]]
+;
+ %sext = sext i1 %x to i32
+ tail call void @use(i32 %sext)
+ %mul = mul i32 %sext, %y
+ ret i32 %mul
+}
diff --git a/llvm/test/Transforms/InstCombine/not.ll b/llvm/test/Transforms/InstCombine/not.ll
index f277d13..98b5d980 100644
--- a/llvm/test/Transforms/InstCombine/not.ll
+++ b/llvm/test/Transforms/InstCombine/not.ll
@@ -3,6 +3,8 @@
declare void @use1(i1)
declare void @use8(i8)
+declare void @f1()
+declare void @f2()
define i32 @test1(i32 %A) {
; CHECK-LABEL: @test1(
@@ -858,3 +860,204 @@ define i32 @test_zext(i32 %a, i32 %b){
%not = xor i32 %add, -1
ret i32 %not
}
+
+define void @test_invert_demorgan_or(i32 %a, i32 %b, i1 %cond) {
+; CHECK-LABEL: @test_invert_demorgan_or(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[B:%.*]], 0
+; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i32 [[B1:%.*]], 0
+; CHECK-NEXT: [[OR_NOT1:%.*]] = and i1 [[CMP2]], [[CMP3]]
+; CHECK-NEXT: [[MERGE:%.*]] = and i1 [[OR_NOT1]], [[COND:%.*]]
+; CHECK-NEXT: br i1 [[MERGE]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: call void @f1()
+; CHECK-NEXT: unreachable
+; CHECK: if.else:
+; CHECK-NEXT: call void @f2()
+; CHECK-NEXT: unreachable
+;
+entry:
+ %cmp1 = icmp eq i32 %a, 0
+ %cmp2 = icmp ne i32 %b, 0
+ %or = or i1 %cmp1, %cmp2
+ %not = xor i1 %cond, true
+ %merge = or i1 %not, %or
+ br i1 %merge, label %if.then, label %if.else
+if.then:
+ call void @f1()
+ unreachable
+if.else:
+ call void @f2()
+ unreachable
+}
+
+define i1 @test_invert_demorgan_or2(i64 %a, i64 %b, i64 %c) {
+; CHECK-LABEL: @test_invert_demorgan_or2(
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i64 [[A:%.*]], 24
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[B:%.*]], 60
+; CHECK-NEXT: [[OR1_NOT1:%.*]] = and i1 [[CMP1]], [[CMP2]]
+; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i64 [[C:%.*]], 60
+; CHECK-NEXT: [[NOT:%.*]] = and i1 [[OR1_NOT1]], [[CMP3]]
+; CHECK-NEXT: ret i1 [[NOT]]
+;
+ %cmp1 = icmp ugt i64 %a, 23
+ %cmp2 = icmp ugt i64 %b, 59
+ %or1 = or i1 %cmp1, %cmp2
+ %cmp3 = icmp ugt i64 %c, 59
+ %or2 = or i1 %or1, %cmp3
+ %not = xor i1 %or2, true
+ ret i1 %not
+}
+
+define i1 @test_invert_demorgan_or3(i32 %a, i32 %b) {
+; CHECK-LABEL: @test_invert_demorgan_or3(
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[A:%.*]], 178206
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B:%.*]], -196608
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[TMP1]], -1506
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[B]], -917760
+; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i32 [[TMP2]], -716213
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[B]], -1114112
+; CHECK-NEXT: [[CMP4:%.*]] = icmp ult i32 [[TMP3]], -196112
+; CHECK-NEXT: [[OR1_NOT2:%.*]] = and i1 [[CMP1]], [[CMP2]]
+; CHECK-NEXT: [[OR2_NOT1:%.*]] = and i1 [[OR1_NOT2]], [[CMP3]]
+; CHECK-NEXT: [[NOT:%.*]] = and i1 [[OR2_NOT1]], [[CMP4]]
+; CHECK-NEXT: ret i1 [[NOT]]
+;
+ %cmp1 = icmp eq i32 %a, 178206
+ %v1 = add i32 %b, -195102
+ %cmp2 = icmp ult i32 %v1, 1506
+ %v2 = add i32 %b, -201547
+ %cmp3 = icmp ult i32 %v2, 716213
+ %v3 = add i32 %b, -918000
+ %cmp4 = icmp ult i32 %v3, 196112
+ %or1 = or i1 %cmp1, %cmp2
+ %or2 = or i1 %or1, %cmp3
+ %or3 = or i1 %or2, %cmp4
+ %not = xor i1 %or3, true
+ ret i1 %not
+}
+
+define i1 @test_invert_demorgan_logical_or(i64 %x, i64 %y) {
+; CHECK-LABEL: @test_invert_demorgan_logical_or(
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i64 [[X:%.*]], 27
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i64 [[Y:%.*]], 0
+; CHECK-NEXT: [[SEL_NOT1:%.*]] = select i1 [[CMP1]], i1 [[CMP2]], i1 false
+; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i64 [[X]], 0
+; CHECK-NEXT: [[NOT:%.*]] = and i1 [[CMP3]], [[SEL_NOT1]]
+; CHECK-NEXT: ret i1 [[NOT]]
+;
+ %cmp1 = icmp eq i64 %x, 27
+ %cmp2 = icmp eq i64 %y, 0
+ %sel = select i1 %cmp1, i1 true, i1 %cmp2
+ %cmp3 = icmp eq i64 %x, 0
+ %or = or i1 %cmp3, %sel
+ %not = xor i1 %or, true
+ ret i1 %not
+}
+
+define i1 @test_invert_demorgan_and(i32 %a, i32 %b, i1 %cond) {
+; CHECK-LABEL: @test_invert_demorgan_and(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[B:%.*]], 0
+; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i32 [[B1:%.*]], 0
+; CHECK-NEXT: [[AND_NOT1:%.*]] = or i1 [[CMP2]], [[CMP3]]
+; CHECK-NEXT: [[MERGE:%.*]] = or i1 [[AND_NOT1]], [[COND:%.*]]
+; CHECK-NEXT: br i1 [[MERGE]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: call void @f1()
+; CHECK-NEXT: unreachable
+; CHECK: if.else:
+; CHECK-NEXT: call void @f2()
+; CHECK-NEXT: unreachable
+;
+entry:
+ %cmp1 = icmp eq i32 %a, 0
+ %cmp2 = icmp ne i32 %b, 0
+ %and = and i1 %cmp1, %cmp2
+ %not = xor i1 %cond, true
+ %merge = and i1 %not, %and
+ br i1 %merge, label %if.then, label %if.else
+if.then:
+ call void @f1()
+ unreachable
+if.else:
+ call void @f2()
+ unreachable
+}
+
+define i64 @test_invert_demorgan_and2(i64 %x) {
+; CHECK-LABEL: @test_invert_demorgan_and2(
+; CHECK-NEXT: [[TMP1:%.*]] = sub i64 0, [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = or i64 [[TMP1]], -9223372036854775808
+; CHECK-NEXT: ret i64 [[SUB]]
+;
+ %add = add i64 %x, 9223372036854775807
+ %and = and i64 %add, 9223372036854775807
+ %sub = xor i64 %and, -1
+ ret i64 %sub
+}
+
+define i1 @test_invert_demorgan_and3(i32 %a, i32 %b) {
+; CHECK-LABEL: @test_invert_demorgan_and3(
+; CHECK-NEXT: [[ADD:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[ADD]], 4095
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 4095
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %not = xor i32 %a, -1
+ %add = add i32 %b, %not
+ %and = and i32 %add, 4095
+ %cmp = icmp eq i32 %and, 0
+ ret i1 %cmp
+}
+
+define i1 @test_invert_demorgan_logical_and(i64 %x, i64 %y) {
+; CHECK-LABEL: @test_invert_demorgan_logical_and(
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i64 [[X:%.*]], 27
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i64 [[Y:%.*]], 0
+; CHECK-NEXT: [[SEL_NOT1:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP2]]
+; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i64 [[X]], 0
+; CHECK-NEXT: [[NOT:%.*]] = and i1 [[CMP3]], [[SEL_NOT1]]
+; CHECK-NEXT: ret i1 [[NOT]]
+;
+ %cmp1 = icmp eq i64 %x, 27
+ %cmp2 = icmp eq i64 %y, 0
+ %sel = select i1 %cmp1, i1 %cmp2, i1 false
+ %cmp3 = icmp eq i64 %x, 0
+ %or = or i1 %cmp3, %sel
+ %not = xor i1 %or, true
+ ret i1 %not
+}
+
+define i1 @test_invert_demorgan_and_multiuse(i32 %a, i32 %b, i1 %cond) {
+; CHECK-LABEL: @test_invert_demorgan_and_multiuse(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[A:%.*]], 0
+; CHECK-NEXT: call void @use1(i1 [[CMP1]])
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[B:%.*]], 0
+; CHECK-NEXT: [[NOT:%.*]] = xor i1 [[COND:%.*]], true
+; CHECK-NEXT: [[TMP0:%.*]] = and i1 [[CMP2]], [[NOT]]
+; CHECK-NEXT: [[MERGE:%.*]] = and i1 [[TMP0]], [[CMP1]]
+; CHECK-NEXT: br i1 [[MERGE]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: call void @f1()
+; CHECK-NEXT: unreachable
+; CHECK: if.else:
+; CHECK-NEXT: call void @f2()
+; CHECK-NEXT: unreachable
+;
+entry:
+ %cmp1 = icmp eq i32 %a, 0
+ call void @use1(i1 %cmp1)
+ %cmp2 = icmp ne i32 %b, 0
+ %and = and i1 %cmp1, %cmp2
+ %not = xor i1 %cond, true
+ %merge = and i1 %not, %and
+ br i1 %merge, label %if.then, label %if.else
+if.then:
+ call void @f1()
+ unreachable
+if.else:
+ call void @f2()
+ unreachable
+}
diff --git a/llvm/test/Transforms/InstCombine/phi.ll b/llvm/test/Transforms/InstCombine/phi.ll
index e1ae6c1..7eb508e 100644
--- a/llvm/test/Transforms/InstCombine/phi.ll
+++ b/llvm/test/Transforms/InstCombine/phi.ll
@@ -116,8 +116,8 @@ define i32 @test6(i16 %A, i1 %b) {
; CHECK: BB1:
; CHECK-NEXT: br label [[BB2]]
; CHECK: BB2:
-; CHECK-NEXT: [[B:%.*]] = zext i16 [[A:%.*]] to i32
-; CHECK-NEXT: ret i32 [[B]]
+; CHECK-NEXT: [[C:%.*]] = zext i16 [[A:%.*]] to i32
+; CHECK-NEXT: ret i32 [[C]]
;
BB0:
%X = zext i16 %A to i32
@@ -129,8 +129,8 @@ BB1:
BB2:
;; Suck casts into phi
- %B = phi i32 [ %X, %BB0 ], [ %Y, %BB1 ]
- ret i32 %B
+ %c = phi i32 [ %X, %BB0 ], [ %Y, %BB1 ]
+ ret i32 %c
}
define i32 @test_dead_cycle(i32 %A, i1 %cond) {
@@ -232,8 +232,8 @@ define ptr @test8(ptr %A, i1 %b) {
; CHECK: BB1:
; CHECK-NEXT: br label [[BB2]]
; CHECK: BB2:
-; CHECK-NEXT: [[B:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
-; CHECK-NEXT: ret ptr [[B]]
+; CHECK-NEXT: [[C:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
+; CHECK-NEXT: ret ptr [[C]]
;
BB0:
%X = getelementptr inbounds { i32, i32 }, ptr %A, i32 0, i32 1
@@ -245,8 +245,8 @@ BB1:
BB2:
;; Suck GEPs into phi
- %B = phi ptr [ %X, %BB0 ], [ %Y, %BB1 ]
- ret ptr %B
+ %c = phi ptr [ %X, %BB0 ], [ %Y, %BB1 ]
+ ret ptr %c
}
define i32 @test9(ptr %A, ptr %B) {
@@ -489,9 +489,8 @@ define i64 @test15b(i64 %A, i1 %b) {
; CHECK-NEXT: [[Y_OFF0:%.*]] = phi i64 [ [[A]], [[ENTRY]] ], [ [[C]], [[ONE]] ]
; CHECK-NEXT: [[Y_OFF64]] = phi i64 [ [[A]], [[ENTRY]] ], [ 0, [[ONE]] ]
; CHECK-NEXT: [[D:%.*]] = call i64 @test15a(i64 [[Y_OFF64]])
-; CHECK-NEXT: [[TMP0:%.*]] = and i64 [[D]], 1
-; CHECK-NEXT: [[D1_NOT:%.*]] = icmp eq i64 [[TMP0]], 0
-; CHECK-NEXT: br i1 [[D1_NOT]], label [[END:%.*]], label [[ONE]]
+; CHECK-NEXT: [[D1:%.*]] = trunc i64 [[D]] to i1
+; CHECK-NEXT: br i1 [[D1]], label [[ONE]], label [[END:%.*]]
; CHECK: end:
; CHECK-NEXT: ret i64 [[Y_OFF0]]
;
diff --git a/llvm/test/Transforms/InstCombine/powi.ll b/llvm/test/Transforms/InstCombine/powi.ll
index 89efbb6..6c0575e 100644
--- a/llvm/test/Transforms/InstCombine/powi.ll
+++ b/llvm/test/Transforms/InstCombine/powi.ll
@@ -125,22 +125,55 @@ entry:
ret double %mul
}
-define double @powi_fmul_powi_no_reassoc(double %x, i32 %y, i32 %z) {
-; CHECK-LABEL: @powi_fmul_powi_no_reassoc(
+; Negative test: Missing reassoc flag on fmul
+define double @powi_fmul_powi_no_reassoc1(double %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @powi_fmul_powi_no_reassoc1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
-; CHECK-NEXT: [[P2:%.*]] = tail call double @llvm.powi.f64.i32(double [[X]], i32 [[Z:%.*]])
+; CHECK-NEXT: [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[P2:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[Z:%.*]])
; CHECK-NEXT: [[MUL:%.*]] = fmul double [[P2]], [[P1]]
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
%mul = fmul double %p2, %p1
ret double %mul
}
+; Negative test: Missing reassoc flag on 2nd operand
+define double @powi_fmul_powi_no_reassoc2(double %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @powi_fmul_powi_no_reassoc2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[P2:%.*]] = tail call double @llvm.powi.f64.i32(double [[X]], i32 [[Z:%.*]])
+; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
+; CHECK-NEXT: ret double [[MUL]]
+;
+entry:
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+ %mul = fmul reassoc double %p2, %p1
+ ret double %mul
+}
+
+; Negative test: Missing reassoc flag on 1st operand
+define double @powi_fmul_powi_no_reassoc3(double %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @powi_fmul_powi_no_reassoc3(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[P2:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[Z:%.*]])
+; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
+; CHECK-NEXT: ret double [[MUL]]
+;
+entry:
+ %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
+ %mul = fmul reassoc double %p2, %p1
+ ret double %mul
+}
+; Positive test: the fold requires the reassoc flag on the fmul and on both powi operands
define double @powi_fmul_powi(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi(
; CHECK-NEXT: entry:
@@ -149,8 +182,8 @@ define double @powi_fmul_powi(double %x, i32 %y, i32 %z) {
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
%mul = fmul reassoc double %p2, %p1
ret double %mul
}
@@ -163,8 +196,8 @@ define double @powi_fmul_powi_fast_on_fmul(double %x, i32 %y, i32 %z) {
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+ %p1 = tail call fast double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call fast double @llvm.powi.f64.i32(double %x, i32 %z)
%mul = fmul fast double %p2, %p1
ret double %mul
}
@@ -192,8 +225,23 @@ define double @powi_fmul_powi_same_power(double %x, i32 %y, i32 %z) {
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+ %mul = fmul reassoc double %p2, %p1
+ ret double %mul
+}
+
+define double @powi_fmul_powi_different_integer_types(double %x, i32 %y, i16 %z) {
+; CHECK-LABEL: @powi_fmul_powi_different_integer_types(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[P2:%.*]] = tail call reassoc double @llvm.powi.f64.i16(double [[X]], i16 [[Z:%.*]])
+; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
+; CHECK-NEXT: ret double [[MUL]]
+;
+entry:
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call reassoc double @llvm.powi.f64.i16(double %x, i16 %z)
%mul = fmul reassoc double %p2, %p1
ret double %mul
}
@@ -201,16 +249,16 @@ entry:
define double @powi_fmul_powi_use_first(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi_use_first(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT: tail call void @use(double [[P1]])
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[Y]], [[Z:%.*]]
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[TMP0]])
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
tail call void @use(double %p1)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
%mul = fmul reassoc double %p1, %p2
ret double %mul
}
@@ -218,16 +266,16 @@ entry:
define double @powi_fmul_powi_use_second(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi_use_second(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Z:%.*]])
+; CHECK-NEXT: [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Z:%.*]])
; CHECK-NEXT: tail call void @use(double [[P1]])
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[Y:%.*]], [[Z]]
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[TMP0]])
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
tail call void @use(double %p1)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
%mul = fmul reassoc double %p2, %p1
ret double %mul
}
@@ -265,7 +313,7 @@ define double @fdiv_pow_powi(double %x) {
; CHECK-NEXT: [[DIV:%.*]] = fmul reassoc nnan double [[X:%.*]], [[X]]
; CHECK-NEXT: ret double [[DIV]]
;
- %p1 = call double @llvm.powi.f64.i32(double %x, i32 3)
+ %p1 = call reassoc double @llvm.powi.f64.i32(double %x, i32 3)
%div = fdiv reassoc nnan double %p1, %x
ret double %div
}
@@ -275,7 +323,7 @@ define float @fdiv_powf_powi(float %x) {
; CHECK-NEXT: [[DIV:%.*]] = call reassoc nnan float @llvm.powi.f32.i32(float [[X:%.*]], i32 99)
; CHECK-NEXT: ret float [[DIV]]
;
- %p1 = call float @llvm.powi.f32.i32(float %x, i32 100)
+ %p1 = call reassoc float @llvm.powi.f32.i32(float %x, i32 100)
%div = fdiv reassoc nnan float %p1, %x
ret float %div
}
@@ -299,10 +347,21 @@ define double @fdiv_pow_powi_multi_use(double %x) {
define float @fdiv_powf_powi_missing_reassoc(float %x) {
; CHECK-LABEL: @fdiv_powf_powi_missing_reassoc(
; CHECK-NEXT: [[P1:%.*]] = call float @llvm.powi.f32.i32(float [[X:%.*]], i32 100)
-; CHECK-NEXT: [[DIV:%.*]] = fdiv nnan float [[P1]], [[X]]
+; CHECK-NEXT: [[DIV:%.*]] = fdiv reassoc nnan float [[P1]], [[X]]
; CHECK-NEXT: ret float [[DIV]]
;
%p1 = call float @llvm.powi.f32.i32(float %x, i32 100)
+ %div = fdiv reassoc nnan float %p1, %x
+ ret float %div
+}
+
+define float @fdiv_powf_powi_missing_reassoc1(float %x) {
+; CHECK-LABEL: @fdiv_powf_powi_missing_reassoc1(
+; CHECK-NEXT: [[P1:%.*]] = call reassoc float @llvm.powi.f32.i32(float [[X:%.*]], i32 100)
+; CHECK-NEXT: [[DIV:%.*]] = fdiv nnan float [[P1]], [[X]]
+; CHECK-NEXT: ret float [[DIV]]
+;
+ %p1 = call reassoc float @llvm.powi.f32.i32(float %x, i32 100)
%div = fdiv nnan float %p1, %x
ret float %div
}
@@ -333,11 +392,60 @@ define double @fdiv_pow_powi_negative(double %x) {
; Negative test: The 2nd powi argument is a variable
define double @fdiv_pow_powi_negative_variable(double %x, i32 %y) {
; CHECK-LABEL: @fdiv_pow_powi_negative_variable(
-; CHECK-NEXT: [[P1:%.*]] = call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[P1:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT: [[DIV:%.*]] = fdiv reassoc nnan double [[P1]], [[X]]
; CHECK-NEXT: ret double [[DIV]]
;
- %p1 = call double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p1 = call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
%div = fdiv reassoc nnan double %p1, %x
ret double %div
}
+
+; powi(X, Y) * X --> powi(X, Y+1)
+define double @powi_fmul_powi_x(double noundef %x) {
+; CHECK-LABEL: @powi_fmul_powi_x(
+; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 4)
+; CHECK-NEXT: ret double [[MUL]]
+;
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 3)
+ %mul = fmul reassoc double %p1, %x
+ ret double %mul
+}
+
+; Negative test: Multi-use
+define double @powi_fmul_powi_x_multi_use(double noundef %x) {
+; CHECK-LABEL: @powi_fmul_powi_x_multi_use(
+; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 3)
+; CHECK-NEXT: tail call void @use(double [[P1]])
+; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[P1]], [[X]]
+; CHECK-NEXT: ret double [[MUL]]
+;
+ %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 3)
+ tail call void @use(double %p1)
+ %mul = fmul reassoc double %p1, %x
+ ret double %mul
+}
+
+; Negative test: missing reassoc fmf flag
+define double @powi_fmul_powi_x_missing_reassoc(double noundef %x) {
+; CHECK-LABEL: @powi_fmul_powi_x_missing_reassoc(
+; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 3)
+; CHECK-NEXT: [[MUL:%.*]] = fmul double [[P1]], [[X]]
+; CHECK-NEXT: ret double [[MUL]]
+;
+ %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 3)
+ %mul = fmul double %p1, %x
+ ret double %mul
+}
+
+; Negative test: Y + 1 would overflow (Y == INT_MAX)
+define double @powi_fmul_powi_x_overflow(double noundef %x) {
+; CHECK-LABEL: @powi_fmul_powi_x_overflow(
+; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 2147483647)
+; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[P1]], [[X]]
+; CHECK-NEXT: ret double [[MUL]]
+;
+ %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 2147483647) ; INT_MAX
+ %mul = fmul reassoc double %p1, %x
+ ret double %mul
+}
diff --git a/llvm/test/Transforms/InstCombine/pr63791.ll b/llvm/test/Transforms/InstCombine/pr63791.ll
index 78cc113..73a559f9 100644
--- a/llvm/test/Transforms/InstCombine/pr63791.ll
+++ b/llvm/test/Transforms/InstCombine/pr63791.ll
@@ -15,7 +15,7 @@ define void @y() {
; CHECK-NEXT: store i1 true, ptr poison, align 1
; CHECK-NEXT: br i1 poison, label [[FOR_COND_I]], label [[FOR_COND5_PREHEADER_I]]
; CHECK: for.cond5.preheader.i:
-; CHECK-NEXT: br i1 false, label [[FOR_INC19_I:%.*]], label [[FOR_COND1_LOOPEXIT_I:%.*]]
+; CHECK-NEXT: br i1 true, label [[FOR_COND1_LOOPEXIT_I:%.*]], label [[FOR_INC19_I:%.*]]
; CHECK: for.inc19.i:
; CHECK-NEXT: br i1 poison, label [[FOR_INC19_I]], label [[FOR_COND1_LOOPEXIT_I]]
;
diff --git a/llvm/test/Transforms/InstCombine/ptr-int-cast.ll b/llvm/test/Transforms/InstCombine/ptr-int-cast.ll
index 6f5814e..69b8f69 100644
--- a/llvm/test/Transforms/InstCombine/ptr-int-cast.ll
+++ b/llvm/test/Transforms/InstCombine/ptr-int-cast.ll
@@ -6,8 +6,7 @@ define i1 @test1(ptr %x) nounwind {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[X:%.*]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP0]] to i1
; CHECK-NEXT: ret i1 [[TMP2]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll b/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll
index bbb8d84..ad55b50 100644
--- a/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll
+++ b/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll
@@ -5,8 +5,7 @@ define i1 @reduce_add_self(<8 x i1> %x) {
; CHECK-LABEL: @reduce_add_self(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i1> [[X:%.*]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.ctpop.i8(i8 [[TMP1]]), !range [[RNG0:![0-9]+]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[TMP2]], 1
-; CHECK-NEXT: [[RES:%.*]] = icmp ne i8 [[TMP3]], 0
+; CHECK-NEXT: [[RES:%.*]] = trunc i8 [[TMP2]] to i1
; CHECK-NEXT: ret i1 [[RES]]
;
%res = call i1 @llvm.vector.reduce.add.v8i32(<8 x i1> %x)
diff --git a/llvm/test/Transforms/InstCombine/reduction-xor-sext-zext-i1.ll b/llvm/test/Transforms/InstCombine/reduction-xor-sext-zext-i1.ll
index 97b6f7b..84ac936 100644
--- a/llvm/test/Transforms/InstCombine/reduction-xor-sext-zext-i1.ll
+++ b/llvm/test/Transforms/InstCombine/reduction-xor-sext-zext-i1.ll
@@ -5,8 +5,7 @@ define i1 @reduce_xor_self(<8 x i1> %x) {
; CHECK-LABEL: @reduce_xor_self(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i1> [[X:%.*]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.ctpop.i8(i8 [[TMP1]]), !range [[RNG0:![0-9]+]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[TMP2]], 1
-; CHECK-NEXT: [[RES:%.*]] = icmp ne i8 [[TMP3]], 0
+; CHECK-NEXT: [[RES:%.*]] = trunc i8 [[TMP2]] to i1
; CHECK-NEXT: ret i1 [[RES]]
;
%res = call i1 @llvm.vector.reduce.xor.v8i32(<8 x i1> %x)
@@ -17,9 +16,8 @@ define i32 @reduce_xor_sext(<4 x i1> %x) {
; CHECK-LABEL: @reduce_xor_sext(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i1> [[X:%.*]] to i4
; CHECK-NEXT: [[TMP2:%.*]] = call i4 @llvm.ctpop.i4(i4 [[TMP1]]), !range [[RNG1:![0-9]+]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i4 [[TMP2]], 1
-; CHECK-NEXT: [[SEXT:%.*]] = sub nsw i4 0, [[TMP3]]
-; CHECK-NEXT: [[RES:%.*]] = sext i4 [[SEXT]] to i32
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i4 [[TMP2]] to i1
+; CHECK-NEXT: [[RES:%.*]] = sext i1 [[TMP3]] to i32
; CHECK-NEXT: ret i32 [[RES]]
;
%sext = sext <4 x i1> %x to <4 x i32>
@@ -57,9 +55,8 @@ define i8 @reduce_xor_zext_long(<128 x i1> %x) {
; CHECK-LABEL: @reduce_xor_zext_long(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <128 x i1> [[X:%.*]] to i128
; CHECK-NEXT: [[TMP2:%.*]] = call i128 @llvm.ctpop.i128(i128 [[TMP1]]), !range [[RNG3:![0-9]+]]
-; CHECK-NEXT: [[TMP3:%.*]] = trunc i128 [[TMP2]] to i8
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[TMP3]], 1
-; CHECK-NEXT: [[RES:%.*]] = sub nsw i8 0, [[TMP4]]
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i128 [[TMP2]] to i1
+; CHECK-NEXT: [[RES:%.*]] = sext i1 [[TMP3]] to i8
; CHECK-NEXT: ret i8 [[RES]]
;
%sext = sext <128 x i1> %x to <128 x i8>
@@ -72,9 +69,8 @@ define i8 @reduce_xor_zext_long_external_use(<128 x i1> %x) {
; CHECK-LABEL: @reduce_xor_zext_long_external_use(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <128 x i1> [[X:%.*]] to i128
; CHECK-NEXT: [[TMP2:%.*]] = call i128 @llvm.ctpop.i128(i128 [[TMP1]]), !range [[RNG3]]
-; CHECK-NEXT: [[TMP3:%.*]] = trunc i128 [[TMP2]] to i8
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[TMP3]], 1
-; CHECK-NEXT: [[RES:%.*]] = sub nsw i8 0, [[TMP4]]
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i128 [[TMP2]] to i1
+; CHECK-NEXT: [[RES:%.*]] = sext i1 [[TMP3]] to i8
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <128 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = sext i1 [[TMP5]] to i8
; CHECK-NEXT: store i8 [[EXT]], ptr @glob, align 1
diff --git a/llvm/test/Transforms/InstCombine/sadd-with-overflow.ll b/llvm/test/Transforms/InstCombine/sadd-with-overflow.ll
index 4b37ccb..729ca03 100644
--- a/llvm/test/Transforms/InstCombine/sadd-with-overflow.ll
+++ b/llvm/test/Transforms/InstCombine/sadd-with-overflow.ll
@@ -122,3 +122,35 @@ define { i32, i1 } @fold_sub_simple(i32 %x) {
%b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 30)
ret { i32, i1 } %b
}
+
+define { i32, i1 } @fold_with_disjoint_or(i32 %x) {
+; CHECK-LABEL: @fold_with_disjoint_or(
+; CHECK-NEXT: [[B:%.*]] = add i32 [[X:%.*]], 6
+; CHECK-NEXT: [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 poison, i1 false }, i32 [[B]], 0
+; CHECK-NEXT: ret { i32, i1 } [[TMP1]]
+;
+ %a = or disjoint i32 %x, 13
+ %b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 -7)
+ ret { i32, i1 } %b
+}
+
+define { i32, i1 } @fold_with_disjoint_or2(i32 %x) {
+; CHECK-LABEL: @fold_with_disjoint_or2(
+; CHECK-NEXT: [[B:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 127)
+; CHECK-NEXT: ret { i32, i1 } [[B]]
+;
+ %a = or disjoint i32 %x, 100
+ %b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 27)
+ ret { i32, i1 } %b
+}
+
+define { i32, i1 } @fold_with_or_fail(i32 %x) {
+; CHECK-LABEL: @fold_with_or_fail(
+; CHECK-NEXT: [[A:%.*]] = or i32 [[X:%.*]], 100
+; CHECK-NEXT: [[B:%.*]] = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A]], i32 27)
+; CHECK-NEXT: ret { i32, i1 } [[B]]
+;
+ %a = or i32 %x, 100
+ %b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 27)
+ ret { i32, i1 } %b
+}
diff --git a/llvm/test/Transforms/InstCombine/scalarization.ll b/llvm/test/Transforms/InstCombine/scalarization.ll
index fe6dc52..7e645ef 100644
--- a/llvm/test/Transforms/InstCombine/scalarization.ll
+++ b/llvm/test/Transforms/InstCombine/scalarization.ll
@@ -341,6 +341,17 @@ define i1 @extractelt_vector_fcmp_constrhs_dynidx(<2 x float> %arg, i32 %idx) {
ret i1 %ext
}
+define i1 @extractelt_vector_fcmp_copy_flags(<4 x float> %x) {
+; CHECK-LABEL: @extractelt_vector_fcmp_copy_flags(
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[X:%.*]], i64 2
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp nsz arcp oeq float [[TMP1]], 0.000000e+00
+; CHECK-NEXT: ret i1 [[TMP2]]
+;
+ %cmp = fcmp nsz arcp oeq <4 x float> %x, zeroinitializer
+ %r = extractelement <4 x i1> %cmp, i32 2
+ ret i1 %r
+}
+
define i1 @extractelt_vector_fcmp_not_cheap_to_scalarize_multi_use(<2 x float> %arg0, <2 x float> %arg1, <2 x float> %arg2, i32 %idx) {
;
; CHECK-LABEL: @extractelt_vector_fcmp_not_cheap_to_scalarize_multi_use(
diff --git a/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll b/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll
index 496854c..77ff16a 100644
--- a/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll
+++ b/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll
@@ -1,8 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-define float @select_fadd(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fadd(
+define float @select_maybe_nan_fadd(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_maybe_nan_fadd(
+; CHECK-NEXT: [[C:%.*]] = fadd float [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[D:%.*]] = select i1 [[COND:%.*]], float [[C]], float [[A]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %C = fadd float %A, %B
+ %D = select i1 %cond, float %C, float %A
+ ret float %D
+}
+
+define float @select_fpclass_fadd(i1 %cond, float nofpclass(nan) %A, float %B) {
+; CHECK-LABEL: @select_fpclass_fadd(
; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00
; CHECK-NEXT: [[D:%.*]] = fadd float [[C]], [[A:%.*]]
; CHECK-NEXT: ret float [[D]]
@@ -12,41 +23,52 @@ define float @select_fadd(i1 %cond, float %A, float %B) {
ret float %D
}
-define float @select_fadd_swapped(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fadd_swapped(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float -0.000000e+00, float [[B:%.*]]
+define float @select_nnan_fadd(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fadd(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00
; CHECK-NEXT: [[D:%.*]] = fadd float [[C]], [[A:%.*]]
; CHECK-NEXT: ret float [[D]]
;
%C = fadd float %A, %B
- %D = select i1 %cond, float %A, float %C
+ %D = select nnan i1 %cond, float %C, float %A
ret float %D
}
-define float @select_fadd_fast_math(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fadd_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00
+define float @select_nnan_fadd_swapped(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fadd_swapped(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float -0.000000e+00, float [[B:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fadd float [[C]], [[A:%.*]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %C = fadd float %A, %B
+ %D = select nnan i1 %cond, float %A, float %C
+ ret float %D
+}
+
+define float @select_nnan_fadd_fast_math(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fadd_fast_math(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00
; CHECK-NEXT: [[D:%.*]] = fadd fast float [[C]], [[A:%.*]]
; CHECK-NEXT: ret float [[D]]
;
%C = fadd fast float %A, %B
- %D = select i1 %cond, float %C, float %A
+ %D = select nnan i1 %cond, float %C, float %A
ret float %D
}
-define float @select_fadd_swapped_fast_math(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fadd_swapped_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float -0.000000e+00, float [[B:%.*]]
+define float @select_nnan_fadd_swapped_fast_math(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fadd_swapped_fast_math(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float -0.000000e+00, float [[B:%.*]]
; CHECK-NEXT: [[D:%.*]] = fadd fast float [[C]], [[A:%.*]]
; CHECK-NEXT: ret float [[D]]
;
%C = fadd fast float %A, %B
- %D = select i1 %cond, float %A, float %C
+ %D = select nnan i1 %cond, float %A, float %C
ret float %D
}
-define <4 x float> @select_nsz_fadd_v4f32(<4 x i1> %cond, <4 x float> %A, <4 x float> %B) {
-; CHECK-LABEL: @select_nsz_fadd_v4f32(
+define <4 x float> @select_nnan_nsz_fadd_v4f32(<4 x i1> %cond, <4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: @select_nnan_nsz_fadd_v4f32(
; CHECK-NEXT: [[C:%.*]] = select nnan nsz <4 x i1> [[COND:%.*]], <4 x float> [[B:%.*]], <4 x float> zeroinitializer
; CHECK-NEXT: [[D:%.*]] = fadd nnan nsz <4 x float> [[C]], [[A:%.*]]
; CHECK-NEXT: ret <4 x float> [[D]]
@@ -56,202 +78,202 @@ define <4 x float> @select_nsz_fadd_v4f32(<4 x i1> %cond, <4 x float> %A, <4 x f
ret <4 x float> %D
}
-define <vscale x 4 x float> @select_nsz_fadd_nxv4f32(<vscale x 4 x i1> %cond, <vscale x 4 x float> %A, <vscale x 4 x float> %B) {
-; CHECK-LABEL: @select_nsz_fadd_nxv4f32(
+define <vscale x 4 x float> @select_nnan_nsz_fadd_nxv4f32(<vscale x 4 x i1> %cond, <vscale x 4 x float> %A, <vscale x 4 x float> %B) {
+; CHECK-LABEL: @select_nnan_nsz_fadd_nxv4f32(
; CHECK-NEXT: [[C:%.*]] = select nnan nsz <vscale x 4 x i1> [[COND:%.*]], <vscale x 4 x float> [[B:%.*]], <vscale x 4 x float> zeroinitializer
; CHECK-NEXT: [[D:%.*]] = fadd nnan nsz <vscale x 4 x float> [[C]], [[A:%.*]]
; CHECK-NEXT: ret <vscale x 4 x float> [[D]]
;
- %C = fadd nsz nnan <vscale x 4 x float> %A, %B
- %D = select nsz nnan <vscale x 4 x i1> %cond, <vscale x 4 x float> %C, <vscale x 4 x float> %A
+ %C = fadd nnan nsz <vscale x 4 x float> %A, %B
+ %D = select nnan nsz <vscale x 4 x i1> %cond, <vscale x 4 x float> %C, <vscale x 4 x float> %A
ret <vscale x 4 x float> %D
}
-define <vscale x 4 x float> @select_nsz_fadd_nxv4f32_swapops(<vscale x 4 x i1> %cond, <vscale x 4 x float> %A, <vscale x 4 x float> %B) {
-; CHECK-LABEL: @select_nsz_fadd_nxv4f32_swapops(
+define <vscale x 4 x float> @select_nnan_nsz_fadd_nxv4f32_swapops(<vscale x 4 x i1> %cond, <vscale x 4 x float> %A, <vscale x 4 x float> %B) {
+; CHECK-LABEL: @select_nnan_nsz_fadd_nxv4f32_swapops(
; CHECK-NEXT: [[C:%.*]] = select fast <vscale x 4 x i1> [[COND:%.*]], <vscale x 4 x float> zeroinitializer, <vscale x 4 x float> [[B:%.*]]
; CHECK-NEXT: [[D:%.*]] = fadd fast <vscale x 4 x float> [[C]], [[A:%.*]]
; CHECK-NEXT: ret <vscale x 4 x float> [[D]]
;
%C = fadd fast <vscale x 4 x float> %A, %B
- %D = select fast <vscale x 4 x i1> %cond, <vscale x 4 x float> %A, <vscale x 4 x float> %C
+ %D = select nnan fast <vscale x 4 x i1> %cond, <vscale x 4 x float> %A, <vscale x 4 x float> %C
ret <vscale x 4 x float> %D
}
-define float @select_fmul(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fmul(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
+define float @select_nnan_fmul(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fmul(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
; CHECK-NEXT: [[D:%.*]] = fmul float [[C]], [[A:%.*]]
; CHECK-NEXT: ret float [[D]]
;
%C = fmul float %A, %B
- %D = select i1 %cond, float %C, float %A
+ %D = select nnan i1 %cond, float %C, float %A
ret float %D
}
-define float @select_fmul_swapped(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fmul_swapped(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]]
+define float @select_nnan_fmul_swapped(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fmul_swapped(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]]
; CHECK-NEXT: [[D:%.*]] = fmul float [[C]], [[A:%.*]]
; CHECK-NEXT: ret float [[D]]
;
%C = fmul float %A, %B
- %D = select i1 %cond, float %A, float %C
+ %D = select nnan i1 %cond, float %A, float %C
ret float %D
}
-define float @select_fmul_fast_math(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fmul_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
+define float @select_nnan_fmul_fast_math(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fmul_fast_math(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
; CHECK-NEXT: [[D:%.*]] = fmul fast float [[C]], [[A:%.*]]
; CHECK-NEXT: ret float [[D]]
;
%C = fmul fast float %A, %B
- %D = select i1 %cond, float %C, float %A
+ %D = select nnan i1 %cond, float %C, float %A
ret float %D
}
-define float @select_fmul_swapped_fast_math(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fmul_swapped_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]]
+define float @select_nnan_fmul_swapped_fast_math(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fmul_swapped_fast_math(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]]
; CHECK-NEXT: [[D:%.*]] = fmul fast float [[C]], [[A:%.*]]
; CHECK-NEXT: ret float [[D]]
;
%C = fmul fast float %A, %B
- %D = select i1 %cond, float %A, float %C
+ %D = select nnan i1 %cond, float %A, float %C
ret float %D
}
-define float @select_fsub(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fsub(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00
+define float @select_nnan_fsub(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fsub(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00
; CHECK-NEXT: [[D:%.*]] = fsub float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fsub float %A, %B
- %D = select i1 %cond, float %C, float %A
+ %D = select nnan i1 %cond, float %C, float %A
ret float %D
}
-define float @select_fsub_swapped(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fsub_swapped(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float 0.000000e+00, float [[B:%.*]]
+define float @select_nnan_fsub_swapped(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fsub_swapped(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float 0.000000e+00, float [[B:%.*]]
; CHECK-NEXT: [[D:%.*]] = fsub float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fsub float %A, %B
- %D = select i1 %cond, float %A, float %C
+ %D = select nnan i1 %cond, float %A, float %C
ret float %D
}
-define float @select_fsub_fast_math(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fsub_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00
+define float @select_nnan_fsub_fast_math(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fsub_fast_math(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00
; CHECK-NEXT: [[D:%.*]] = fsub fast float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fsub fast float %A, %B
- %D = select i1 %cond, float %C, float %A
+ %D = select nnan i1 %cond, float %C, float %A
ret float %D
}
-define float @select_fsub_swapped_fast_math(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fsub_swapped_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float 0.000000e+00, float [[B:%.*]]
+define float @select_nnan_fsub_swapped_fast_math(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fsub_swapped_fast_math(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float 0.000000e+00, float [[B:%.*]]
; CHECK-NEXT: [[D:%.*]] = fsub fast float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fsub fast float %A, %B
- %D = select i1 %cond, float %A, float %C
+ %D = select nnan i1 %cond, float %A, float %C
ret float %D
}
-define <4 x float> @select_nsz_fsub_v4f32(<4 x i1> %cond, <4 x float> %A, <4 x float> %B) {
-; CHECK-LABEL: @select_nsz_fsub_v4f32(
-; CHECK-NEXT: [[C:%.*]] = select nsz <4 x i1> [[COND:%.*]], <4 x float> [[B:%.*]], <4 x float> zeroinitializer
+define <4 x float> @select_nnan_nsz_fsub_v4f32(<4 x i1> %cond, <4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: @select_nnan_nsz_fsub_v4f32(
+; CHECK-NEXT: [[C:%.*]] = select nnan nsz <4 x i1> [[COND:%.*]], <4 x float> [[B:%.*]], <4 x float> zeroinitializer
; CHECK-NEXT: [[D:%.*]] = fsub <4 x float> [[A:%.*]], [[C]]
; CHECK-NEXT: ret <4 x float> [[D]]
;
%C = fsub <4 x float> %A, %B
- %D = select nsz <4 x i1> %cond, <4 x float> %C, <4 x float> %A
+ %D = select nnan nsz <4 x i1> %cond, <4 x float> %C, <4 x float> %A
ret <4 x float> %D
}
-define <vscale x 4 x float> @select_nsz_fsub_nxv4f32(<vscale x 4 x i1> %cond, <vscale x 4 x float> %A, <vscale x 4 x float> %B) {
-; CHECK-LABEL: @select_nsz_fsub_nxv4f32(
-; CHECK-NEXT: [[C:%.*]] = select nsz <vscale x 4 x i1> [[COND:%.*]], <vscale x 4 x float> [[B:%.*]], <vscale x 4 x float> zeroinitializer
+define <vscale x 4 x float> @select_nnan_nsz_fsub_nxv4f32(<vscale x 4 x i1> %cond, <vscale x 4 x float> %A, <vscale x 4 x float> %B) {
+; CHECK-LABEL: @select_nnan_nsz_fsub_nxv4f32(
+; CHECK-NEXT: [[C:%.*]] = select nnan nsz <vscale x 4 x i1> [[COND:%.*]], <vscale x 4 x float> [[B:%.*]], <vscale x 4 x float> zeroinitializer
; CHECK-NEXT: [[D:%.*]] = fsub <vscale x 4 x float> [[A:%.*]], [[C]]
; CHECK-NEXT: ret <vscale x 4 x float> [[D]]
;
%C = fsub <vscale x 4 x float> %A, %B
- %D = select nsz <vscale x 4 x i1> %cond, <vscale x 4 x float> %C, <vscale x 4 x float> %A
+ %D = select nnan nsz <vscale x 4 x i1> %cond, <vscale x 4 x float> %C, <vscale x 4 x float> %A
ret <vscale x 4 x float> %D
}
; 'fsub' can only fold on the amount subtracted.
-define float @select_fsub_invalid(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fsub_invalid(
+define float @select_nnan_fsub_invalid(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fsub_invalid(
; CHECK-NEXT: [[C:%.*]] = fsub float [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[D:%.*]] = select i1 [[COND:%.*]], float [[C]], float [[A]]
+; CHECK-NEXT: [[D:%.*]] = select nnan i1 [[COND:%.*]], float [[C]], float [[A]]
; CHECK-NEXT: ret float [[D]]
;
%C = fsub float %B, %A
- %D = select i1 %cond, float %C, float %A
+ %D = select nnan i1 %cond, float %C, float %A
ret float %D
}
-define float @select_fdiv(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fdiv(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
+define float @select_nnan_fdiv(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fdiv(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
; CHECK-NEXT: [[D:%.*]] = fdiv float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fdiv float %A, %B
- %D = select i1 %cond, float %C, float %A
+ %D = select nnan i1 %cond, float %C, float %A
ret float %D
}
-define float @select_fdiv_swapped(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fdiv_swapped(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]]
+define float @select_nnan_fdiv_swapped(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fdiv_swapped(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]]
; CHECK-NEXT: [[D:%.*]] = fdiv float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fdiv float %A, %B
- %D = select i1 %cond, float %A, float %C
+ %D = select nnan i1 %cond, float %A, float %C
ret float %D
}
-define float @select_fdiv_fast_math(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fdiv_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
+define float @select_nnan_fdiv_fast_math(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fdiv_fast_math(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
; CHECK-NEXT: [[D:%.*]] = fdiv fast float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fdiv fast float %A, %B
- %D = select i1 %cond, float %C, float %A
+ %D = select nnan i1 %cond, float %C, float %A
ret float %D
}
-define float @select_fdiv_swapped_fast_math(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fdiv_swapped_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]]
+define float @select_nnan_fdiv_swapped_fast_math(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fdiv_swapped_fast_math(
+; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]]
; CHECK-NEXT: [[D:%.*]] = fdiv fast float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fdiv fast float %A, %B
- %D = select i1 %cond, float %A, float %C
+ %D = select nnan i1 %cond, float %A, float %C
ret float %D
}
; 'fdiv' can only fold on the divisor amount.
-define float @select_fdiv_invalid(i1 %cond, float %A, float %B) {
-; CHECK-LABEL: @select_fdiv_invalid(
+define float @select_nnan_fdiv_invalid(i1 %cond, float %A, float %B) {
+; CHECK-LABEL: @select_nnan_fdiv_invalid(
; CHECK-NEXT: [[C:%.*]] = fdiv float [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[D:%.*]] = select i1 [[COND:%.*]], float [[C]], float [[A]]
+; CHECK-NEXT: [[D:%.*]] = select nnan i1 [[COND:%.*]], float [[C]], float [[A]]
; CHECK-NEXT: ret float [[D]]
;
%C = fdiv float %B, %A
- %D = select i1 %cond, float %C, float %A
+ %D = select nnan i1 %cond, float %C, float %A
ret float %D
}
diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll
index a849041..278cabd 100644
--- a/llvm/test/Transforms/InstCombine/select.ll
+++ b/llvm/test/Transforms/InstCombine/select.ll
@@ -2925,10 +2925,8 @@ define i8 @select_replacement_loop3(i32 noundef %x) {
define i16 @select_replacement_loop4(i16 noundef %p_12) {
; CHECK-LABEL: @select_replacement_loop4(
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i16 [[P_12:%.*]], 2
-; CHECK-NEXT: [[AND1:%.*]] = and i16 [[P_12]], 1
-; CHECK-NEXT: [[AND2:%.*]] = select i1 [[CMP1]], i16 [[AND1]], i16 0
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i16 [[AND2]], [[P_12]]
+; CHECK-NEXT: [[AND1:%.*]] = and i16 [[P_12:%.*]], 1
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i16 [[P_12]], 2
; CHECK-NEXT: [[AND3:%.*]] = select i1 [[CMP2]], i16 [[AND1]], i16 0
; CHECK-NEXT: ret i16 [[AND3]]
;
@@ -3708,3 +3706,59 @@ define i32 @src_select_xxory_eq0_xorxy_y(i32 %x, i32 %y) {
%cond = select i1 %xor0, i32 %xor, i32 %y
ret i32 %cond
}
+
+define i32 @sequence_select_with_same_cond_false(i1 %c1, i1 %c2){
+; CHECK-LABEL: @sequence_select_with_same_cond_false(
+; CHECK-NEXT: [[S1:%.*]] = select i1 [[C1:%.*]], i32 23, i32 45
+; CHECK-NEXT: [[S2:%.*]] = select i1 [[C2:%.*]], i32 666, i32 [[S1]]
+; CHECK-NEXT: [[S3:%.*]] = select i1 [[C1]], i32 789, i32 [[S2]]
+; CHECK-NEXT: ret i32 [[S3]]
+;
+ %s1 = select i1 %c1, i32 23, i32 45
+ %s2 = select i1 %c2, i32 666, i32 %s1
+ %s3 = select i1 %c1, i32 789, i32 %s2
+ ret i32 %s3
+}
+
+define i32 @sequence_select_with_same_cond_true(i1 %c1, i1 %c2){
+; CHECK-LABEL: @sequence_select_with_same_cond_true(
+; CHECK-NEXT: [[S1:%.*]] = select i1 [[C1:%.*]], i32 45, i32 23
+; CHECK-NEXT: [[S2:%.*]] = select i1 [[C2:%.*]], i32 [[S1]], i32 666
+; CHECK-NEXT: [[S3:%.*]] = select i1 [[C1]], i32 [[S2]], i32 789
+; CHECK-NEXT: ret i32 [[S3]]
+;
+ %s1 = select i1 %c1, i32 45, i32 23
+ %s2 = select i1 %c2, i32 %s1, i32 666
+ %s3 = select i1 %c1, i32 %s2, i32 789
+ ret i32 %s3
+}
+
+define double @sequence_select_with_same_cond_double(double %a, i1 %c1, i1 %c2, double %r1, double %r2){
+; CHECK-LABEL: @sequence_select_with_same_cond_double(
+; CHECK-NEXT: [[S1:%.*]] = select i1 [[C1:%.*]], double 1.000000e+00, double 0.000000e+00
+; CHECK-NEXT: [[S2:%.*]] = select i1 [[C2:%.*]], double [[S1]], double 2.000000e+00
+; CHECK-NEXT: [[S3:%.*]] = select i1 [[C1]], double [[S2]], double 3.000000e+00
+; CHECK-NEXT: ret double [[S3]]
+;
+ %s1 = select i1 %c1, double 1.0, double 0.0
+ %s2 = select i1 %c2, double %s1, double 2.0
+ %s3 = select i1 %c1, double %s2, double 3.0
+ ret double %s3
+}
+
+declare void @use32(i32)
+
+define i32 @sequence_select_with_same_cond_extra_use(i1 %c1, i1 %c2){
+; CHECK-LABEL: @sequence_select_with_same_cond_extra_use(
+; CHECK-NEXT: [[S1:%.*]] = select i1 [[C1:%.*]], i32 23, i32 45
+; CHECK-NEXT: call void @use32(i32 [[S1]])
+; CHECK-NEXT: [[S2:%.*]] = select i1 [[C2:%.*]], i32 666, i32 [[S1]]
+; CHECK-NEXT: [[S3:%.*]] = select i1 [[C1]], i32 789, i32 [[S2]]
+; CHECK-NEXT: ret i32 [[S3]]
+;
+ %s1 = select i1 %c1, i32 23, i32 45
+ call void @use32(i32 %s1)
+ %s2 = select i1 %c2, i32 666, i32 %s1
+ %s3 = select i1 %c1, i32 789, i32 %s2
+ ret i32 %s3
+}
diff --git a/llvm/test/Transforms/InstCombine/select_meta.ll b/llvm/test/Transforms/InstCombine/select_meta.ll
index cd13310..aa794e8 100644
--- a/llvm/test/Transforms/InstCombine/select_meta.ll
+++ b/llvm/test/Transforms/InstCombine/select_meta.ll
@@ -360,23 +360,23 @@ define i128 @select_ashr(i1 %cond, i128 %x, i128 %y) {
define double @select_fmul(i1 %cond, double %x, double %y) {
; CHECK-LABEL: @select_fmul(
-; CHECK-NEXT: [[OP:%.*]] = select i1 [[COND:%.*]], double [[Y:%.*]], double 1.000000e+00, !prof [[PROF0]], !unpredictable [[META2]]
+; CHECK-NEXT: [[OP:%.*]] = select nnan i1 [[COND:%.*]], double [[Y:%.*]], double 1.000000e+00, !prof [[PROF0]], !unpredictable [[META2]]
; CHECK-NEXT: [[RET:%.*]] = fmul double [[OP]], [[X:%.*]]
; CHECK-NEXT: ret double [[RET]]
;
%op = fmul double %x, %y
- %ret = select i1 %cond, double %op, double %x, !prof !1, !unpredictable !3
+ %ret = select nnan i1 %cond, double %op, double %x, !prof !1, !unpredictable !3
ret double %ret
}
define <2 x float> @select_fdiv(i1 %cond, <2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @select_fdiv(
-; CHECK-NEXT: [[OP:%.*]] = select i1 [[COND:%.*]], <2 x float> [[Y:%.*]], <2 x float> <float 1.000000e+00, float 1.000000e+00>, !prof [[PROF0]], !unpredictable [[META2]]
+; CHECK-NEXT: [[OP:%.*]] = select nnan i1 [[COND:%.*]], <2 x float> [[Y:%.*]], <2 x float> <float 1.000000e+00, float 1.000000e+00>, !prof [[PROF0]], !unpredictable [[META2]]
; CHECK-NEXT: [[RET:%.*]] = fdiv <2 x float> [[X:%.*]], [[OP]]
; CHECK-NEXT: ret <2 x float> [[RET]]
;
%op = fdiv <2 x float> %x, %y
- %ret = select i1 %cond, <2 x float> %op, <2 x float> %x, !prof !1, !unpredictable !3
+ %ret = select nnan i1 %cond, <2 x float> %op, <2 x float> %x, !prof !1, !unpredictable !3
ret <2 x float> %ret
}
diff --git a/llvm/test/Transforms/InstCombine/shift-add.ll b/llvm/test/Transforms/InstCombine/shift-add.ll
index 1b25675..aa3a238 100644
--- a/llvm/test/Transforms/InstCombine/shift-add.ll
+++ b/llvm/test/Transforms/InstCombine/shift-add.ll
@@ -775,3 +775,32 @@ define <3 x i32> @add3_i96(<3 x i32> %0, <3 x i32> %1) {
%25 = insertelement <3 x i32> %24, i32 %20, i32 2
ret <3 x i32> %25
}
+
+define i8 @shl_fold_or_disjoint_cnt(i8 %x) {
+; CHECK-LABEL: @shl_fold_or_disjoint_cnt(
+; CHECK-NEXT: [[R:%.*]] = shl i8 16, [[X:%.*]]
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %a = or disjoint i8 %x, 3
+ %r = shl i8 2, %a
+ ret i8 %r
+}
+
+define <2 x i8> @ashr_fold_or_disjoint_cnt(<2 x i8> %x) {
+; CHECK-LABEL: @ashr_fold_or_disjoint_cnt(
+; CHECK-NEXT: [[R:%.*]] = lshr <2 x i8> <i8 0, i8 1>, [[X:%.*]]
+; CHECK-NEXT: ret <2 x i8> [[R]]
+;
+ %a = or disjoint <2 x i8> %x, <i8 3, i8 1>
+ %r = ashr <2 x i8> <i8 2, i8 3>, %a
+ ret <2 x i8> %r
+}
+
+define <2 x i8> @lshr_fold_or_disjoint_cnt_out_of_bounds(<2 x i8> %x) {
+; CHECK-LABEL: @lshr_fold_or_disjoint_cnt_out_of_bounds(
+; CHECK-NEXT: ret <2 x i8> zeroinitializer
+;
+ %a = or disjoint <2 x i8> %x, <i8 3, i8 8>
+ %r = lshr <2 x i8> <i8 2, i8 3>, %a
+ ret <2 x i8> %r
+}
diff --git a/llvm/test/Transforms/InstCombine/shift-cttz-ctlz.ll b/llvm/test/Transforms/InstCombine/shift-cttz-ctlz.ll
new file mode 100644
index 0000000..2b2f820
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/shift-cttz-ctlz.ll
@@ -0,0 +1,93 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+define i32 @shl_cttz_false(i32 %x, i32 %y) {
+; CHECK-LABEL: define i32 @shl_cttz_false(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CTTZ:%.*]] = call i32 @llvm.cttz.i32(i32 [[Y]], i1 true), !range [[RNG0:![0-9]+]]
+; CHECK-NEXT: [[RES:%.*]] = shl i32 [[X]], [[CTTZ]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+entry:
+ %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 false)
+ %res = shl i32 %x, %cttz
+ ret i32 %res
+}
+
+define i32 @shl_ctlz_false(i32 %x, i32 %y) {
+; CHECK-LABEL: define i32 @shl_ctlz_false(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CTTZ:%.*]] = call i32 @llvm.ctlz.i32(i32 [[Y]], i1 true), !range [[RNG0]]
+; CHECK-NEXT: [[RES:%.*]] = shl i32 [[X]], [[CTTZ]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+entry:
+ %cttz = call i32 @llvm.ctlz.i32(i32 %y, i1 false)
+ %res = shl i32 %x, %cttz
+ ret i32 %res
+}
+
+define i32 @lshr_cttz_false(i32 %x, i32 %y) {
+; CHECK-LABEL: define i32 @lshr_cttz_false(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CTTZ:%.*]] = call i32 @llvm.cttz.i32(i32 [[Y]], i1 true), !range [[RNG0]]
+; CHECK-NEXT: [[RES:%.*]] = lshr i32 [[X]], [[CTTZ]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+entry:
+ %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 false)
+ %res = lshr i32 %x, %cttz
+ ret i32 %res
+}
+
+define i32 @ashr_cttz_false(i32 %x, i32 %y) {
+; CHECK-LABEL: define i32 @ashr_cttz_false(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CTTZ:%.*]] = call i32 @llvm.cttz.i32(i32 [[Y]], i1 true), !range [[RNG0]]
+; CHECK-NEXT: [[RES:%.*]] = ashr i32 [[X]], [[CTTZ]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+entry:
+ %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 false)
+ %res = ashr i32 %x, %cttz
+ ret i32 %res
+}
+
+define i32 @shl_cttz_false_multiuse(i32 %x, i32 %y) {
+; CHECK-LABEL: define i32 @shl_cttz_false_multiuse(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CTTZ:%.*]] = call i32 @llvm.cttz.i32(i32 [[Y]], i1 false), !range [[RNG0]]
+; CHECK-NEXT: call void @use(i32 [[CTTZ]])
+; CHECK-NEXT: [[RES:%.*]] = shl i32 [[X]], [[CTTZ]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+entry:
+ %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 false)
+ call void @use(i32 %cttz)
+ %res = shl i32 %x, %cttz
+ ret i32 %res
+}
+
+define i32 @shl_cttz_as_lhs(i32 %x, i32 %y) {
+; CHECK-LABEL: define i32 @shl_cttz_as_lhs(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CTTZ:%.*]] = call i32 @llvm.cttz.i32(i32 [[Y]], i1 false), !range [[RNG0]]
+; CHECK-NEXT: [[RES:%.*]] = shl i32 [[CTTZ]], [[X]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+entry:
+ %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 false)
+ %res = shl i32 %cttz, %x
+ ret i32 %res
+}
+
+declare void @use(i32)
+;.
+; CHECK: [[RNG0]] = !{i32 0, i32 33}
+;.
diff --git a/llvm/test/Transforms/InstCombine/shift.ll b/llvm/test/Transforms/InstCombine/shift.ll
index 62f32c2..bef7fc8 100644
--- a/llvm/test/Transforms/InstCombine/shift.ll
+++ b/llvm/test/Transforms/InstCombine/shift.ll
@@ -1751,12 +1751,11 @@ define void @ashr_out_of_range_1(ptr %A) {
; CHECK-NEXT: [[L:%.*]] = load i177, ptr [[A:%.*]], align 4
; CHECK-NEXT: [[L_FROZEN:%.*]] = freeze i177 [[L]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i177 [[L_FROZEN]], -1
-; CHECK-NEXT: [[B:%.*]] = select i1 [[TMP1]], i177 0, i177 [[L_FROZEN]]
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i177 [[B]] to i64
+; CHECK-NEXT: [[TMP6:%.*]] = trunc i177 [[L_FROZEN]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i64 0, i64 [[TMP6]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i177, ptr [[A]], i64 [[TMP2]]
; CHECK-NEXT: [[G11:%.*]] = getelementptr i8, ptr [[TMP3]], i64 -24
-; CHECK-NEXT: [[C17:%.*]] = icmp sgt i177 [[B]], [[L_FROZEN]]
-; CHECK-NEXT: [[TMP4:%.*]] = sext i1 [[C17]] to i64
+; CHECK-NEXT: [[TMP4:%.*]] = sext i1 [[TMP1]] to i64
; CHECK-NEXT: [[G62:%.*]] = getelementptr i177, ptr [[G11]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i177 [[L_FROZEN]], -1
; CHECK-NEXT: [[B28:%.*]] = select i1 [[TMP5]], i177 0, i177 [[L_FROZEN]]
diff --git a/llvm/test/Transforms/InstCombine/shuffle_select-inseltpoison.ll b/llvm/test/Transforms/InstCombine/shuffle_select-inseltpoison.ll
index 44ec77e..f573ff3 100644
--- a/llvm/test/Transforms/InstCombine/shuffle_select-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/shuffle_select-inseltpoison.ll
@@ -336,7 +336,18 @@ define <4 x i32> @srem(<4 x i32> %v) {
; Try FP ops/types.
-define <4 x float> @fadd(<4 x float> %v) {
+define <4 x float> @fadd_maybe_nan(<4 x float> %v) {
+; CHECK-LABEL: @fadd_maybe_nan(
+; CHECK-NEXT: [[B:%.*]] = fadd <4 x float> [[V:%.*]], <float 4.100000e+01, float 4.200000e+01, float poison, float poison>
+; CHECK-NEXT: [[S:%.*]] = shufflevector <4 x float> [[B]], <4 x float> [[V]], <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+; CHECK-NEXT: ret <4 x float> [[S]]
+;
+ %b = fadd <4 x float> %v, <float 41.0, float 42.0, float 43.0, float 44.0>
+ %s = shufflevector <4 x float> %b, <4 x float> %v, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+ ret <4 x float> %s
+}
+
+define <4 x float> @fadd(<4 x float> nofpclass(nan) %v) {
; CHECK-LABEL: @fadd(
; CHECK-NEXT: [[S:%.*]] = fadd <4 x float> [[V:%.*]], <float 4.100000e+01, float 4.200000e+01, float -0.000000e+00, float -0.000000e+00>
; CHECK-NEXT: ret <4 x float> [[S]]
@@ -359,7 +370,7 @@ define <4 x double> @fsub(<4 x double> %v) {
; Propagate any FMF.
-define <4 x float> @fmul(<4 x float> %v) {
+define <4 x float> @fmul(<4 x float> nofpclass(nan) %v) {
; CHECK-LABEL: @fmul(
; CHECK-NEXT: [[S:%.*]] = fmul nnan ninf <4 x float> [[V:%.*]], <float 4.100000e+01, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT: ret <4 x float> [[S]]
@@ -380,7 +391,7 @@ define <4 x double> @fdiv_constant_op0(<4 x double> %v) {
ret <4 x double> %s
}
-define <4 x double> @fdiv_constant_op1(<4 x double> %v) {
+define <4 x double> @fdiv_constant_op1(<4 x double> nofpclass(nan) %v) {
; CHECK-LABEL: @fdiv_constant_op1(
; CHECK-NEXT: [[S:%.*]] = fdiv reassoc <4 x double> [[V:%.*]], <double undef, double 1.000000e+00, double 4.300000e+01, double 4.400000e+01>
; CHECK-NEXT: ret <4 x double> [[S]]
diff --git a/llvm/test/Transforms/InstCombine/shuffle_select.ll b/llvm/test/Transforms/InstCombine/shuffle_select.ll
index a1b0d78..efadb5c 100644
--- a/llvm/test/Transforms/InstCombine/shuffle_select.ll
+++ b/llvm/test/Transforms/InstCombine/shuffle_select.ll
@@ -336,7 +336,18 @@ define <4 x i32> @srem(<4 x i32> %v) {
; Try FP ops/types.
-define <4 x float> @fadd(<4 x float> %v) {
+define <4 x float> @fadd_maybe_nan(<4 x float> %v) {
+; CHECK-LABEL: @fadd_maybe_nan(
+; CHECK-NEXT: [[B:%.*]] = fadd <4 x float> [[V:%.*]], <float 4.100000e+01, float 4.200000e+01, float poison, float poison>
+; CHECK-NEXT: [[S:%.*]] = shufflevector <4 x float> [[B]], <4 x float> [[V]], <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+; CHECK-NEXT: ret <4 x float> [[S]]
+;
+ %b = fadd <4 x float> %v, <float 41.0, float 42.0, float 43.0, float 44.0>
+ %s = shufflevector <4 x float> %b, <4 x float> %v, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+ ret <4 x float> %s
+}
+
+define <4 x float> @fadd(<4 x float> nofpclass(nan) %v) {
; CHECK-LABEL: @fadd(
; CHECK-NEXT: [[S:%.*]] = fadd <4 x float> [[V:%.*]], <float 4.100000e+01, float 4.200000e+01, float -0.000000e+00, float -0.000000e+00>
; CHECK-NEXT: ret <4 x float> [[S]]
@@ -359,7 +370,7 @@ define <4 x double> @fsub(<4 x double> %v) {
; Propagate any FMF.
-define <4 x float> @fmul(<4 x float> %v) {
+define <4 x float> @fmul(<4 x float> nofpclass(nan) %v) {
; CHECK-LABEL: @fmul(
; CHECK-NEXT: [[S:%.*]] = fmul nnan ninf <4 x float> [[V:%.*]], <float 4.100000e+01, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT: ret <4 x float> [[S]]
@@ -380,7 +391,7 @@ define <4 x double> @fdiv_constant_op0(<4 x double> %v) {
ret <4 x double> %s
}
-define <4 x double> @fdiv_constant_op1(<4 x double> %v) {
+define <4 x double> @fdiv_constant_op1(<4 x double> nofpclass(nan) %v) {
; CHECK-LABEL: @fdiv_constant_op1(
; CHECK-NEXT: [[S:%.*]] = fdiv reassoc <4 x double> [[V:%.*]], <double undef, double 1.000000e+00, double 4.300000e+01, double 4.400000e+01>
; CHECK-NEXT: ret <4 x double> [[S]]
diff --git a/llvm/test/Transforms/InstCombine/sub-xor-cmp.ll b/llvm/test/Transforms/InstCombine/sub-xor-cmp.ll
index 2e1ff0a..461c9b0 100644
--- a/llvm/test/Transforms/InstCombine/sub-xor-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/sub-xor-cmp.ll
@@ -117,11 +117,9 @@ define i64 @sext_diff_i1_xor_sub_1(i64 %a, i1 %b, i1 %c) {
define i64 @sext_multi_uses(i64 %a, i1 %b, i64 %x) {
; CHECK-LABEL: define i64 @sext_multi_uses(
; CHECK-SAME: i64 [[A:%.*]], i1 [[B:%.*]], i64 [[X:%.*]]) {
-; CHECK-NEXT: [[C:%.*]] = sext i1 [[B]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = sub i64 0, [[A]]
-; CHECK-NEXT: [[E:%.*]] = select i1 [[B]], i64 [[TMP1]], i64 [[A]]
-; CHECK-NEXT: [[F:%.*]] = mul i64 [[C]], [[X]]
-; CHECK-NEXT: [[R:%.*]] = add i64 [[F]], [[E]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[X]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = sub i64 0, [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = select i1 [[B]], i64 [[TMP2]], i64 [[A]]
; CHECK-NEXT: ret i64 [[R]]
;
%c = sext i1 %b to i64
diff --git a/llvm/test/Transforms/InstCombine/trunc.ll b/llvm/test/Transforms/InstCombine/trunc.ll
index c6bc06d..760825d 100644
--- a/llvm/test/Transforms/InstCombine/trunc.ll
+++ b/llvm/test/Transforms/InstCombine/trunc.ll
@@ -1021,3 +1021,40 @@ define i16 @PR44545(i32 %t0, i32 %data) {
%sub = add nsw i16 %cast, -1
ret i16 %sub
}
+
+; Make sure that SimplifyDemandedBits drops the nowrap flags
+define i8 @drop_nsw_trunc(i16 %x, i16 %y) {
+; CHECK-LABEL: @drop_nsw_trunc(
+; CHECK-NEXT: [[AND2:%.*]] = and i16 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[RES:%.*]] = trunc i16 [[AND2]] to i8
+; CHECK-NEXT: ret i8 [[RES]]
+;
+ %and = and i16 %x, 255
+ %and2 = and i16 %and, %y
+ %res = trunc nsw i16 %and2 to i8
+ ret i8 %res
+}
+
+define i8 @drop_nuw_trunc(i16 %x, i16 %y) {
+; CHECK-LABEL: @drop_nuw_trunc(
+; CHECK-NEXT: [[AND2:%.*]] = and i16 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[B:%.*]] = trunc i16 [[AND2]] to i8
+; CHECK-NEXT: ret i8 [[B]]
+;
+ %and = and i16 %x, 255
+ %and2 = and i16 %and, %y
+ %res = trunc nuw i16 %and2 to i8
+ ret i8 %res
+}
+
+define i8 @drop_both_trunc(i16 %x, i16 %y) {
+; CHECK-LABEL: @drop_both_trunc(
+; CHECK-NEXT: [[AND2:%.*]] = and i16 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[RES:%.*]] = trunc i16 [[AND2]] to i8
+; CHECK-NEXT: ret i8 [[RES]]
+;
+ %and = and i16 %x, 255
+ %and2 = and i16 %and, %y
+ %res = trunc nuw nsw i16 %and2 to i8
+ ret i8 %res
+}
diff --git a/llvm/test/Transforms/InstCombine/uadd-with-overflow.ll b/llvm/test/Transforms/InstCombine/uadd-with-overflow.ll
index 28d309b..fd5d38b 100644
--- a/llvm/test/Transforms/InstCombine/uadd-with-overflow.ll
+++ b/llvm/test/Transforms/InstCombine/uadd-with-overflow.ll
@@ -124,3 +124,26 @@ define { i32, i1 } @no_fold_wrapped_add(i32 %x) {
%b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 30, i32 %a)
ret { i32, i1 } %b
}
+
+
+define { <2 x i32>, <2 x i1> } @fold_simple_splat_with_disjoint_or_constant(<2 x i32> %x) {
+; CHECK-LABEL: @fold_simple_splat_with_disjoint_or_constant(
+; CHECK-NEXT: [[B:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[X:%.*]], <2 x i32> <i32 42, i32 42>)
+; CHECK-NEXT: ret { <2 x i32>, <2 x i1> } [[B]]
+;
+ %a = or disjoint <2 x i32> %x, <i32 12, i32 12>
+ %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
+ ret { <2 x i32>, <2 x i1> } %b
+}
+
+
+define { <2 x i32>, <2 x i1> } @fold_simple_splat_constant_with_or_fail(<2 x i32> %x) {
+; CHECK-LABEL: @fold_simple_splat_constant_with_or_fail(
+; CHECK-NEXT: [[A:%.*]] = or <2 x i32> [[X:%.*]], <i32 12, i32 12>
+; CHECK-NEXT: [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
+; CHECK-NEXT: ret { <2 x i32>, <2 x i1> } [[B]]
+;
+ %a = or <2 x i32> %x, <i32 12, i32 12>
+ %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
+ ret { <2 x i32>, <2 x i1> } %b
+}
diff --git a/llvm/test/Transforms/InstCombine/zext.ll b/llvm/test/Transforms/InstCombine/zext.ll
index edbd485..88cd9c7 100644
--- a/llvm/test/Transforms/InstCombine/zext.ll
+++ b/llvm/test/Transforms/InstCombine/zext.ll
@@ -836,3 +836,34 @@ define i64 @zext_nneg_demanded_constant(i8 %a) nounwind {
%c = and i64 %b, 254
ret i64 %c
}
+
+define i32 @zext_nneg_i1(i1 %x) {
+; CHECK-LABEL: @zext_nneg_i1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %res = zext nneg i1 %x to i32
+ ret i32 %res
+}
+
+define <2 x i32> @zext_nneg_i1_vec(<2 x i1> %x) {
+; CHECK-LABEL: @zext_nneg_i1_vec(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret <2 x i32> zeroinitializer
+;
+entry:
+ %res = zext nneg <2 x i1> %x to <2 x i32>
+ ret <2 x i32> %res
+}
+
+define i32 @zext_nneg_i2(i2 %x) {
+; CHECK-LABEL: @zext_nneg_i2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RES:%.*]] = zext nneg i2 [[X:%.*]] to i32
+; CHECK-NEXT: ret i32 [[RES]]
+;
+entry:
+ %res = zext nneg i2 %x to i32
+ ret i32 %res
+}
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/gep.ll b/llvm/test/Transforms/InstSimplify/ConstProp/gep.ll
index 6dd34a4..d91349a 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/gep.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/gep.ll
@@ -11,21 +11,21 @@ target triple = "x86_64-unknown-linux-gnu"
define ptr @f0() {
; CHECK-LABEL: @f0(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([3 x ptr], ptr @vt, inrange i64 0, i64 2)
+; CHECK-NEXT: ret ptr getelementptr inbounds inrange(-16, 8) ([3 x ptr], ptr @vt, i64 0, i64 2)
;
- ret ptr getelementptr (ptr, ptr getelementptr inbounds ([3 x ptr], ptr @vt, inrange i64 0, i64 1), i64 1)
+ ret ptr getelementptr (ptr, ptr getelementptr inbounds inrange(-8, 16) ([3 x ptr], ptr @vt, i64 0, i64 1), i64 1)
}
define ptr @f1() {
; CHECK-LABEL: @f1(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([3 x ptr], ptr @vt, i64 0, i64 2)
+; CHECK-NEXT: ret ptr getelementptr inbounds inrange(-8, 0) ([3 x ptr], ptr @vt, i64 0, i64 2)
;
- ret ptr getelementptr (ptr, ptr getelementptr inbounds ([3 x ptr], ptr @vt, i64 0, inrange i64 1), i64 1)
+ ret ptr getelementptr (ptr, ptr getelementptr inbounds inrange(0, 8) ([3 x ptr], ptr @vt, i64 0, i64 1), i64 1)
}
define ptr @f2() {
; CHECK-LABEL: @f2(
-; CHECK-NEXT: ret ptr getelementptr ([3 x ptr], ptr @vt, i64 1, i64 1)
+; CHECK-NEXT: ret ptr getelementptr inrange(-24, -16) ([3 x ptr], ptr @vt, i64 1, i64 1)
;
- ret ptr getelementptr (ptr, ptr getelementptr inbounds ([3 x ptr], ptr @vt, i64 0, inrange i64 1), i64 3)
+ ret ptr getelementptr (ptr, ptr getelementptr inbounds inrange(0, 8) ([3 x ptr], ptr @vt, i64 0, i64 1), i64 3)
}
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/icmp-global.ll b/llvm/test/Transforms/InstSimplify/ConstProp/icmp-global.ll
index 3851bd0..b4afb7b 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/icmp-global.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/icmp-global.ll
@@ -298,3 +298,11 @@ bb:
%cmp = icmp eq ptr blockaddress(@blockaddr_no_cfi, %bb), no_cfi @func
ret i1 %cmp
}
+
+define i1 @global_no_cfi_dso_local_equivalent() {
+; CHECK-LABEL: @global_no_cfi_dso_local_equivalent(
+; CHECK-NEXT: ret i1 icmp eq (ptr dso_local_equivalent @func, ptr no_cfi @func)
+;
+ %cmp = icmp eq ptr dso_local_equivalent @func, no_cfi @func
+ ret i1 %cmp
+}
diff --git a/llvm/test/Transforms/InstSimplify/floating-point-arithmetic.ll b/llvm/test/Transforms/InstSimplify/floating-point-arithmetic.ll
index b8244b1..5d17504 100644
--- a/llvm/test/Transforms/InstSimplify/floating-point-arithmetic.ll
+++ b/llvm/test/Transforms/InstSimplify/floating-point-arithmetic.ll
@@ -211,6 +211,50 @@ define double @fmul_nnan_ninf_nneg_n0.0_commute(i127 %x) {
ret double %r
}
+define float @src_mul_nzero_neg(float nofpclass(inf nan pzero psub pnorm) %f) {
+; CHECK-LABEL: @src_mul_nzero_neg(
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %r = fmul float %f, -0.0
+ ret float %r
+}
+
+define <2 x float> @src_mul_zero_neg(<2 x float> nofpclass(inf nan pzero psub pnorm) %f) {
+; CHECK-LABEL: @src_mul_zero_neg(
+; CHECK-NEXT: ret <2 x float> <float -0.000000e+00, float -0.000000e+00>
+;
+ %r = fmul <2 x float> <float 0.0, float 0.0>, %f
+ ret <2 x float> %r
+}
+
+define <2 x float> @src_mul_zero_and_nzero_neg(<2 x float> nofpclass(inf nan pzero psub pnorm) %f) {
+; CHECK-LABEL: @src_mul_zero_and_nzero_neg(
+; CHECK-NEXT: ret <2 x float> <float 0.000000e+00, float -0.000000e+00>
+;
+ %r = fmul <2 x float> <float -0.0, float 0.0>, %f
+ ret <2 x float> %r
+}
+
+
+define float @src_muladd_zero_neg(float nofpclass(inf nan pzero psub pnorm) %f, float %add) {
+; CHECK-LABEL: @src_muladd_zero_neg(
+; CHECK-NEXT: [[R:%.*]] = call float @llvm.fmuladd.f32(float [[F:%.*]], float 0.000000e+00, float [[ADD:%.*]])
+; CHECK-NEXT: ret float [[R]]
+;
+ %r = call float @llvm.fmuladd.f32(float %f, float 0.0, float %add)
+ ret float %r
+}
+
+define float @src_fma_nzero_neg(float nofpclass(inf nan pzero psub pnorm) %f, float %add) {
+; CHECK-LABEL: @src_fma_nzero_neg(
+; CHECK-NEXT: [[R:%.*]] = call float @llvm.fma.f32(float -0.000000e+00, float [[F:%.*]], float [[ADD:%.*]])
+; CHECK-NEXT: ret float [[R]]
+;
+ %r = call float @llvm.fma.f32(float -0.0, float %f, float %add)
+ ret float %r
+}
+
+
; Make sure we can infer %x can't be 0 based on assumes.
define { float, float } @test_fmul_0_assumed_finite(float %x) {
; CHECK-LABEL: @test_fmul_0_assumed_finite(
diff --git a/llvm/test/Transforms/InstSimplify/icmp-constant.ll b/llvm/test/Transforms/InstSimplify/icmp-constant.ll
index 04261f6..99bf11b 100644
--- a/llvm/test/Transforms/InstSimplify/icmp-constant.ll
+++ b/llvm/test/Transforms/InstSimplify/icmp-constant.ll
@@ -1140,3 +1140,131 @@ define <2 x i1> @heterogeneous_constvector(<2 x i8> %x) {
%c = icmp ult <2 x i8> %x, <i8 undef, i8 poison>
ret <2 x i1> %c
}
+
+define i1 @icmp_eq_constant_range_attr(i8 range(i8 0, 10) %i) {
+; CHECK-LABEL: @icmp_eq_constant_range_attr(
+; CHECK-NEXT: ret i1 false
+;
+ %cmp = icmp eq i8 %i, 10
+ ret i1 %cmp
+}
+
+define i1 @neg_icmp_eq_constant_range_attr(i8 range(i8 0, 11) %i) {
+; CHECK-LABEL: @neg_icmp_eq_constant_range_attr(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[I:%.*]], 10
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %cmp = icmp eq i8 %i, 10
+ ret i1 %cmp
+}
+
+declare range(i8 1, 0) i8 @returns_non_ten_range_helper()
+declare range(i8 -1, 1) i8 @returns_contain_ten_range_helper()
+
+define i1 @icmp_eq_constant_range_return() {
+; CHECK-LABEL: @icmp_eq_constant_range_return(
+; CHECK-NEXT: [[I:%.*]] = call i8 @returns_non_ten_range_helper()
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[I]], 10
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i = call i8 @returns_non_ten_range_helper()
+ %cmp = icmp eq i8 %i, 10
+ ret i1 %cmp
+}
+
+define i1 @neg_icmp_eq_constant_range_return() {
+; CHECK-LABEL: @neg_icmp_eq_constant_range_return(
+; CHECK-NEXT: [[I:%.*]] = call i8 @returns_contain_ten_range_helper()
+; CHECK-NEXT: ret i1 false
+;
+ %i = call i8 @returns_contain_ten_range_helper()
+ %cmp = icmp eq i8 %i, 10
+ ret i1 %cmp
+}
+
+declare i8 @returns_i8_helper()
+
+define i1 @icmp_eq_constant_range_call() {
+; CHECK-LABEL: @icmp_eq_constant_range_call(
+; CHECK-NEXT: [[I:%.*]] = call range(i8 0, 10) i8 @returns_i8_helper()
+; CHECK-NEXT: ret i1 false
+;
+ %i = call range(i8 0, 10) i8 @returns_i8_helper()
+ %cmp = icmp eq i8 %i, 10
+ ret i1 %cmp
+}
+
+define i1 @neg_icmp_eq_constant_range_call() {
+; CHECK-LABEL: @neg_icmp_eq_constant_range_call(
+; CHECK-NEXT: [[I:%.*]] = call range(i8 0, 11) i8 @returns_i8_helper()
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[I]], 10
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i = call range(i8 0, 11) i8 @returns_i8_helper()
+ %cmp = icmp eq i8 %i, 10
+ ret i1 %cmp
+}
+
+define <2 x i1> @icmp_eq_constant_range_attr_vec(<2 x i8> range(i8 0, 10) %i) {
+; CHECK-LABEL: @icmp_eq_constant_range_attr_vec(
+; CHECK-NEXT: ret <2 x i1> zeroinitializer
+;
+ %cmp = icmp eq <2 x i8> %i, <i8 10, i8 10>
+ ret <2 x i1> %cmp
+}
+
+define <2 x i1> @neg_icmp_eq_constant_range_attr_vec(<2 x i8> range(i8 0, 11) %i) {
+; CHECK-LABEL: @neg_icmp_eq_constant_range_attr_vec(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i8> [[I:%.*]], <i8 10, i8 10>
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %cmp = icmp eq <2 x i8> %i, <i8 10, i8 10>
+ ret <2 x i1> %cmp
+}
+
+declare range(i8 0, 10) <2 x i8> @returns_non_ten_range_helper_vec()
+declare range(i8 0, 11) <2 x i8> @returns_contain_ten_range_helper_vec()
+
+define <2 x i1> @icmp_eq_constant_range_return_vec() {
+; CHECK-LABEL: @icmp_eq_constant_range_return_vec(
+; CHECK-NEXT: [[I:%.*]] = call <2 x i8> @returns_non_ten_range_helper_vec()
+; CHECK-NEXT: ret <2 x i1> zeroinitializer
+;
+ %i = call <2 x i8> @returns_non_ten_range_helper_vec()
+ %cmp = icmp eq <2 x i8> %i, <i8 10, i8 10>
+ ret <2 x i1> %cmp
+}
+
+define <2 x i1> @neg_icmp_eq_constant_range_return_vec() {
+; CHECK-LABEL: @neg_icmp_eq_constant_range_return_vec(
+; CHECK-NEXT: [[I:%.*]] = call <2 x i8> @returns_contain_ten_range_helper_vec()
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i8> [[I]], <i8 10, i8 10>
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %i = call <2 x i8> @returns_contain_ten_range_helper_vec()
+ %cmp = icmp eq <2 x i8> %i, <i8 10, i8 10>
+ ret <2 x i1> %cmp
+}
+
+declare <2 x i8> @returns_i8_helper_vec()
+
+define <2 x i1> @icmp_eq_constant_range_call_vec() {
+; CHECK-LABEL: @icmp_eq_constant_range_call_vec(
+; CHECK-NEXT: [[I:%.*]] = call range(i8 0, 10) <2 x i8> @returns_i8_helper_vec()
+; CHECK-NEXT: ret <2 x i1> zeroinitializer
+;
+ %i = call range(i8 0, 10) <2 x i8> @returns_i8_helper_vec()
+ %cmp = icmp eq <2 x i8> %i, <i8 10, i8 10>
+ ret <2 x i1> %cmp
+}
+
+define <2 x i1> @neg_icmp_eq_constant_range_call_vec() {
+; CHECK-LABEL: @neg_icmp_eq_constant_range_call_vec(
+; CHECK-NEXT: [[I:%.*]] = call range(i8 0, 11) <2 x i8> @returns_i8_helper_vec()
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i8> [[I]], <i8 10, i8 10>
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %i = call range(i8 0, 11) <2 x i8> @returns_i8_helper_vec()
+ %cmp = icmp eq <2 x i8> %i, <i8 10, i8 10>
+ ret <2 x i1> %cmp
+}
diff --git a/llvm/test/Transforms/InstSimplify/shift-knownbits.ll b/llvm/test/Transforms/InstSimplify/shift-knownbits.ll
index c2256f4..6bf0377 100644
--- a/llvm/test/Transforms/InstSimplify/shift-knownbits.ll
+++ b/llvm/test/Transforms/InstSimplify/shift-knownbits.ll
@@ -14,6 +14,145 @@ define i32 @shl_amount_is_known_bogus(i32 %a, i32 %b) {
ret i32 %shl
}
+define i32 @shl_amount_is_known_bogus_range_attr(i32 %a, i32 range(i32 32, 64) %b) {
+; CHECK-LABEL: @shl_amount_is_known_bogus_range_attr(
+; CHECK-NEXT: ret i32 poison
+;
+ %shl = shl i32 %a, %b
+ ret i32 %shl
+}
+
+define i32 @neg_shl_amount_is_known_bogus_range_attr(i32 %a, i32 range(i32 0, 32) %b) {
+; CHECK-LABEL: @neg_shl_amount_is_known_bogus_range_attr(
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: ret i32 [[SHL]]
+;
+ %shl = shl i32 %a, %b
+ ret i32 %shl
+}
+
+declare range(i32 32, 64) i32 @returns_out_of_range_helper()
+declare range(i32 0, 32) i32 @returns_in_range_helper()
+
+define i32 @shl_amount_is_known_bogus_range_return(i32 %a) {
+; CHECK-LABEL: @shl_amount_is_known_bogus_range_return(
+; CHECK-NEXT: [[B:%.*]] = call i32 @returns_out_of_range_helper()
+; CHECK-NEXT: ret i32 poison
+;
+ %b = call i32 @returns_out_of_range_helper()
+ %shl = shl i32 %a, %b
+ ret i32 %shl
+}
+
+define i32 @neg_shl_amount_is_known_bogus_range_return(i32 %a) {
+; CHECK-LABEL: @neg_shl_amount_is_known_bogus_range_return(
+; CHECK-NEXT: [[B:%.*]] = call i32 @returns_in_range_helper()
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[B]]
+; CHECK-NEXT: ret i32 [[SHL]]
+;
+ %b = call i32 @returns_in_range_helper()
+ %shl = shl i32 %a, %b
+ ret i32 %shl
+}
+
+declare i32 @returns_i32_helper()
+
+define i32 @shl_amount_is_known_bogus_range_call(i32 %a) {
+; CHECK-LABEL: @shl_amount_is_known_bogus_range_call(
+; CHECK-NEXT: [[B:%.*]] = call range(i32 32, 64) i32 @returns_i32_helper()
+; CHECK-NEXT: ret i32 poison
+;
+ %b = call range(i32 32, 64) i32 @returns_i32_helper()
+ %shl = shl i32 %a, %b
+ ret i32 %shl
+}
+
+define i32 @neg_shl_amount_is_known_bogus_range_call(i32 %a) {
+; CHECK-LABEL: @neg_shl_amount_is_known_bogus_range_call(
+; CHECK-NEXT: [[B:%.*]] = call range(i32 0, 32) i32 @returns_i32_helper()
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[B]]
+; CHECK-NEXT: ret i32 [[SHL]]
+;
+ %b = call range(i32 0, 32) i32 @returns_i32_helper()
+ %shl = shl i32 %a, %b
+ ret i32 %shl
+}
+
+define <2 x i32> @shl_amount_is_known_bogus_range_attr_vec(<2 x i32> %a, <2 x i32> range(i32 32, 64) %b) {
+; CHECK-LABEL: @shl_amount_is_known_bogus_range_attr_vec(
+; CHECK-NEXT: ret <2 x i32> poison
+;
+ %shl = shl <2 x i32> %a, %b
+ ret <2 x i32> %shl
+}
+
+define <2 x i32> @neg_shl_amount_is_known_bogus_range_attr_vec(<2 x i32> %a, <2 x i32> range(i32 0, 32) %b) {
+; CHECK-LABEL: @neg_shl_amount_is_known_bogus_range_attr_vec(
+; CHECK-NEXT: [[SHL:%.*]] = shl <2 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: ret <2 x i32> [[SHL]]
+;
+ %shl = shl <2 x i32> %a, %b
+ ret <2 x i32> %shl
+}
+
+declare range(i32 32, 64) <2 x i32> @returns_out_of_range_helper_vec()
+declare range(i32 0, 32) <2 x i32> @returns_in_range_helper_vec()
+
+define <2 x i32> @shl_amount_is_known_bogus_range_return_vec(<2 x i32> %a) {
+; CHECK-LABEL: @shl_amount_is_known_bogus_range_return_vec(
+; CHECK-NEXT: [[B:%.*]] = call <2 x i32> @returns_out_of_range_helper_vec()
+; CHECK-NEXT: ret <2 x i32> poison
+;
+ %b = call <2 x i32> @returns_out_of_range_helper_vec()
+ %shl = shl <2 x i32> %a, %b
+ ret <2 x i32> %shl
+}
+
+define <2 x i32> @neg_shl_amount_is_known_bogus_range_return_vec(<2 x i32> %a) {
+; CHECK-LABEL: @neg_shl_amount_is_known_bogus_range_return_vec(
+; CHECK-NEXT: [[B:%.*]] = call <2 x i32> @returns_in_range_helper_vec()
+; CHECK-NEXT: [[SHL:%.*]] = shl <2 x i32> [[A:%.*]], [[B]]
+; CHECK-NEXT: ret <2 x i32> [[SHL]]
+;
+ %b = call <2 x i32> @returns_in_range_helper_vec()
+ %shl = shl <2 x i32> %a, %b
+ ret <2 x i32> %shl
+}
+
+declare <2 x i32> @returns_i32_helper_vec()
+
+define <2 x i32> @shl_amount_is_known_bogus_range_call_vec(<2 x i32> %a) {
+; CHECK-LABEL: @shl_amount_is_known_bogus_range_call_vec(
+; CHECK-NEXT: [[B:%.*]] = call range(i32 32, 64) <2 x i32> @returns_i32_helper_vec()
+; CHECK-NEXT: ret <2 x i32> poison
+;
+ %b = call range(i32 32, 64) <2 x i32> @returns_i32_helper_vec()
+ %shl = shl <2 x i32> %a, %b
+ ret <2 x i32> %shl
+}
+
+define <2 x i32> @neg_shl_amount_is_known_bogus_range_call_vec(<2 x i32> %a) {
+; CHECK-LABEL: @neg_shl_amount_is_known_bogus_range_call_vec(
+; CHECK-NEXT: [[B:%.*]] = call range(i32 0, 32) <2 x i32> @returns_i32_helper_vec()
+; CHECK-NEXT: [[SHL:%.*]] = shl <2 x i32> [[A:%.*]], [[B]]
+; CHECK-NEXT: ret <2 x i32> [[SHL]]
+;
+ %b = call range(i32 0, 32) <2 x i32> @returns_i32_helper_vec()
+ %shl = shl <2 x i32> %a, %b
+ ret <2 x i32> %shl
+}
+
+define i32 @shl_amount_is_not_known_bogus_range_call_and_range_metadata(i32 %a) {
+; CHECK-LABEL: @shl_amount_is_not_known_bogus_range_call_and_range_metadata(
+; CHECK-NEXT: [[B:%.*]] = call range(i32 0, 32) i32 @returns_i32_helper(), !range [[RNG0:![0-9]+]]
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[B]]
+; CHECK-NEXT: ret i32 [[SHL]]
+;
+ %b = call range(i32 0, 32) i32 @returns_i32_helper(), !range !{ i32 32, i32 64 }
+ %shl = shl i32 %a, %b
+ ret i32 %shl
+}
+
; Check some weird types and the other shift ops.
define i31 @lshr_amount_is_known_bogus(i31 %a, i31 %b) {
diff --git a/llvm/test/Transforms/Internalize/vcall-visibility.ll b/llvm/test/Transforms/Internalize/vcall-visibility.ll
index c2fe8c3..ee67535 100644
--- a/llvm/test/Transforms/Internalize/vcall-visibility.ll
+++ b/llvm/test/Transforms/Internalize/vcall-visibility.ll
@@ -42,7 +42,7 @@ entry:
define hidden noalias nonnull ptr @_Z6make_dv() {
entry:
%call = tail call ptr @_Znwm(i64 8) #3
- store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTVN12_GLOBAL__N_11DE, i64 0, inrange i32 0, i64 2), ptr %call, align 8
+ store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTVN12_GLOBAL__N_11DE, i64 0, i32 0, i64 2), ptr %call, align 8
ret ptr %call
}
diff --git a/llvm/test/Transforms/LICM/expr-reassociate-int.ll b/llvm/test/Transforms/LICM/expr-reassociate-int.ll
index 6354897..7028873 100644
--- a/llvm/test/Transforms/LICM/expr-reassociate-int.ll
+++ b/llvm/test/Transforms/LICM/expr-reassociate-int.ll
@@ -23,7 +23,7 @@ define void @innermost_loop_1d_shouldhoist(i32 %i, i64 %d1, i64 %delta, ptr %cel
; CHECK-LABEL: define void @innermost_loop_1d_shouldhoist
; CHECK-SAME: (i32 [[I:%.*]], i64 [[D1:%.*]], i64 [[DELTA:%.*]], ptr [[CELLS:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[MUL_1:%.*]] = mul i64 [[DELTA]], [[D1]]
+; CHECK-NEXT: [[MUL_1:%.*]] = mul nuw nsw i64 [[DELTA]], [[D1]]
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_J_1:%.*]], [[FOR_BODY:%.*]] ]
@@ -55,7 +55,7 @@ for.body:
%idxprom.j.1 = zext i32 %add.j.1 to i64
%arrayidx.j.1 = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j.1
%cell.1 = load i64, ptr %arrayidx.j.1, align 8
- %mul.1 = mul i64 %delta, %d1
+ %mul.1 = mul nsw nuw i64 %delta, %d1
%mul.2 = mul i64 %mul.1, %cell.1
%idxprom.j = zext i32 %j to i64
%arrayidx.j = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j
@@ -130,8 +130,8 @@ define void @innermost_loop_2d(i32 %i, i64 %d1, i64 %d2, i64 %delta, ptr %cells)
; CONSTRAINED-NEXT: [[IDXPROM_J:%.*]] = zext i32 [[J]] to i64
; CONSTRAINED-NEXT: [[ARRAYIDX_J:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J]]
; CONSTRAINED-NEXT: [[CELL_2:%.*]] = load i64, ptr [[ARRAYIDX_J]], align 8
-; CONSTRAINED-NEXT: [[MUL_2:%.*]] = mul i64 [[CELL_2]], [[D2]]
-; CONSTRAINED-NEXT: [[REASS_ADD:%.*]] = add i64 [[MUL_2]], [[MUL_1]]
+; CONSTRAINED-NEXT: [[MUL_2:%.*]] = mul nuw nsw i64 [[CELL_2]], [[D2]]
+; CONSTRAINED-NEXT: [[REASS_ADD:%.*]] = add nuw nsw i64 [[MUL_2]], [[MUL_1]]
; CONSTRAINED-NEXT: [[REASS_MUL:%.*]] = mul i64 [[REASS_ADD]], [[DELTA]]
; CONSTRAINED-NEXT: store i64 [[REASS_MUL]], ptr [[ARRAYIDX_J]], align 8
; CONSTRAINED-NEXT: br label [[FOR_COND]]
@@ -155,8 +155,8 @@ for.body:
%idxprom.j = zext i32 %j to i64
%arrayidx.j = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j
%cell.2 = load i64, ptr %arrayidx.j, align 8
- %mul.2 = mul i64 %cell.2, %d2
- %reass.add = add i64 %mul.2, %mul.1
+ %mul.2 = mul nsw nuw i64 %cell.2, %d2
+ %reass.add = add nsw nuw i64 %mul.2, %mul.1
%reass.mul = mul i64 %reass.add, %delta
store i64 %reass.mul, ptr %arrayidx.j, align 8
br label %for.cond
@@ -243,10 +243,10 @@ define void @innermost_loop_3d(i32 %i, i64 %d1, i64 %d2, i64 %d3, i64 %delta, pt
; CONSTRAINED-NEXT: [[IDXPROM_J_2:%.*]] = zext i32 [[ADD_J_2]] to i64
; CONSTRAINED-NEXT: [[ARRAYIDX_J_2:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_2]]
; CONSTRAINED-NEXT: [[CELL_3:%.*]] = load i64, ptr [[ARRAYIDX_J_2]], align 8
-; CONSTRAINED-NEXT: [[MUL_3:%.*]] = mul i64 [[CELL_3]], [[D3]]
-; CONSTRAINED-NEXT: [[REASS_ADD:%.*]] = add i64 [[MUL_2]], [[MUL_1]]
-; CONSTRAINED-NEXT: [[REASS_ADD1:%.*]] = add i64 [[REASS_ADD]], [[MUL_3]]
-; CONSTRAINED-NEXT: [[REASS_MUL:%.*]] = mul i64 [[REASS_ADD1]], [[DELTA]]
+; CONSTRAINED-NEXT: [[MUL_3:%.*]] = mul nuw nsw i64 [[CELL_3]], [[D3]]
+; CONSTRAINED-NEXT: [[REASS_ADD:%.*]] = add nuw nsw i64 [[MUL_2]], [[MUL_1]]
+; CONSTRAINED-NEXT: [[REASS_ADD1:%.*]] = add nuw nsw i64 [[REASS_ADD]], [[MUL_3]]
+; CONSTRAINED-NEXT: [[REASS_MUL:%.*]] = mul nuw nsw i64 [[REASS_ADD1]], [[DELTA]]
; CONSTRAINED-NEXT: store i64 [[REASS_MUL]], ptr [[ARRAYIDX_J_2]], align 8
; CONSTRAINED-NEXT: br label [[FOR_COND]]
; CONSTRAINED: for.end:
@@ -274,10 +274,10 @@ for.body:
%idxprom.j.2 = zext i32 %add.j.2 to i64
%arrayidx.j.2 = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j.2
%cell.3 = load i64, ptr %arrayidx.j.2, align 8
- %mul.3 = mul i64 %cell.3, %d3
- %reass.add = add i64 %mul.2, %mul.1
- %reass.add1 = add i64 %reass.add, %mul.3
- %reass.mul = mul i64 %reass.add1, %delta
+ %mul.3 = mul nsw nuw i64 %cell.3, %d3
+ %reass.add = add nsw nuw i64 %mul.2, %mul.1
+ %reass.add1 = add nsw nuw i64 %reass.add, %mul.3
+ %reass.mul = mul nsw nuw i64 %reass.add1, %delta
store i64 %reass.mul, ptr %arrayidx.j.2, align 8
br label %for.cond
@@ -362,3 +362,34 @@ for.body:
for.end:
ret void
}
+
+; Make sure we drop poison flags on the mul in the loop.
+define i32 @pr85457(i32 %x, i32 %y) {
+; CHECK-LABEL: define i32 @pr85457
+; CHECK-SAME: (i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[FACTOR_OP_MUL:%.*]] = mul i32 [[X]], [[Y]]
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
+; CHECK-NEXT: [[MUL0:%.*]] = mul i32 [[FACTOR_OP_MUL]], [[IV]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[MUL0]], 1
+; CHECK-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[LOOP]]
+; CHECK: exit:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 1, %entry ], [ %iv.next, %loop ]
+ %iv.next = add nuw nsw i32 %iv, 1
+ %mul0 = mul nuw nsw i32 %x, %iv
+ %mul1 = mul nuw i32 %mul0, %y
+ %cmp = icmp slt i32 %mul1, 1
+ br i1 %cmp, label %exit, label %loop
+
+exit:
+ ret i32 0
+}
diff --git a/llvm/test/Transforms/LoopIdiom/AArch64/byte-compare-index.ll b/llvm/test/Transforms/LoopIdiom/AArch64/byte-compare-index.ll
index e6a0c5f..daa64f2 100644
--- a/llvm/test/Transforms/LoopIdiom/AArch64/byte-compare-index.ll
+++ b/llvm/test/Transforms/LoopIdiom/AArch64/byte-compare-index.ll
@@ -2,6 +2,9 @@
; RUN: opt -aarch64-lit -aarch64-lit-verify -verify-dom-info -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s
; RUN: opt -aarch64-lit -simplifycfg -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s --check-prefix=LOOP-DEL
; RUN: opt -aarch64-lit -mtriple aarch64-unknown-linux-gnu -S < %s | FileCheck %s --check-prefix=NO-TRANSFORM
+; RUN: opt -p aarch64-lit -aarch64-lit-verify -verify-dom-info -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s
+; RUN: opt -passes='function(loop(aarch64-lit)),simplifycfg' -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s --check-prefix=LOOP-DEL
+; RUN: opt -p aarch64-lit -mtriple aarch64-unknown-linux-gnu -S < %s | FileCheck %s --check-prefix=NO-TRANSFORM
define i32 @compare_bytes_simple(ptr %a, ptr %b, i32 %len, i32 %extra, i32 %n) {
; CHECK-LABEL: define i32 @compare_bytes_simple(
@@ -780,7 +783,7 @@ define i32 @compare_bytes_extra_cmp(ptr %a, ptr %b, i32 %len, i32 %n, i32 %x) {
; CHECK-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[MISMATCH_END]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY:%.*]] ]
; CHECK-NEXT: [[INC:%.*]] = add i32 [[LEN_ADDR]], 1
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[MISMATCH_RESULT]], [[N]]
-; CHECK-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END]], label [[WHILE_BODY]]
+; CHECK-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END_LOOPEXIT:%.*]], label [[WHILE_BODY]]
; CHECK: while.body:
; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[MISMATCH_RESULT]] to i64
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
@@ -788,11 +791,14 @@ define i32 @compare_bytes_extra_cmp(ptr %a, ptr %b, i32 %len, i32 %n, i32 %x) {
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
; CHECK-NEXT: [[TMP46:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
; CHECK-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP45]], [[TMP46]]
-; CHECK-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; CHECK-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END_LOOPEXIT]]
; CHECK: byte.compare:
+; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT]]
+; CHECK: while.end.loopexit:
+; CHECK-NEXT: [[INC_LCSSA1:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_COND]] ], [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
; CHECK-NEXT: br label [[WHILE_END]]
; CHECK: while.end:
-; CHECK-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[MISMATCH_RESULT]], [[WHILE_BODY]] ], [ [[MISMATCH_RESULT]], [[WHILE_COND]] ], [ [[X]], [[ENTRY:%.*]] ], [ [[MISMATCH_RESULT]], [[BYTE_COMPARE]] ]
+; CHECK-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[X]], [[ENTRY:%.*]] ], [ [[INC_LCSSA1]], [[WHILE_END_LOOPEXIT]] ]
; CHECK-NEXT: ret i32 [[INC_LCSSA]]
;
; LOOP-DEL-LABEL: define i32 @compare_bytes_extra_cmp(
@@ -884,7 +890,7 @@ define i32 @compare_bytes_extra_cmp(ptr %a, ptr %b, i32 %len, i32 %n, i32 %x) {
; NO-TRANSFORM-NEXT: [[LEN_ADDR:%.*]] = phi i32 [ [[LEN]], [[PH]] ], [ [[INC:%.*]], [[WHILE_BODY:%.*]] ]
; NO-TRANSFORM-NEXT: [[INC]] = add i32 [[LEN_ADDR]], 1
; NO-TRANSFORM-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
-; NO-TRANSFORM-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END]], label [[WHILE_BODY]]
+; NO-TRANSFORM-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END_LOOPEXIT:%.*]], label [[WHILE_BODY]]
; NO-TRANSFORM: while.body:
; NO-TRANSFORM-NEXT: [[IDXPROM:%.*]] = zext i32 [[INC]] to i64
; NO-TRANSFORM-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IDXPROM]]
@@ -892,9 +898,12 @@ define i32 @compare_bytes_extra_cmp(ptr %a, ptr %b, i32 %len, i32 %n, i32 %x) {
; NO-TRANSFORM-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IDXPROM]]
; NO-TRANSFORM-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
; NO-TRANSFORM-NEXT: [[CMP_NOT2:%.*]] = icmp eq i8 [[TMP0]], [[TMP1]]
-; NO-TRANSFORM-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END]]
+; NO-TRANSFORM-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_COND]], label [[WHILE_END_LOOPEXIT]]
+; NO-TRANSFORM: while.end.loopexit:
+; NO-TRANSFORM-NEXT: [[INC_LCSSA1:%.*]] = phi i32 [ [[INC]], [[WHILE_COND]] ], [ [[INC]], [[WHILE_BODY]] ]
+; NO-TRANSFORM-NEXT: br label [[WHILE_END]]
; NO-TRANSFORM: while.end:
-; NO-TRANSFORM-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[WHILE_BODY]] ], [ [[INC]], [[WHILE_COND]] ], [ [[X]], [[ENTRY:%.*]] ]
+; NO-TRANSFORM-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[X]], [[ENTRY:%.*]] ], [ [[INC_LCSSA1]], [[WHILE_END_LOOPEXIT]] ]
; NO-TRANSFORM-NEXT: ret i32 [[INC_LCSSA]]
;
entry:
@@ -908,7 +917,7 @@ while.cond:
%len.addr = phi i32 [ %len, %ph ], [ %inc, %while.body ]
%inc = add i32 %len.addr, 1
%cmp.not = icmp eq i32 %inc, %n
- br i1 %cmp.not, label %while.end, label %while.body
+ br i1 %cmp.not, label %while.end.loopexit, label %while.body
while.body:
%idxprom = zext i32 %inc to i64
@@ -917,10 +926,14 @@ while.body:
%arrayidx2 = getelementptr inbounds i8, ptr %b, i64 %idxprom
%1 = load i8, ptr %arrayidx2
%cmp.not2 = icmp eq i8 %0, %1
- br i1 %cmp.not2, label %while.cond, label %while.end
+ br i1 %cmp.not2, label %while.cond, label %while.end.loopexit
+
+while.end.loopexit:
+ %inc.lcssa1 = phi i32 [ %inc, %while.cond ], [ %inc, %while.body ]
+ br label %while.end
while.end:
- %inc.lcssa = phi i32 [ %inc, %while.body ], [ %inc, %while.cond ], [ %x, %entry ]
+ %inc.lcssa = phi i32 [ %x, %entry ], [ %inc.lcssa1, %while.end.loopexit ]
ret i32 %inc.lcssa
}
diff --git a/llvm/test/Transforms/LoopLoadElim/versioning-scev-invalidation.ll b/llvm/test/Transforms/LoopLoadElim/versioning-scev-invalidation.ll
index 4b0bc90..63470a9 100644
--- a/llvm/test/Transforms/LoopLoadElim/versioning-scev-invalidation.ll
+++ b/llvm/test/Transforms/LoopLoadElim/versioning-scev-invalidation.ll
@@ -63,8 +63,8 @@ define void @g(ptr %dst.1, ptr %start, i64 %N) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 8
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[LCSSA_PTR_IV_1]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[NEXT_GEP]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP5]], align 8
diff --git a/llvm/test/Transforms/LoopRotate/dbgvalue.ll b/llvm/test/Transforms/LoopRotate/dbgvalue.ll
index 92cc886..9ecc31e 100644
--- a/llvm/test/Transforms/LoopRotate/dbgvalue.ll
+++ b/llvm/test/Transforms/LoopRotate/dbgvalue.ll
@@ -220,7 +220,7 @@ for.end:
; Test that dbg.value intrinsics adjacent to the `icmp slt i32 0, 0` get
; rotated as expected. The icmp is loop-invariant and so gets hoisted to the
-; preheader via a different code path. This is more difficult for DPValue
+; preheader via a different code path. This is more difficult for DbgVariableRecord
; debug-info records to handle, because they have to get detached and moved
; somewhere else during rotation.
define void @invariant_hoist() !dbg !70 {
diff --git a/llvm/test/Transforms/LoopRotate/update-branch-weights.ll b/llvm/test/Transforms/LoopRotate/update-branch-weights.ll
index 5d742b6..9a1f36e 100644
--- a/llvm/test/Transforms/LoopRotate/update-branch-weights.ll
+++ b/llvm/test/Transforms/LoopRotate/update-branch-weights.ll
@@ -232,6 +232,46 @@ loop_exit:
ret void
}
+; BFI_BEFORE-LABEL: block-frequency-info: func6_inaccurate_branch_weight
+; BFI_BEFORE: - entry: {{.*}} count = 1024
+; BFI_BEFORE: - loop_header: {{.*}} count = 2047
+; BFI_BEFORE: - loop_body: {{.*}} count = 1023
+; BFI_BEFORE: - loop_exit: {{.*}} count = 1024
+
+; BFI_AFTER-LABEL: block-frequency-info: func6_inaccurate_branch_weight
+; BFI_AFTER: - entry: {{.*}} count = 1024
+; BFI_AFTER: - loop_body: {{.*}} count = 1024
+; BFI_AFTER: - loop_exit: {{.*}} count = 1024
+
+; IR-LABEL: define void @func6_inaccurate_branch_weight(
+; IR: entry:
+; IR: br label %loop_body
+; IR: loop_body:
+; IR: br i1 %cmp, label %loop_body, label %loop_exit, !prof [[PROF_FUNC6_0:![0-9]+]]
+; IR: loop_exit:
+; IR: ret void
+
+; Branch weights from sample-based PGO may be inaccurate due to sampling.
+; The count for loop_body in the following case should not be less than the count for loop_exit.
+; However, this may not hold for sample-based PGO.
+define void @func6_inaccurate_branch_weight() !prof !3 {
+entry:
+ br label %loop_header
+
+loop_header:
+ %i = phi i32 [0, %entry], [%i_inc, %loop_body]
+ %cmp = icmp slt i32 %i, 2
+ br i1 %cmp, label %loop_body, label %loop_exit, !prof !9
+
+loop_body:
+ store volatile i32 %i, ptr @g, align 4
+ %i_inc = add i32 %i, 1
+ br label %loop_header
+
+loop_exit:
+ ret void
+}
+
!0 = !{!"function_entry_count", i64 1}
!1 = !{!"branch_weights", i32 1000, i32 1}
!2 = !{!"branch_weights", i32 3000, i32 1000}
@@ -241,6 +281,7 @@ loop_exit:
!6 = !{!"branch_weights", i32 0, i32 1}
!7 = !{!"branch_weights", i32 1, i32 0}
!8 = !{!"branch_weights", i32 0, i32 0}
+!9 = !{!"branch_weights", i32 1023, i32 1024}
; IR: [[PROF_FUNC0_0]] = !{!"branch_weights", i32 2000, i32 1000}
; IR: [[PROF_FUNC0_1]] = !{!"branch_weights", i32 999, i32 1}
@@ -251,3 +292,4 @@ loop_exit:
; IR: [[PROF_FUNC3_0]] = !{!"branch_weights", i32 0, i32 1}
; IR: [[PROF_FUNC4_0]] = !{!"branch_weights", i32 1, i32 0}
; IR: [[PROF_FUNC5_0]] = !{!"branch_weights", i32 0, i32 0}
+; IR: [[PROF_FUNC6_0]] = !{!"branch_weights", i32 0, i32 1024}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll
index 24c59fd..00ec396 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll
@@ -11,76 +11,74 @@ define void @test_widen_ptr_induction(ptr %ptr.start.1) {
; CHECK: vector.main.loop.iter.check:
; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[PTR_START_1:%.*]], i64 10000
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x ptr> poison, ptr [[NEXT_GEP]], i32 0
-; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x ptr> [[TMP2]], ptr [[NEXT_GEP1]], i32 1
-; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP4]]
-; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 3
-; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP5]]
-; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x ptr> poison, ptr [[NEXT_GEP2]], i32 0
-; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x ptr> [[TMP6]], ptr [[NEXT_GEP3]], i32 1
-; CHECK-NEXT: [[TMP8:%.*]] = icmp ne <2 x ptr> [[TMP3]], zeroinitializer
-; CHECK-NEXT: [[TMP9:%.*]] = icmp ne <2 x ptr> [[TMP7]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP10]])
-; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP11]])
-; CHECK-NEXT: [[TMP12:%.*]] = extractelement <2 x i1> [[TMP9]], i32 0
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP12]])
-; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i1> [[TMP9]], i32 1
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP13]])
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 0
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 2
-; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP14]], align 1
-; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP15]], align 1
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[PTR_START_1:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP4]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x ptr> [[TMP6]], ptr [[TMP5]], i32 1
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP8]], i32 0
+; CHECK-NEXT: [[TMP11:%.*]] = insertelement <2 x ptr> [[TMP10]], ptr [[TMP9]], i32 1
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <2 x ptr> [[TMP7]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <2 x ptr> [[TMP11]], zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP12]], i32 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP14]])
+; CHECK-NEXT: [[TMP15:%.*]] = extractelement <2 x i1> [[TMP12]], i32 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP15]])
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP13]], i32 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP16]])
+; CHECK-NEXT: [[TMP17:%.*]] = extractelement <2 x i1> [[TMP13]], i32 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP17]])
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP4]], i32 0
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[TMP4]], i32 2
+; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP18]], align 1
+; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP19]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000
-; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
+; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000
+; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
; CHECK: middle.block:
; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK: vec.epilog.iter.check:
-; CHECK-NEXT: [[IND_END6:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 10000
+; CHECK-NEXT: [[IND_END1:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 10000
; CHECK-NEXT: br i1 true, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
; CHECK: vec.epilog.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PTR_START_1]], [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 10000, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; CHECK-NEXT: [[IND_END5:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 10000
+; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 10000
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
-; CHECK-NEXT: [[INDEX8:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT11:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[INDEX8]], 0
-; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP17]]
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX8]], 1
-; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP18]]
-; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x ptr> poison, ptr [[NEXT_GEP9]], i32 0
-; CHECK-NEXT: [[TMP20:%.*]] = insertelement <2 x ptr> [[TMP19]], ptr [[NEXT_GEP10]], i32 1
-; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <2 x ptr> [[TMP20]], zeroinitializer
-; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x i1> [[TMP21]], i32 0
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP22]])
-; CHECK-NEXT: [[TMP23:%.*]] = extractelement <2 x i1> [[TMP21]], i32 1
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP23]])
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[NEXT_GEP9]], i32 0
-; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP24]], align 1
-; CHECK-NEXT: [[INDEX_NEXT11]] = add nuw i64 [[INDEX8]], 2
-; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT11]], 10000
-; CHECK-NEXT: br i1 [[TMP25]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
+; CHECK-NEXT: [[INDEX3:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT4:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[INDEX3]], 0
+; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[INDEX3]], 1
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP22]]
+; CHECK-NEXT: [[TMP25:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP23]], i32 0
+; CHECK-NEXT: [[TMP26:%.*]] = insertelement <2 x ptr> [[TMP25]], ptr [[TMP24]], i32 1
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <2 x ptr> [[TMP26]], zeroinitializer
+; CHECK-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP27]], i32 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP28]])
+; CHECK-NEXT: [[TMP29:%.*]] = extractelement <2 x i1> [[TMP27]], i32 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP29]])
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr i8, ptr [[TMP23]], i32 0
+; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP30]], align 1
+; CHECK-NEXT: [[INDEX_NEXT4]] = add nuw i64 [[INDEX3]], 2
+; CHECK-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT4]], 10000
+; CHECK-NEXT: br i1 [[TMP31]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
; CHECK: vec.epilog.middle.block:
; CHECK-NEXT: br i1 false, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; CHECK: vec.epilog.scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i64 [ 10000, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 10000, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL7:%.*]] = phi ptr [ [[IND_END5]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END6]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PTR_START_1]], [[ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 10000, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 10000, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END1]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PTR_START_1]], [[ITER_CHECK]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL4]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL7]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL2]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[CMP_I_I_I_I:%.*]] = icmp ne ptr [[PTR_IV]], null
; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP_I_I_I_I]])
; CHECK-NEXT: store i8 0, ptr [[PTR_IV]], align 1
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
index 1e79c3e..b915791 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
@@ -99,7 +99,7 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP8]], i64 1025)
; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; TFA_INTERLEAVE: vector.body:
-; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT4:%.*]], [[VECTOR_BODY]] ]
+; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT5:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]]
@@ -117,7 +117,6 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP13]], ptr [[TMP15]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP14]], ptr [[TMP18]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]])
; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]]
-; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT4]] = add i64 [[INDEX]], [[TMP6]]
; TFA_INTERLEAVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 2
; TFA_INTERLEAVE-NEXT: [[TMP21:%.*]] = add i64 [[INDEX_NEXT]], [[TMP20]]
@@ -254,7 +253,7 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP8]], i64 1025)
; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; TFA_INTERLEAVE: vector.body:
-; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT5:%.*]], [[VECTOR_BODY]] ]
+; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT6:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
@@ -284,7 +283,6 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP25]], i32 8, <vscale x 2 x i1> [[TMP23]])
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI4]], ptr [[TMP28]], i32 8, <vscale x 2 x i1> [[TMP24]])
; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]]
-; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT5]] = add i64 [[INDEX]], [[TMP6]]
; TFA_INTERLEAVE-NEXT: [[TMP29:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP30:%.*]] = mul i64 [[TMP29]], 2
; TFA_INTERLEAVE-NEXT: [[TMP31:%.*]] = add i64 [[INDEX_NEXT]], [[TMP30]]
@@ -437,7 +435,7 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP8]], i64 1025)
; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; TFA_INTERLEAVE: vector.body:
-; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT5:%.*]], [[VECTOR_BODY]] ]
+; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT6:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
@@ -469,7 +467,6 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP27]], i32 8, <vscale x 2 x i1> [[TMP25]])
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI4]], ptr [[TMP30]], i32 8, <vscale x 2 x i1> [[TMP26]])
; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]]
-; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT5]] = add i64 [[INDEX]], [[TMP6]]
; TFA_INTERLEAVE-NEXT: [[TMP31:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP32:%.*]] = mul i64 [[TMP31]], 2
; TFA_INTERLEAVE-NEXT: [[TMP33:%.*]] = add i64 [[INDEX_NEXT]], [[TMP32]]
@@ -771,7 +768,7 @@ define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP8]], i64 1025)
; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; TFA_INTERLEAVE: vector.body:
-; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT4:%.*]], [[VECTOR_BODY]] ]
+; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT5:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]]
@@ -789,7 +786,6 @@ define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP13]], ptr [[TMP15]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP14]], ptr [[TMP18]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]])
; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]]
-; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT4]] = add i64 [[INDEX]], [[TMP6]]
; TFA_INTERLEAVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 2
; TFA_INTERLEAVE-NEXT: [[TMP21:%.*]] = add i64 [[INDEX_NEXT]], [[TMP20]]
@@ -970,7 +966,7 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, doub
; TFA_INTERLEAVE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x double> [[BROADCAST_SPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; TFA_INTERLEAVE: vector.body:
-; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT4:%.*]], [[VECTOR_BODY]] ]
+; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT5:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[VEC_PHI:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ]
@@ -997,7 +993,6 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, doub
; TFA_INTERLEAVE-NEXT: [[TMP25:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]], <vscale x 2 x double> [[TMP14]], <vscale x 2 x double> shufflevector (<vscale x 2 x double> insertelement (<vscale x 2 x double> poison, double -0.000000e+00, i64 0), <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer)
; TFA_INTERLEAVE-NEXT: [[TMP26]] = call double @llvm.vector.reduce.fadd.nxv2f64(double [[TMP24]], <vscale x 2 x double> [[TMP25]])
; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]]
-; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT4]] = add i64 [[INDEX]], [[TMP6]]
; TFA_INTERLEAVE-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP28:%.*]] = mul i64 [[TMP27]], 2
; TFA_INTERLEAVE-NEXT: [[TMP29:%.*]] = add i64 [[INDEX_NEXT]], [[TMP28]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
index 1970ac9..809d2e8 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
@@ -20,7 +20,7 @@ define i32 @pr70988() {
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = icmp ult i64 1, [[UMAX]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT6:%.*]], [[PRED_LOAD_CONTINUE5:%.*]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE5:%.*]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi i1 [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[PRED_LOAD_CONTINUE5]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi i1 [ [[ACTIVE_LANE_MASK_ENTRY1]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT7:%.*]], [[PRED_LOAD_CONTINUE5]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[PRED_LOAD_CONTINUE5]] ]
@@ -50,7 +50,6 @@ define i32 @pr70988() {
; CHECK-NEXT: [[TMP17]] = select i1 [[ACTIVE_LANE_MASK]], i32 [[TMP15]], i32 [[VEC_PHI]]
; CHECK-NEXT: [[TMP18]] = select i1 [[ACTIVE_LANE_MASK2]], i32 [[TMP16]], i32 [[VEC_PHI3]]
; CHECK-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], 2
-; CHECK-NEXT: [[INDEX_NEXT6]] = add i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP19:%.*]] = add i64 [[INDEX_NEXT]], 1
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = icmp ult i64 [[INDEX_NEXT]], [[UMAX]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT7]] = icmp ult i64 [[TMP19]], [[UMAX]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
index fc67fb5..ad6e853 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
@@ -403,21 +403,6 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = sub i64 [[N]], [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[N]], [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = sub i64 [[N]], [[TMP16]]
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = icmp ugt i64 [[N]], [[TMP16]]
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = sub i64 [[N]], [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = icmp ugt i64 [[N]], [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = select i1 [[TMP23]], i64 [[TMP22]], i64 0
; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = mul i64 [[TMP25]], 8
; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP26]]
@@ -492,9 +477,9 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[TMP78:%.*]] = mul i64 [[TMP77]], 24
; CHECK-ORDERED-TF-NEXT: [[TMP79:%.*]] = add i64 [[INDEX]], [[TMP78]]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT12]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP73]], i64 [[TMP14]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT13]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP76]], i64 [[TMP19]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT14]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP79]], i64 [[TMP24]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT12]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP73]], i64 [[TMP9]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT13]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP76]], i64 [[TMP9]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT14]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP79]], i64 [[TMP9]])
; CHECK-ORDERED-TF-NEXT: [[TMP80:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-ORDERED-TF-NEXT: [[TMP81:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT12]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-ORDERED-TF-NEXT: [[TMP82:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT13]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
@@ -1715,21 +1700,6 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = sub i64 [[N]], [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[N]], [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = sub i64 [[N]], [[TMP16]]
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = icmp ugt i64 [[N]], [[TMP16]]
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = sub i64 [[N]], [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = icmp ugt i64 [[N]], [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = select i1 [[TMP23]], i64 [[TMP22]], i64 0
; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = mul i64 [[TMP25]], 8
; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP26]]
@@ -1826,9 +1796,9 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[TMP96:%.*]] = mul i64 [[TMP95]], 24
; CHECK-ORDERED-TF-NEXT: [[TMP97:%.*]] = add i64 [[INDEX]], [[TMP96]]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP91]], i64 [[TMP14]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP94]], i64 [[TMP19]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP97]], i64 [[TMP24]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP91]], i64 [[TMP9]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP94]], i64 [[TMP9]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP97]], i64 [[TMP9]])
; CHECK-ORDERED-TF-NEXT: [[TMP98:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-ORDERED-TF-NEXT: [[TMP99:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT16]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-ORDERED-TF-NEXT: [[TMP100:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT17]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
@@ -2129,21 +2099,6 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = sub i64 [[N]], [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[N]], [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = sub i64 [[N]], [[TMP16]]
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = icmp ugt i64 [[N]], [[TMP16]]
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = sub i64 [[N]], [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = icmp ugt i64 [[N]], [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = select i1 [[TMP23]], i64 [[TMP22]], i64 0
; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = mul i64 [[TMP25]], 8
; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP26]]
@@ -2240,9 +2195,9 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[TMP96:%.*]] = mul i64 [[TMP95]], 24
; CHECK-ORDERED-TF-NEXT: [[TMP97:%.*]] = add i64 [[INDEX]], [[TMP96]]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP91]], i64 [[TMP14]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP94]], i64 [[TMP19]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP97]], i64 [[TMP24]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP91]], i64 [[TMP9]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP94]], i64 [[TMP9]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP97]], i64 [[TMP9]])
; CHECK-ORDERED-TF-NEXT: [[TMP98:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-ORDERED-TF-NEXT: [[TMP99:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT16]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-ORDERED-TF-NEXT: [[TMP100:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT17]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll
index 24d2127..12889c2 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll
@@ -146,13 +146,13 @@ define void @main_vf_vscale_x_16(ptr %A) #0 {
; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-VF8-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK-VF8: vec.epilog.vector.body:
-; CHECK-VF8-NEXT: [[INDEX2:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-VF8-NEXT: [[TMP19:%.*]] = add i64 [[INDEX2]], 0
+; CHECK-VF8-NEXT: [[INDEX1:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-VF8-NEXT: [[TMP19:%.*]] = add i64 [[INDEX1]], 0
; CHECK-VF8-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP19]]
; CHECK-VF8-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[TMP20]], i32 0
; CHECK-VF8-NEXT: store <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, ptr [[TMP21]], align 1
-; CHECK-VF8-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX2]], 8
-; CHECK-VF8-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 1024
+; CHECK-VF8-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8
+; CHECK-VF8-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024
; CHECK-VF8-NEXT: br i1 [[TMP22]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-VF8: vec.epilog.middle.block:
; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
@@ -246,13 +246,13 @@ define void @main_vf_vscale_x_2(ptr %A) #0 vscale_range(8, 8) {
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
-; CHECK-NEXT: [[INDEX2:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP19:%.*]] = add i64 [[INDEX2]], 0
+; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP19:%.*]] = add i64 [[INDEX1]], 0
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP19]]
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[TMP20]], i32 0
; CHECK-NEXT: store <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, ptr [[TMP21]], align 1
-; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX2]], 8
-; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 1024
+; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8
+; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024
; CHECK-NEXT: br i1 [[TMP22]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: vec.epilog.middle.block:
; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
@@ -315,13 +315,13 @@ define void @main_vf_vscale_x_2(ptr %A) #0 vscale_range(8, 8) {
; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-VF8-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK-VF8: vec.epilog.vector.body:
-; CHECK-VF8-NEXT: [[INDEX2:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-VF8-NEXT: [[TMP19:%.*]] = add i64 [[INDEX2]], 0
+; CHECK-VF8-NEXT: [[INDEX1:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-VF8-NEXT: [[TMP19:%.*]] = add i64 [[INDEX1]], 0
; CHECK-VF8-NEXT: [[TMP20:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP19]]
; CHECK-VF8-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[TMP20]], i32 0
; CHECK-VF8-NEXT: store <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, ptr [[TMP21]], align 1
-; CHECK-VF8-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX2]], 8
-; CHECK-VF8-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 1024
+; CHECK-VF8-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8
+; CHECK-VF8-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024
; CHECK-VF8-NEXT: br i1 [[TMP22]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-VF8: vec.epilog.middle.block:
; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
@@ -374,66 +374,65 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 {
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 10000, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 32
-; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START:%.*]], i64 [[N_VEC]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 16
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], [[TMP11]]
-; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 0
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 16
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 [[TMP15]]
-; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP13]], align 1
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 1
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[START:%.*]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP13]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[TMP14]], i32 0
+; CHECK-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[TMP17]], 16
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP18]]
; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP16]], align 1
+; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP19]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
-; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 10000, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK: vec.epilog.iter.check:
-; CHECK-NEXT: [[IND_END7:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]]
+; CHECK-NEXT: [[IND_END4:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]]
; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 10000, [[N_VEC]]
-; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 8
-; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP19]]
+; CHECK-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP22:%.*]] = mul i64 [[TMP21]], 8
+; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP22]]
; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
; CHECK: vec.epilog.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[START]], [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 8
-; CHECK-NEXT: [[N_MOD_VF3:%.*]] = urem i64 10000, [[TMP21]]
-; CHECK-NEXT: [[N_VEC4:%.*]] = sub i64 10000, [[N_MOD_VF3]]
-; CHECK-NEXT: [[IND_END6:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC4]]
-; CHECK-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP23:%.*]] = mul i64 [[TMP22]], 8
+; CHECK-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP24:%.*]] = mul i64 [[TMP23]], 8
+; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem i64 10000, [[TMP24]]
+; CHECK-NEXT: [[N_VEC3:%.*]] = sub i64 10000, [[N_MOD_VF2]]
+; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC3]]
+; CHECK-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP26:%.*]] = mul i64 [[TMP25]], 8
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
-; CHECK-NEXT: [[INDEX10:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[INDEX10]], 0
-; CHECK-NEXT: [[NEXT_GEP11:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP24]]
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[NEXT_GEP11]], i32 0
-; CHECK-NEXT: store <vscale x 8 x i8> zeroinitializer, ptr [[TMP25]], align 1
-; CHECK-NEXT: [[INDEX_NEXT12]] = add nuw i64 [[INDEX10]], [[TMP23]]
-; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT12]], [[N_VEC4]]
-; CHECK-NEXT: br i1 [[TMP26]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: [[INDEX7:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT8:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[INDEX7]], 0
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP27]]
+; CHECK-NEXT: [[TMP29:%.*]] = getelementptr i8, ptr [[TMP28]], i32 0
+; CHECK-NEXT: store <vscale x 8 x i8> zeroinitializer, ptr [[TMP29]], align 1
+; CHECK-NEXT: [[INDEX_NEXT8]] = add nuw i64 [[INDEX7]], [[TMP26]]
+; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT8]], [[N_VEC3]]
+; CHECK-NEXT: br i1 [[TMP30]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: vec.epilog.middle.block:
-; CHECK-NEXT: [[CMP_N9:%.*]] = icmp eq i64 10000, [[N_VEC4]]
-; CHECK-NEXT: br i1 [[CMP_N9]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
+; CHECK-NEXT: [[CMP_N6:%.*]] = icmp eq i64 10000, [[N_VEC3]]
+; CHECK-NEXT: br i1 [[CMP_N6]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; CHECK: vec.epilog.scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i64 [ [[N_VEC4]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL8:%.*]] = phi ptr [ [[IND_END6]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END7]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[START]], [[ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC3]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL5:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END4]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[START]], [[ITER_CHECK]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL5]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL8]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL5]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: store i8 0, ptr [[PTR_IV]], align 1
; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr inbounds i8, ptr [[PTR_IV]], i64 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -457,57 +456,56 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 {
; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 10000, [[N_MOD_VF]]
; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 32
-; CHECK-VF8-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START:%.*]], i64 [[N_VEC]]
; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-VF8: vector.body:
; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-VF8-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 0
-; CHECK-VF8-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP6]]
; CHECK-VF8-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-VF8-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 16
; CHECK-VF8-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 0
-; CHECK-VF8-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], [[TMP9]]
-; CHECK-VF8-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP10]]
-; CHECK-VF8-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 0
-; CHECK-VF8-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF8-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 16
-; CHECK-VF8-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 [[TMP13]]
-; CHECK-VF8-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP11]], align 1
+; CHECK-VF8-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 1
+; CHECK-VF8-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], [[TMP10]]
+; CHECK-VF8-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[START:%.*]], i64 [[TMP6]]
+; CHECK-VF8-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP11]]
+; CHECK-VF8-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP12]], i32 0
+; CHECK-VF8-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-VF8-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 16
+; CHECK-VF8-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP12]], i64 [[TMP16]]
; CHECK-VF8-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP14]], align 1
+; CHECK-VF8-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP17]], align 1
; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-VF8-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-VF8-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-VF8-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-VF8-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-VF8: middle.block:
; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 10000, [[N_VEC]]
; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK-VF8: vec.epilog.iter.check:
-; CHECK-VF8-NEXT: [[IND_END4:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]]
+; CHECK-VF8-NEXT: [[IND_END1:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]]
; CHECK-VF8-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 10000, [[N_VEC]]
; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8
; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
; CHECK-VF8: vec.epilog.ph:
-; CHECK-VF8-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[START]], [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; CHECK-VF8-NEXT: [[IND_END3:%.*]] = getelementptr i8, ptr [[START]], i64 10000
+; CHECK-VF8-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 10000
; CHECK-VF8-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK-VF8: vec.epilog.vector.body:
-; CHECK-VF8-NEXT: [[INDEX7:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT9:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-VF8-NEXT: [[TMP16:%.*]] = add i64 [[INDEX7]], 0
-; CHECK-VF8-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP16]]
-; CHECK-VF8-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[NEXT_GEP8]], i32 0
-; CHECK-VF8-NEXT: store <8 x i8> zeroinitializer, ptr [[TMP17]], align 1
-; CHECK-VF8-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX7]], 8
-; CHECK-VF8-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT9]], 10000
-; CHECK-VF8-NEXT: br i1 [[TMP18]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-VF8-NEXT: [[INDEX3:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT4:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-VF8-NEXT: [[TMP19:%.*]] = add i64 [[INDEX3]], 0
+; CHECK-VF8-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP19]]
+; CHECK-VF8-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP20]], i32 0
+; CHECK-VF8-NEXT: store <8 x i8> zeroinitializer, ptr [[TMP21]], align 1
+; CHECK-VF8-NEXT: [[INDEX_NEXT4]] = add nuw i64 [[INDEX3]], 8
+; CHECK-VF8-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT4]], 10000
+; CHECK-VF8-NEXT: br i1 [[TMP22]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-VF8: vec.epilog.middle.block:
; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; CHECK-VF8: vec.epilog.scalar.ph:
-; CHECK-VF8-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i64 [ 10000, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
-; CHECK-VF8-NEXT: [[BC_RESUME_VAL5:%.*]] = phi ptr [ [[IND_END3]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END4]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[START]], [[ITER_CHECK]] ]
+; CHECK-VF8-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 10000, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
+; CHECK-VF8-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END1]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[START]], [[ITER_CHECK]] ]
; CHECK-VF8-NEXT: br label [[LOOP:%.*]]
; CHECK-VF8: loop:
-; CHECK-VF8-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL2]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-VF8-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL5]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-VF8-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-VF8-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL2]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-VF8-NEXT: store i8 0, ptr [[PTR_IV]], align 1
; CHECK-VF8-NEXT: [[PTR_IV_NEXT]] = getelementptr inbounds i8, ptr [[PTR_IV]], i64 1
; CHECK-VF8-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll
index cfb0f9e..8b64d7a 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll
@@ -23,54 +23,54 @@ define ptr @test(ptr %start.1, ptr %start.2, ptr %end) {
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START_1:%.*]], i64 [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[N_VEC]], 8
; CHECK-NEXT: [[IND_END3:%.*]] = getelementptr i8, ptr [[START_2]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP33:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP34:%.*]] = mul i64 [[TMP33]], 4
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START_1]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 2
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 2
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 8, [[TMP12]]
-; CHECK-NEXT: [[TMP14:%.*]] = mul i64 [[TMP11]], 0
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP14]], i64 0
+; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 2
+; CHECK-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], 2
+; CHECK-NEXT: [[TMP15:%.*]] = mul i64 8, [[TMP14]]
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP13]], 0
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP16]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP15:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP15]]
-; CHECK-NEXT: [[VECTOR_GEP:%.*]] = mul <vscale x 2 x i64> [[TMP16]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 8, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP]]
-; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[TMP11]], 1
-; CHECK-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP18]], i64 0
+; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; CHECK-NEXT: [[TMP18:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP17]]
+; CHECK-NEXT: [[VECTOR_GEP:%.*]] = mul <vscale x 2 x i64> [[TMP18]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 8, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP]]
+; CHECK-NEXT: [[TMP20:%.*]] = mul i64 [[TMP13]], 1
+; CHECK-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP20]], i64 0
; CHECK-NEXT: [[DOTSPLAT6:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT5]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP19:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT6]], [[TMP19]]
-; CHECK-NEXT: [[VECTOR_GEP7:%.*]] = mul <vscale x 2 x i64> [[TMP20]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 8, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP7]]
-; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP23:%.*]] = mul i64 [[TMP22]], 8
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START_2]], i64 [[TMP23]]
-; CHECK-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP25:%.*]] = mul i64 [[TMP24]], 2
-; CHECK-NEXT: [[TMP26:%.*]] = add i64 [[TMP25]], 0
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[INDEX]], [[TMP26]]
+; CHECK-NEXT: [[TMP21:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; CHECK-NEXT: [[TMP22:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT6]], [[TMP21]]
+; CHECK-NEXT: [[VECTOR_GEP7:%.*]] = mul <vscale x 2 x i64> [[TMP22]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 8, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP7]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP26:%.*]] = mul i64 [[TMP25]], 2
+; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 0
; CHECK-NEXT: [[TMP28:%.*]] = mul i64 [[TMP27]], 8
-; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[START_2]], i64 [[TMP28]]
-; CHECK-NEXT: [[TMP29:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 0
-; CHECK-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP31:%.*]] = mul i64 [[TMP30]], 2
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 [[TMP31]]
-; CHECK-NEXT: store <vscale x 2 x i64> zeroinitializer, ptr [[TMP29]], align 8
+; CHECK-NEXT: [[TMP29:%.*]] = add i64 [[OFFSET_IDX]], [[TMP28]]
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr i8, ptr [[START_2]], i64 [[TMP24]]
+; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i8, ptr [[START_2]], i64 [[TMP29]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i64, ptr [[TMP30]], i32 0
+; CHECK-NEXT: [[TMP33:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP34:%.*]] = mul i64 [[TMP33]], 2
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr i64, ptr [[TMP30]], i64 [[TMP34]]
; CHECK-NEXT: store <vscale x 2 x i64> zeroinitializer, ptr [[TMP32]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP34]]
-; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP13]]
-; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: store <vscale x 2 x i64> zeroinitializer, ptr [[TMP35]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
+; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP15]]
+; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP36]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; CHECK-NEXT: [[CMO:%.*]] = sub i64 [[N_VEC]], 1
-; CHECK-NEXT: [[TMP36:%.*]] = mul i64 [[CMO]], 8
-; CHECK-NEXT: [[IND_ESCAPE:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[TMP36]]
+; CHECK-NEXT: [[TMP37:%.*]] = mul i64 [[CMO]], 8
+; CHECK-NEXT: [[IND_ESCAPE:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[TMP37]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[START_1]], [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll
index 1a6e83a..2acc1dd 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll
@@ -25,21 +25,6 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[UMAX]], [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[UMAX]], [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[UMAX]], [[TMP11]]
-; CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0
-; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 16
-; CHECK-NEXT: [[TMP17:%.*]] = sub i64 [[UMAX]], [[TMP16]]
-; CHECK-NEXT: [[TMP18:%.*]] = icmp ugt i64 [[UMAX]], [[TMP16]]
-; CHECK-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 0
-; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 16
-; CHECK-NEXT: [[TMP22:%.*]] = sub i64 [[UMAX]], [[TMP21]]
-; CHECK-NEXT: [[TMP23:%.*]] = icmp ugt i64 [[UMAX]], [[TMP21]]
-; CHECK-NEXT: [[TMP24:%.*]] = select i1 [[TMP23]], i64 [[TMP22]], i64 0
; CHECK-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP26:%.*]] = mul i64 [[TMP25]], 4
; CHECK-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP26]]
@@ -107,9 +92,9 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[TMP70:%.*]] = mul i64 [[TMP69]], 12
; CHECK-NEXT: [[TMP71:%.*]] = add i64 [[INDEX6]], [[TMP70]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX6]], i64 [[TMP9]])
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT11]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP65]], i64 [[TMP14]])
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT12]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP68]], i64 [[TMP19]])
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT13]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP71]], i64 [[TMP24]])
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT11]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP65]], i64 [[TMP9]])
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT12]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP68]], i64 [[TMP9]])
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT13]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP71]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP72:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP73:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT11]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP74:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT12]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
@@ -167,21 +152,6 @@ define void @cond_memset(i32 %val, ptr noalias readonly %cond_ptr, ptr noalias %
; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[UMAX]], [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[UMAX]], [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[UMAX]], [[TMP11]]
-; CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0
-; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 16
-; CHECK-NEXT: [[TMP17:%.*]] = sub i64 [[UMAX]], [[TMP16]]
-; CHECK-NEXT: [[TMP18:%.*]] = icmp ugt i64 [[UMAX]], [[TMP16]]
-; CHECK-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 0
-; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 16
-; CHECK-NEXT: [[TMP22:%.*]] = sub i64 [[UMAX]], [[TMP21]]
-; CHECK-NEXT: [[TMP23:%.*]] = icmp ugt i64 [[UMAX]], [[TMP21]]
-; CHECK-NEXT: [[TMP24:%.*]] = select i1 [[TMP23]], i64 [[TMP22]], i64 0
; CHECK-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP26:%.*]] = mul i64 [[TMP25]], 4
; CHECK-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP26]]
@@ -275,9 +245,9 @@ define void @cond_memset(i32 %val, ptr noalias readonly %cond_ptr, ptr noalias %
; CHECK-NEXT: [[TMP92:%.*]] = mul i64 [[TMP91]], 12
; CHECK-NEXT: [[TMP93:%.*]] = add i64 [[INDEX6]], [[TMP92]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX6]], i64 [[TMP9]])
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT14]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP87]], i64 [[TMP14]])
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT15]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP90]], i64 [[TMP19]])
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP93]], i64 [[TMP24]])
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT14]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP87]], i64 [[TMP9]])
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT15]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP90]], i64 [[TMP9]])
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP93]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP94:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP95:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT14]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP96:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT15]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
index 899fcce..3bab341e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
@@ -19,10 +19,12 @@ target triple = "aarch64-unknown-linux-gnu"
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
-; CHECK-NEXT: EMIT ir<%ptr.iv.1> = WIDEN-POINTER-INDUCTION ir<%start.1>, 8
; CHECK-NEXT: EMIT ir<%ptr.iv.2> = WIDEN-POINTER-INDUCTION ir<%start.2>, 1
+; CHECK-NEXT: vp<[[PTR_IDX:%.+]]> = DERIVED-IV ir<0> + vp<[[CAN_IV]]> * ir<8>
+; CHECK-NEXT: vp<[[PTR_IDX_STEPS:%.+]]> = SCALAR-STEPS vp<[[PTR_IDX]]>, ir<8>
+; CHECK-NEXT: EMIT vp<[[PTR_IV_1:%.+]]> = ptradd ir<%start.1>, vp<[[PTR_IDX_STEPS]]>
; CHECK-NEXT: WIDEN-GEP Var[Inv] ir<%ptr.iv.2.next> = getelementptr inbounds ir<%ptr.iv.2>, ir<1>
-; CHECK-NEXT: vp<[[VEC_PTR:%.+]]> = vector-pointer ir<%ptr.iv.1>
+; CHECK-NEXT: vp<[[VEC_PTR:%.+]]> = vector-pointer vp<[[PTR_IV_1]]>
; CHECK-NEXT: WIDEN store vp<[[VEC_PTR]]>, ir<%ptr.iv.2.next>
; CHECK-NEXT: vp<[[VEC_PTR2:%.+]]> = vector-pointer ir<%ptr.iv.2>
; CHECK-NEXT: WIDEN ir<%lv> = load vp<[[VEC_PTR2]]>
@@ -59,9 +61,6 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias
; CHECK: vector.body:
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START_2]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 2
; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 1
@@ -73,6 +72,9 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias
; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP12]]
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = mul <vscale x 2 x i64> [[TMP13]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, <vscale x 2 x ptr> [[TMP14]], i64 1
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr ptr, ptr [[NEXT_GEP]], i32 0
; CHECK-NEXT: store <vscale x 2 x ptr> [[TMP15]], ptr [[TMP16]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
index 7226048..126ceac 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
@@ -32,21 +32,20 @@ define void @widen_ptr_phi_unrolled(ptr noalias nocapture %a, ptr noalias nocapt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 3
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[C]], i64 [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 5
-; CHECK-NEXT: [[TMP9:%.*]] = shl i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[C]], i64 [[TMP8]]
-; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[TMP10]], i64 [[TMP9]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[NEXT_GEP]], align 4
-; CHECK-NEXT: [[WIDE_VEC3:%.*]] = load <vscale x 8 x i32>, ptr [[NEXT_GEP2]], align 4
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP7:%.*]] = shl nuw nsw i64 [[TMP6]], 5
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[C]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[C]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP9]], i64 [[TMP7]]
+; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP8]], align 4
+; CHECK-NEXT: [[WIDE_VEC2:%.*]] = load <vscale x 8 x i32>, ptr [[TMP10]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC3]])
-; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC4]], 0
-; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC4]], 1
+; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC2]])
+; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC3]], 0
+; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC3]], 1
; CHECK-NEXT: [[TMP15:%.*]] = add nsw <vscale x 4 x i32> [[TMP11]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP16:%.*]] = add nsw <vscale x 4 x i32> [[TMP13]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
@@ -148,21 +147,21 @@ define void @widen_2ptrs_phi_unrolled(ptr noalias nocapture %dst, ptr noalias no
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP7:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP7]]
-; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP8]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX4]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = shl nuw nsw i64 [[TMP9]], 2
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 [[TMP10]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[NEXT_GEP]], align 4
-; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 4 x i32>, ptr [[TMP11]], align 4
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP7]], i64 [[TMP10]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
+; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i32>, ptr [[TMP11]], align 4
; CHECK-NEXT: [[TMP12:%.*]] = shl nsw <vscale x 4 x i32> [[WIDE_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP13:%.*]] = shl nsw <vscale x 4 x i32> [[WIDE_LOAD7]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP13:%.*]] = shl nsw <vscale x 4 x i32> [[WIDE_LOAD5]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP15:%.*]] = shl nuw nsw i64 [[TMP14]], 2
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[NEXT_GEP5]], i64 [[TMP15]]
-; CHECK-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[NEXT_GEP5]], align 4
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP8]], i64 [[TMP15]]
+; CHECK-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP8]], align 4
; CHECK-NEXT: store <vscale x 4 x i32> [[TMP13]], ptr [[TMP16]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -246,12 +245,12 @@ define i32 @pointer_iv_mixed(ptr noalias %a, ptr noalias %b, i64 %n) #0 {
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = shl <vscale x 2 x i64> [[TMP9]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 2, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP]]
-; CHECK-NEXT: [[TMP11:%.*]] = shl i64 [[INDEX]], 3
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP11]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 2 x ptr> [[TMP10]], i64 0
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP13]] = add <vscale x 2 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: store <vscale x 2 x ptr> [[TMP10]], ptr [[NEXT_GEP]], align 8
+; CHECK-NEXT: store <vscale x 2 x ptr> [[TMP10]], ptr [[TMP11]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP8]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/uniform-args-call-variants.ll b/llvm/test/Transforms/LoopVectorize/AArch64/uniform-args-call-variants.ll
index 4957bbe..d8f14f3 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/uniform-args-call-variants.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/uniform-args-call-variants.ll
@@ -40,9 +40,6 @@ define void @test_uniform(ptr noalias %dst, ptr readonly %src, i64 %uniform , i6
; INTERLEAVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; INTERLEAVE-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 2
; INTERLEAVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 [[TMP3]])
-; INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; INTERLEAVE-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 2
-; INTERLEAVE-NEXT: [[TMP7:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 [[TMP6]])
; INTERLEAVE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; INTERLEAVE-NEXT: [[TMP9:%.*]] = shl i64 [[TMP8]], 1
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 [[N]])
@@ -71,7 +68,7 @@ define void @test_uniform(ptr noalias %dst, ptr readonly %src, i64 %uniform , i6
; INTERLEAVE-NEXT: [[TMP21:%.*]] = shl i64 [[TMP20]], 1
; INTERLEAVE-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], [[TMP21]]
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP4]])
-; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT4]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP22]], i64 [[TMP7]])
+; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT4]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP22]], i64 [[TMP4]])
; INTERLEAVE-NEXT: [[TMP23:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
; INTERLEAVE-NEXT: br i1 [[TMP23]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
; INTERLEAVE: for.cond.cleanup:
@@ -129,9 +126,6 @@ define void @test_uniform_smaller_scalar(ptr noalias %dst, ptr readonly %src, i3
; INTERLEAVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; INTERLEAVE-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 2
; INTERLEAVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 [[TMP3]])
-; INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; INTERLEAVE-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 2
-; INTERLEAVE-NEXT: [[TMP7:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 [[TMP6]])
; INTERLEAVE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; INTERLEAVE-NEXT: [[TMP9:%.*]] = shl i64 [[TMP8]], 1
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 [[N]])
@@ -160,7 +154,7 @@ define void @test_uniform_smaller_scalar(ptr noalias %dst, ptr readonly %src, i3
; INTERLEAVE-NEXT: [[TMP21:%.*]] = shl i64 [[TMP20]], 1
; INTERLEAVE-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], [[TMP21]]
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP4]])
-; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT4]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP22]], i64 [[TMP7]])
+; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT4]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP22]], i64 [[TMP4]])
; INTERLEAVE-NEXT: [[TMP23:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
; INTERLEAVE-NEXT: br i1 [[TMP23]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP3:![0-9]+]]
; INTERLEAVE: for.cond.cleanup:
@@ -207,7 +201,6 @@ define void @test_uniform_not_invariant(ptr noalias %dst, ptr readonly %src, i64
; INTERLEAVE-SAME: (ptr noalias [[DST:%.*]], ptr readonly [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; INTERLEAVE-NEXT: entry:
; INTERLEAVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 2)
-; INTERLEAVE-NEXT: [[TMP1:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 2)
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = icmp ne i64 [[N]], 0
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = icmp ugt i64 [[N]], 1
; INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -237,7 +230,7 @@ define void @test_uniform_not_invariant(ptr noalias %dst, ptr readonly %src, i64
; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
; INTERLEAVE-NEXT: [[TMP11:%.*]] = or disjoint i64 [[INDEX]], 1
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = icmp ult i64 [[INDEX]], [[TMP0]]
-; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT5]] = icmp ult i64 [[TMP11]], [[TMP1]]
+; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT5]] = icmp ult i64 [[TMP11]], [[TMP0]]
; INTERLEAVE-NEXT: br i1 [[ACTIVE_LANE_MASK_NEXT]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP4:![0-9]+]]
; INTERLEAVE: for.cond.cleanup:
; INTERLEAVE-NEXT: ret void
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll
index 938e1de..6953834 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll
@@ -45,8 +45,8 @@ define float @test(ptr nocapture readonly %pA, ptr nocapture readonly %pB, i32 %
; CHECK-NEXT: [[TMP9:%.*]] = fsub fast <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD6]]
; CHECK-NEXT: [[TMP10:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[TMP9]])
; CHECK-NEXT: [[TMP11:%.*]] = fdiv fast <4 x float> [[TMP10]], [[TMP8]]
-; CHECK-NEXT: [[TMP12:%.*]] = select <4 x i1> [[DOTNOT8]], <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, <4 x float> [[TMP11]]
-; CHECK-NEXT: [[PREDPHI]] = fadd fast <4 x float> [[VEC_PHI]], [[TMP12]]
+; CHECK-NEXT: [[TMP12:%.*]] = fadd fast <4 x float> [[TMP11]], [[VEC_PHI]]
+; CHECK-NEXT: [[PREDPHI]] = select <4 x i1> [[DOTNOT8]], <4 x float> [[VEC_PHI]], <4 x float> [[TMP12]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll
new file mode 100644
index 0000000..4e38630
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll
@@ -0,0 +1,132 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -p loop-vectorize -mattr="+v" -S %s | FileCheck %s
+
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
+target triple = "riscv64-unknown-linux-gnu"
+
+; Test case for https://github.com/llvm/llvm-project/issues/87378.
+define void @pr87378_vpinstruction_or_drop_poison_generating_flags(ptr %arg, i64 %a, i64 %b, i64 %c) {
+; CHECK-LABEL: define void @pr87378_vpinstruction_or_drop_poison_generating_flags(
+; CHECK-SAME: ptr [[ARG:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1001, [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1001, [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1001, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
+; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 8 x i64> [[TMP6]], zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 8 x i64> [[TMP7]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP8]]
+; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 8
+; CHECK-NEXT: [[TMP11:%.*]] = mul i64 1, [[TMP10]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP11]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[A]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[B]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[C]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT3]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP12]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT5]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ule <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp ule <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 8 x i1> [[TMP13]], <vscale x 8 x i1> [[TMP14]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP16:%.*]] = xor <vscale x 8 x i1> [[TMP13]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP17:%.*]] = or <vscale x 8 x i1> [[TMP15]], [[TMP16]]
+; CHECK-NEXT: [[TMP18:%.*]] = icmp ule <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT4]]
+; CHECK-NEXT: [[TMP19:%.*]] = select <vscale x 8 x i1> [[TMP17]], <vscale x 8 x i1> [[TMP18]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = xor <vscale x 8 x i1> [[TMP14]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP21:%.*]] = select <vscale x 8 x i1> [[TMP13]], <vscale x 8 x i1> [[TMP20]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = or <vscale x 8 x i1> [[TMP19]], [[TMP21]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 8 x i1> [[TMP19]], <vscale x 8 x i64> [[BROADCAST_SPLAT6]], <vscale x 8 x i64> shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 poison, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP23:%.*]] = extractelement <vscale x 8 x i64> [[PREDPHI]], i32 0
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i16, ptr [[ARG]], i64 [[TMP23]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i16, ptr [[TMP24]], i32 0
+; CHECK-NEXT: call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> zeroinitializer, ptr [[TMP25]], i32 2, <vscale x 8 x i1> [[TMP22]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1001, [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
+; CHECK: loop.header:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; CHECK-NEXT: [[C_1:%.*]] = icmp ule i64 [[IV]], [[A]]
+; CHECK-NEXT: br i1 [[C_1]], label [[THEN_1:%.*]], label [[ELSE_1:%.*]]
+; CHECK: then.1:
+; CHECK-NEXT: [[C_2:%.*]] = icmp ule i64 [[IV]], [[B]]
+; CHECK-NEXT: br i1 [[C_2]], label [[ELSE_1]], label [[MERGE:%.*]]
+; CHECK: else.1:
+; CHECK-NEXT: [[C_3:%.*]] = icmp ule i64 [[IV]], [[C]]
+; CHECK-NEXT: br i1 [[C_3]], label [[THEN_2:%.*]], label [[LOOP_LATCH]]
+; CHECK: then.2:
+; CHECK-NEXT: br label [[MERGE]]
+; CHECK: merge:
+; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ poison, [[THEN_1]] ], [ [[IV]], [[THEN_2]] ]
+; CHECK-NEXT: [[GETELEMENTPTR:%.*]] = getelementptr i16, ptr [[ARG]], i64 [[IDX]]
+; CHECK-NEXT: store i16 0, ptr [[GETELEMENTPTR]], align 2
+; CHECK-NEXT: br label [[LOOP_LATCH]]
+; CHECK: loop.latch:
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i64 [[IV]], 1000
+; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop.header
+
+loop.header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
+ %c.1 = icmp ule i64 %iv, %a
+ br i1 %c.1, label %then.1, label %else.1
+
+then.1:
+ %c.2 = icmp ule i64 %iv, %b
+ br i1 %c.2, label %else.1, label %merge
+
+else.1:
+ %c.3 = icmp ule i64 %iv, %c
+ br i1 %c.3, label %then.2, label %loop.latch
+
+then.2:
+ br label %merge
+
+merge:
+ %idx = phi i64 [ poison, %then.1 ], [ %iv, %then.2 ]
+ %getelementptr = getelementptr i16, ptr %arg, i64 %idx
+ store i16 0, ptr %getelementptr, align 2
+ br label %loop.latch
+
+loop.latch:
+ %iv.next = add i64 %iv, 1
+ %icmp = icmp eq i64 %iv, 1000
+ br i1 %icmp, label %exit, label %loop.header
+
+exit:
+ ret void
+}
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll
index e9541c1..6516b05 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll
@@ -639,87 +639,84 @@ define void @test_gather_not_profitable_pr48429(i32 %d, ptr readonly noalias %pt
; AVX512: vector.ph:
; AVX512-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 16
; AVX512-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
-; AVX512-NEXT: [[TMP13:%.*]] = mul i64 [[N_VEC]], 4
-; AVX512-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP13]]
-; AVX512-NEXT: [[TMP14:%.*]] = mul i64 [[N_VEC]], 64
-; AVX512-NEXT: [[IND_END9:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP14]]
+; AVX512-NEXT: [[TMP13:%.*]] = mul i64 [[N_VEC]], 64
+; AVX512-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP13]]
; AVX512-NEXT: br label [[VECTOR_BODY:%.*]]
; AVX512: vector.body:
; AVX512-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[DEST]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; AVX512-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 0
-; AVX512-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 4
-; AVX512-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP16]]
-; AVX512-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <16 x i64> <i64 0, i64 64, i64 128, i64 192, i64 256, i64 320, i64 384, i64 448, i64 512, i64 576, i64 640, i64 704, i64 768, i64 832, i64 896, i64 960>
-; AVX512-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
-; AVX512-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 0
-; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <16 x float>, ptr [[TMP19]], align 4, !alias.scope !8
-; AVX512-NEXT: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> [[WIDE_LOAD]], <16 x ptr> [[TMP17]], i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope !11, !noalias !13
-; AVX512-NEXT: [[TMP20:%.*]] = getelementptr float, ptr [[NEXT_GEP]], i32 0
-; AVX512-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x float>, ptr [[TMP20]], align 4, !alias.scope !15
-; AVX512-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, <16 x ptr> [[TMP17]], i64 1
-; AVX512-NEXT: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> [[WIDE_LOAD8]], <16 x ptr> [[TMP21]], i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope !11, !noalias !13
+; AVX512-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <16 x i64> <i64 0, i64 64, i64 128, i64 192, i64 256, i64 320, i64 384, i64 448, i64 512, i64 576, i64 640, i64 704, i64 768, i64 832, i64 896, i64 960>
+; AVX512-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4
+; AVX512-NEXT: [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 0
+; AVX512-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP15]]
+; AVX512-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[IDXPROM]]
+; AVX512-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i32 0
+; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <16 x float>, ptr [[TMP18]], align 4, !alias.scope [[META8:![0-9]+]]
+; AVX512-NEXT: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> [[WIDE_LOAD]], <16 x ptr> [[TMP14]], i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope [[META11:![0-9]+]], !noalias [[META13:![0-9]+]]
+; AVX512-NEXT: [[TMP19:%.*]] = getelementptr float, ptr [[TMP16]], i32 0
+; AVX512-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x float>, ptr [[TMP19]], align 4, !alias.scope [[META15:![0-9]+]]
+; AVX512-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, <16 x ptr> [[TMP14]], i64 1
+; AVX512-NEXT: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> [[WIDE_LOAD8]], <16 x ptr> [[TMP20]], i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope [[META11]], !noalias [[META13]]
; AVX512-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; AVX512-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 1024
-; AVX512-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; AVX512-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; AVX512-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; AVX512-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; AVX512: middle.block:
; AVX512-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; AVX512-NEXT: br i1 [[CMP_N]], label [[FOR_END]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; AVX512: vec.epilog.iter.check:
-; AVX512-NEXT: [[TMP23:%.*]] = mul i64 [[N_VEC]], 64
-; AVX512-NEXT: [[IND_END17:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP23]]
-; AVX512-NEXT: [[TMP24:%.*]] = mul i64 [[N_VEC]], 4
-; AVX512-NEXT: [[IND_END14:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP24]]
+; AVX512-NEXT: [[TMP22:%.*]] = mul i64 [[N_VEC]], 64
+; AVX512-NEXT: [[IND_END15:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP22]]
+; AVX512-NEXT: [[TMP23:%.*]] = mul i64 [[N_VEC]], 4
+; AVX512-NEXT: [[IND_END12:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP23]]
; AVX512-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]]
; AVX512-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8
; AVX512-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
; AVX512: vec.epilog.ph:
-; AVX512-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PTR]], [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; AVX512-NEXT: [[BC_RESUME_VAL10:%.*]] = phi ptr [ [[IND_END9]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[DEST]], [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; AVX512-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[DEST]], [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; AVX512-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; AVX512-NEXT: [[N_MOD_VF11:%.*]] = urem i64 [[TMP3]], 8
-; AVX512-NEXT: [[N_VEC12:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF11]]
-; AVX512-NEXT: [[TMP25:%.*]] = mul i64 [[N_VEC12]], 4
-; AVX512-NEXT: [[IND_END13:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP25]]
-; AVX512-NEXT: [[TMP26:%.*]] = mul i64 [[N_VEC12]], 64
-; AVX512-NEXT: [[IND_END16:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP26]]
+; AVX512-NEXT: [[N_MOD_VF9:%.*]] = urem i64 [[TMP3]], 8
+; AVX512-NEXT: [[N_VEC10:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF9]]
+; AVX512-NEXT: [[TMP24:%.*]] = mul i64 [[N_VEC10]], 4
+; AVX512-NEXT: [[IND_END11:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP24]]
+; AVX512-NEXT: [[TMP25:%.*]] = mul i64 [[N_VEC10]], 64
+; AVX512-NEXT: [[IND_END14:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP25]]
; AVX512-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; AVX512: vec.epilog.vector.body:
-; AVX512-NEXT: [[POINTER_PHI22:%.*]] = phi ptr [ [[BC_RESUME_VAL10]], [[VEC_EPILOG_PH]] ], [ [[PTR_IND23:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; AVX512-NEXT: [[INDEX20:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT26:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; AVX512-NEXT: [[TMP27:%.*]] = add i64 [[INDEX20]], 0
-; AVX512-NEXT: [[TMP28:%.*]] = mul i64 [[TMP27]], 4
-; AVX512-NEXT: [[NEXT_GEP21:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP28]]
-; AVX512-NEXT: [[TMP29:%.*]] = getelementptr i8, ptr [[POINTER_PHI22]], <8 x i64> <i64 0, i64 64, i64 128, i64 192, i64 256, i64 320, i64 384, i64 448>
-; AVX512-NEXT: [[TMP30:%.*]] = getelementptr inbounds float, ptr [[NEXT_GEP21]], i64 [[IDXPROM]]
-; AVX512-NEXT: [[TMP31:%.*]] = getelementptr inbounds float, ptr [[TMP30]], i32 0
-; AVX512-NEXT: [[WIDE_LOAD24:%.*]] = load <8 x float>, ptr [[TMP31]], align 4, !alias.scope !17
-; AVX512-NEXT: call void @llvm.masked.scatter.v8f32.v8p0(<8 x float> [[WIDE_LOAD24]], <8 x ptr> [[TMP29]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope !20, !noalias !22
-; AVX512-NEXT: [[TMP32:%.*]] = getelementptr float, ptr [[NEXT_GEP21]], i32 0
-; AVX512-NEXT: [[WIDE_LOAD25:%.*]] = load <8 x float>, ptr [[TMP32]], align 4, !alias.scope !24
-; AVX512-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, <8 x ptr> [[TMP29]], i64 1
-; AVX512-NEXT: call void @llvm.masked.scatter.v8f32.v8p0(<8 x float> [[WIDE_LOAD25]], <8 x ptr> [[TMP33]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope !20, !noalias !22
-; AVX512-NEXT: [[INDEX_NEXT26]] = add nuw i64 [[INDEX20]], 8
-; AVX512-NEXT: [[PTR_IND23]] = getelementptr i8, ptr [[POINTER_PHI22]], i64 512
-; AVX512-NEXT: [[TMP34:%.*]] = icmp eq i64 [[INDEX_NEXT26]], [[N_VEC12]]
-; AVX512-NEXT: br i1 [[TMP34]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; AVX512-NEXT: [[POINTER_PHI19:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[PTR_IND20:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; AVX512-NEXT: [[INDEX18:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT24:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; AVX512-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[POINTER_PHI19]], <8 x i64> <i64 0, i64 64, i64 128, i64 192, i64 256, i64 320, i64 384, i64 448>
+; AVX512-NEXT: [[OFFSET_IDX21:%.*]] = mul i64 [[INDEX18]], 4
+; AVX512-NEXT: [[TMP27:%.*]] = add i64 [[OFFSET_IDX21]], 0
+; AVX512-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP27]]
+; AVX512-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP28]], i64 [[IDXPROM]]
+; AVX512-NEXT: [[TMP30:%.*]] = getelementptr inbounds float, ptr [[TMP29]], i32 0
+; AVX512-NEXT: [[WIDE_LOAD22:%.*]] = load <8 x float>, ptr [[TMP30]], align 4, !alias.scope [[META17:![0-9]+]]
+; AVX512-NEXT: call void @llvm.masked.scatter.v8f32.v8p0(<8 x float> [[WIDE_LOAD22]], <8 x ptr> [[TMP26]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope [[META20:![0-9]+]], !noalias [[META22:![0-9]+]]
+; AVX512-NEXT: [[TMP31:%.*]] = getelementptr float, ptr [[TMP28]], i32 0
+; AVX512-NEXT: [[WIDE_LOAD23:%.*]] = load <8 x float>, ptr [[TMP31]], align 4, !alias.scope [[META24:![0-9]+]]
+; AVX512-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, <8 x ptr> [[TMP26]], i64 1
+; AVX512-NEXT: call void @llvm.masked.scatter.v8f32.v8p0(<8 x float> [[WIDE_LOAD23]], <8 x ptr> [[TMP32]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope [[META20]], !noalias [[META22]]
+; AVX512-NEXT: [[INDEX_NEXT24]] = add nuw i64 [[INDEX18]], 8
+; AVX512-NEXT: [[PTR_IND20]] = getelementptr i8, ptr [[POINTER_PHI19]], i64 512
+; AVX512-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT24]], [[N_VEC10]]
+; AVX512-NEXT: br i1 [[TMP33]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; AVX512: vec.epilog.middle.block:
-; AVX512-NEXT: [[CMP_N19:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC12]]
-; AVX512-NEXT: br i1 [[CMP_N19]], label [[FOR_END]], label [[VEC_EPILOG_SCALAR_PH]]
+; AVX512-NEXT: [[CMP_N17:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC10]]
+; AVX512-NEXT: br i1 [[CMP_N17]], label [[FOR_END]], label [[VEC_EPILOG_SCALAR_PH]]
; AVX512: vec.epilog.scalar.ph:
-; AVX512-NEXT: [[BC_RESUME_VAL15:%.*]] = phi ptr [ [[IND_END13]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END14]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PTR]], [[VECTOR_MEMCHECK]] ], [ [[PTR]], [[ITER_CHECK]] ]
-; AVX512-NEXT: [[BC_RESUME_VAL18:%.*]] = phi ptr [ [[IND_END16]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END17]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[DEST]], [[VECTOR_MEMCHECK]] ], [ [[DEST]], [[ITER_CHECK]] ]
+; AVX512-NEXT: [[BC_RESUME_VAL13:%.*]] = phi ptr [ [[IND_END11]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END12]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PTR]], [[VECTOR_MEMCHECK]] ], [ [[PTR]], [[ITER_CHECK]] ]
+; AVX512-NEXT: [[BC_RESUME_VAL16:%.*]] = phi ptr [ [[IND_END14]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END15]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[DEST]], [[VECTOR_MEMCHECK]] ], [ [[DEST]], [[ITER_CHECK]] ]
; AVX512-NEXT: br label [[FOR_BODY:%.*]]
; AVX512: for.body:
-; AVX512-NEXT: [[PTR_ADDR_012:%.*]] = phi ptr [ [[BC_RESUME_VAL15]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
-; AVX512-NEXT: [[DEST_ADDR_011:%.*]] = phi ptr [ [[BC_RESUME_VAL18]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[ADD_PTR6:%.*]], [[FOR_BODY]] ]
+; AVX512-NEXT: [[PTR_ADDR_012:%.*]] = phi ptr [ [[BC_RESUME_VAL13]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
+; AVX512-NEXT: [[DEST_ADDR_011:%.*]] = phi ptr [ [[BC_RESUME_VAL16]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[ADD_PTR6:%.*]], [[FOR_BODY]] ]
; AVX512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[PTR_ADDR_012]], i64 [[IDXPROM]]
-; AVX512-NEXT: [[TMP35:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; AVX512-NEXT: store float [[TMP35]], ptr [[DEST_ADDR_011]], align 4
-; AVX512-NEXT: [[TMP36:%.*]] = load float, ptr [[PTR_ADDR_012]], align 4
+; AVX512-NEXT: [[TMP34:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; AVX512-NEXT: store float [[TMP34]], ptr [[DEST_ADDR_011]], align 4
+; AVX512-NEXT: [[TMP35:%.*]] = load float, ptr [[PTR_ADDR_012]], align 4
; AVX512-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[DEST_ADDR_011]], i64 1
-; AVX512-NEXT: store float [[TMP36]], ptr [[ARRAYIDX5]], align 4
+; AVX512-NEXT: store float [[TMP35]], ptr [[ARRAYIDX5]], align 4
; AVX512-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, ptr [[PTR_ADDR_012]], i64 1
; AVX512-NEXT: [[ADD_PTR6]] = getelementptr inbounds float, ptr [[DEST_ADDR_011]], i64 16
; AVX512-NEXT: [[CMP_NOT:%.*]] = icmp eq ptr [[INCDEC_PTR]], [[ADD_PTR]]
@@ -774,30 +771,29 @@ define void @test_gather_not_profitable_pr48429(i32 %d, ptr readonly noalias %pt
; FVW2-NEXT: br label [[VECTOR_BODY:%.*]]
; FVW2: vector.body:
; FVW2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; FVW2-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 0
-; FVW2-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 4
-; FVW2-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP16]]
-; FVW2-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 0
-; FVW2-NEXT: [[TMP18:%.*]] = mul i64 [[TMP17]], 64
-; FVW2-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP18]]
-; FVW2-NEXT: [[TMP19:%.*]] = add i64 [[INDEX]], 1
-; FVW2-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 64
-; FVW2-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP20]]
-; FVW2-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
+; FVW2-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4
+; FVW2-NEXT: [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 0
+; FVW2-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP15]]
+; FVW2-NEXT: [[OFFSET_IDX9:%.*]] = mul i64 [[INDEX]], 64
+; FVW2-NEXT: [[TMP17:%.*]] = add i64 [[OFFSET_IDX9]], 0
+; FVW2-NEXT: [[TMP18:%.*]] = add i64 [[OFFSET_IDX9]], 64
+; FVW2-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP17]]
+; FVW2-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP18]]
+; FVW2-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[IDXPROM]]
; FVW2-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 0
-; FVW2-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP22]], align 4, !alias.scope !8
+; FVW2-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP22]], align 4, !alias.scope [[META8:![0-9]+]]
; FVW2-NEXT: [[TMP23:%.*]] = extractelement <2 x float> [[WIDE_LOAD]], i32 0
-; FVW2-NEXT: store float [[TMP23]], ptr [[NEXT_GEP9]], align 4, !alias.scope !11, !noalias !13
+; FVW2-NEXT: store float [[TMP23]], ptr [[TMP19]], align 4, !alias.scope [[META11:![0-9]+]], !noalias [[META13:![0-9]+]]
; FVW2-NEXT: [[TMP24:%.*]] = extractelement <2 x float> [[WIDE_LOAD]], i32 1
-; FVW2-NEXT: store float [[TMP24]], ptr [[NEXT_GEP10]], align 4, !alias.scope !11, !noalias !13
-; FVW2-NEXT: [[TMP25:%.*]] = getelementptr float, ptr [[NEXT_GEP]], i32 0
-; FVW2-NEXT: [[WIDE_LOAD11:%.*]] = load <2 x float>, ptr [[TMP25]], align 4, !alias.scope !15
-; FVW2-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[NEXT_GEP9]], i64 1
-; FVW2-NEXT: [[TMP27:%.*]] = getelementptr inbounds float, ptr [[NEXT_GEP10]], i64 1
-; FVW2-NEXT: [[TMP28:%.*]] = extractelement <2 x float> [[WIDE_LOAD11]], i32 0
-; FVW2-NEXT: store float [[TMP28]], ptr [[TMP26]], align 4, !alias.scope !11, !noalias !13
-; FVW2-NEXT: [[TMP29:%.*]] = extractelement <2 x float> [[WIDE_LOAD11]], i32 1
-; FVW2-NEXT: store float [[TMP29]], ptr [[TMP27]], align 4, !alias.scope !11, !noalias !13
+; FVW2-NEXT: store float [[TMP24]], ptr [[TMP20]], align 4, !alias.scope [[META11]], !noalias [[META13]]
+; FVW2-NEXT: [[TMP25:%.*]] = getelementptr float, ptr [[TMP16]], i32 0
+; FVW2-NEXT: [[WIDE_LOAD10:%.*]] = load <2 x float>, ptr [[TMP25]], align 4, !alias.scope [[META15:![0-9]+]]
+; FVW2-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i64 1
+; FVW2-NEXT: [[TMP27:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 1
+; FVW2-NEXT: [[TMP28:%.*]] = extractelement <2 x float> [[WIDE_LOAD10]], i32 0
+; FVW2-NEXT: store float [[TMP28]], ptr [[TMP26]], align 4, !alias.scope [[META11]], !noalias [[META13]]
+; FVW2-NEXT: [[TMP29:%.*]] = extractelement <2 x float> [[WIDE_LOAD10]], i32 1
+; FVW2-NEXT: store float [[TMP29]], ptr [[TMP27]], align 4, !alias.scope [[META11]], !noalias [[META13]]
; FVW2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; FVW2-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; FVW2-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/interleave-opaque-pointers.ll b/llvm/test/Transforms/LoopVectorize/X86/interleave-opaque-pointers.ll
index 022912f..1b0118e 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/interleave-opaque-pointers.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/interleave-opaque-pointers.ll
@@ -25,15 +25,14 @@ define void @test_pr55375_interleave_opaque_ptr(ptr %start, ptr %end) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 16
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 16
-; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x ptr> poison, ptr [[NEXT_GEP]], i32 0
-; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x ptr> [[TMP9]], ptr [[NEXT_GEP3]], i32 1
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr ptr, ptr [[NEXT_GEP]], i32 0
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 16
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 16
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP7]], i32 0
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x ptr> [[TMP9]], ptr [[TMP8]], i32 1
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr ptr, ptr [[TMP7]], i32 0
; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <2 x ptr> zeroinitializer, <2 x ptr> [[TMP10]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x ptr> [[TMP12]], <4 x ptr> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
; CHECK-NEXT: store <4 x ptr> [[INTERLEAVED_VEC]], ptr [[TMP11]], align 8
@@ -53,7 +52,7 @@ define void @test_pr55375_interleave_opaque_ptr(ptr %start, ptr %end) {
; CHECK-NEXT: store ptr null, ptr [[IV]], align 8
; CHECK-NEXT: [[IV_NEXT]] = getelementptr inbounds [[PAIR]], ptr [[IV]], i64 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[IV_NEXT]], [[END]]
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP2:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll
index 14acb6f..3f38abc 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll
@@ -29,7 +29,7 @@ define void @test(ptr noundef align 8 dereferenceable_or_null(16) %arr) #0 {
; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i64> [[VEC_IND]], <i64 1, i64 1, i64 1, i64 1>
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP1]], <4 x i1> [[TMP3]], <4 x i1> zeroinitializer
-; CHECK-NEXT: [[TMP5:%.*]] = or i64 [[TMP0]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP0]], 1
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[ARR]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[TMP6]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[TMP7]], i32 -3
diff --git a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
index 51d2648..dc474fb 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
@@ -273,64 +273,58 @@ define void @example3(i32 %n, ptr noalias nocapture %p, ptr noalias nocapture %q
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE19:%.*]] ]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT12:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT13:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT12]], <4 x i64> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[VEC_IV:%.*]] = or disjoint <4 x i64> [[BROADCAST_SPLAT13]], <i64 0, i64 1, i64 2, i64 3>
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE20:%.*]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
+; CHECK-NEXT: [[OFFSET_IDX8:%.*]] = shl i64 [[INDEX]], 2
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT13:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT14:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT13]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[VEC_IV:%.*]] = or disjoint <4 x i64> [[BROADCAST_SPLAT14]], <i64 0, i64 1, i64 2, i64 3>
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule <4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP3]], i64 0
; CHECK-NEXT: br i1 [[TMP4]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP5]]
-; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[Q:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[NEXT_GEP8]], align 16
-; CHECK-NEXT: store i32 [[TMP7]], ptr [[NEXT_GEP]], align 16
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[Q:%.*]], i64 [[OFFSET_IDX8]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[NEXT_GEP9]], align 16
+; CHECK-NEXT: store i32 [[TMP5]], ptr [[NEXT_GEP]], align 16
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
; CHECK: pred.store.continue:
-; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP3]], i64 1
-; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF14:%.*]], label [[PRED_STORE_CONTINUE15:%.*]]
-; CHECK: pred.store.if14:
-; CHECK-NEXT: [[TMP9:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP10:%.*]] = or disjoint i64 [[TMP9]], 4
-; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP10]]
-; CHECK-NEXT: [[TMP11:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP12:%.*]] = or disjoint i64 [[TMP11]], 4
-; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[NEXT_GEP9]], align 16
-; CHECK-NEXT: store i32 [[TMP13]], ptr [[NEXT_GEP5]], align 16
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE15]]
-; CHECK: pred.store.continue15:
-; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP3]], i64 2
-; CHECK-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF16:%.*]], label [[PRED_STORE_CONTINUE17:%.*]]
-; CHECK: pred.store.if16:
-; CHECK-NEXT: [[TMP15:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP16:%.*]] = or disjoint i64 [[TMP15]], 8
-; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP16]]
-; CHECK-NEXT: [[TMP17:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP18:%.*]] = or disjoint i64 [[TMP17]], 8
-; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP18]]
-; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[NEXT_GEP10]], align 16
-; CHECK-NEXT: store i32 [[TMP19]], ptr [[NEXT_GEP6]], align 16
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE17]]
-; CHECK: pred.store.continue17:
-; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP3]], i64 3
-; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_STORE_IF18:%.*]], label [[PRED_STORE_CONTINUE19]]
-; CHECK: pred.store.if18:
-; CHECK-NEXT: [[TMP21:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = or disjoint i64 [[TMP21]], 12
-; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP22]]
-; CHECK-NEXT: [[TMP23:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP24:%.*]] = or disjoint i64 [[TMP23]], 12
-; CHECK-NEXT: [[NEXT_GEP11:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP24]]
-; CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[NEXT_GEP11]], align 16
-; CHECK-NEXT: store i32 [[TMP25]], ptr [[NEXT_GEP7]], align 16
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE19]]
-; CHECK: pred.store.continue19:
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP3]], i64 1
+; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF15:%.*]], label [[PRED_STORE_CONTINUE16:%.*]]
+; CHECK: pred.store.if15:
+; CHECK-NEXT: [[TMP7:%.*]] = or disjoint i64 [[OFFSET_IDX]], 4
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = or disjoint i64 [[OFFSET_IDX8]], 4
+; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[NEXT_GEP10]], align 16
+; CHECK-NEXT: store i32 [[TMP9]], ptr [[NEXT_GEP5]], align 16
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE16]]
+; CHECK: pred.store.continue16:
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP3]], i64 2
+; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_STORE_IF17:%.*]], label [[PRED_STORE_CONTINUE18:%.*]]
+; CHECK: pred.store.if17:
+; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i64 [[OFFSET_IDX]], 8
+; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = or disjoint i64 [[OFFSET_IDX8]], 8
+; CHECK-NEXT: [[NEXT_GEP11:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[NEXT_GEP11]], align 16
+; CHECK-NEXT: store i32 [[TMP13]], ptr [[NEXT_GEP6]], align 16
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE18]]
+; CHECK: pred.store.continue18:
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP3]], i64 3
+; CHECK-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF19:%.*]], label [[PRED_STORE_CONTINUE20]]
+; CHECK: pred.store.if19:
+; CHECK-NEXT: [[TMP15:%.*]] = or disjoint i64 [[OFFSET_IDX]], 12
+; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP15]]
+; CHECK-NEXT: [[TMP16:%.*]] = or disjoint i64 [[OFFSET_IDX8]], 12
+; CHECK-NEXT: [[NEXT_GEP12:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP16]]
+; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[NEXT_GEP12]], align 16
+; CHECK-NEXT: store i32 [[TMP17]], ptr [[NEXT_GEP7]], align 16
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE20]]
+; CHECK: pred.store.continue20:
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
@@ -410,24 +404,24 @@ define void @example23b(ptr noalias nocapture %src, ptr noalias nocapture %dst)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[DST:%.*]], i64 [[TMP2]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i64 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[DST:%.*]], i64 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[NEXT_GEP]], align 2
-; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[WIDE_LOAD]] to <4 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw <4 x i32> [[TMP3]], <i32 7, i32 7, i32 7, i32 7>
-; CHECK-NEXT: store <4 x i32> [[TMP4]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i16> [[WIDE_LOAD]] to <4 x i32>
+; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw <4 x i32> [[TMP1]], <i32 7, i32 7, i32 7, i32 7>
+; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
+; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br i1 true, label [[TMP7:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br i1 true, label [[TMP5:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[TMP6:%.*]]
-; CHECK: 6:
-; CHECK-NEXT: br i1 poison, label [[TMP7]], label [[TMP6]], !llvm.loop [[LOOP11:![0-9]+]]
-; CHECK: 7:
+; CHECK-NEXT: br label [[TMP4:%.*]]
+; CHECK: 4:
+; CHECK-NEXT: br i1 poison, label [[TMP5]], label [[TMP4]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK: 5:
; CHECK-NEXT: ret void
;
br label %1
@@ -457,7 +451,9 @@ define void @example23c(ptr noalias nocapture %src, ptr noalias nocapture %dst)
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE16:%.*]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE17:%.*]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[OFFSET_IDX7:%.*]] = shl i64 [[INDEX]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[VEC_IV:%.*]] = or disjoint <4 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3>
@@ -465,71 +461,63 @@ define void @example23c(ptr noalias nocapture %src, ptr noalias nocapture %dst)
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP1]], i64 0
; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[DST:%.*]], i64 [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[TMP4]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[NEXT_GEP]], align 2
-; CHECK-NEXT: [[TMP6:%.*]] = zext i16 [[TMP5]] to i32
-; CHECK-NEXT: [[TMP7:%.*]] = shl nuw nsw i32 [[TMP6]], 7
-; CHECK-NEXT: store i32 [[TMP7]], ptr [[NEXT_GEP7]], align 4
+; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[DST:%.*]], i64 [[OFFSET_IDX7]]
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr [[NEXT_GEP]], align 2
+; CHECK-NEXT: [[TMP4:%.*]] = zext i16 [[TMP3]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i32 [[TMP4]], 7
+; CHECK-NEXT: store i32 [[TMP5]], ptr [[NEXT_GEP8]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
; CHECK: pred.store.continue:
-; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP1]], i64 1
-; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF11:%.*]], label [[PRED_STORE_CONTINUE12:%.*]]
-; CHECK: pred.store.if11:
-; CHECK-NEXT: [[TMP9:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP10:%.*]] = or disjoint i64 [[TMP9]], 4
-; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP10]]
-; CHECK-NEXT: [[TMP11:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = or disjoint i64 [[TMP11]], 2
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP13:%.*]] = load i16, ptr [[NEXT_GEP4]], align 2
-; CHECK-NEXT: [[TMP14:%.*]] = zext i16 [[TMP13]] to i32
-; CHECK-NEXT: [[TMP15:%.*]] = shl nuw nsw i32 [[TMP14]], 7
-; CHECK-NEXT: store i32 [[TMP15]], ptr [[NEXT_GEP8]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE12]]
-; CHECK: pred.store.continue12:
-; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP1]], i64 2
-; CHECK-NEXT: br i1 [[TMP16]], label [[PRED_STORE_IF13:%.*]], label [[PRED_STORE_CONTINUE14:%.*]]
-; CHECK: pred.store.if13:
-; CHECK-NEXT: [[TMP17:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP18:%.*]] = or disjoint i64 [[TMP17]], 8
-; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP18]]
-; CHECK-NEXT: [[TMP19:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP20:%.*]] = or disjoint i64 [[TMP19]], 4
-; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP20]]
-; CHECK-NEXT: [[TMP21:%.*]] = load i16, ptr [[NEXT_GEP5]], align 2
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP1]], i64 1
+; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF12:%.*]], label [[PRED_STORE_CONTINUE13:%.*]]
+; CHECK: pred.store.if12:
+; CHECK-NEXT: [[TMP7:%.*]] = or disjoint i64 [[OFFSET_IDX7]], 4
+; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = or disjoint i64 [[OFFSET_IDX]], 2
+; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[NEXT_GEP4]], align 2
+; CHECK-NEXT: [[TMP10:%.*]] = zext i16 [[TMP9]] to i32
+; CHECK-NEXT: [[TMP11:%.*]] = shl nuw nsw i32 [[TMP10]], 7
+; CHECK-NEXT: store i32 [[TMP11]], ptr [[NEXT_GEP9]], align 4
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE13]]
+; CHECK: pred.store.continue13:
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP1]], i64 2
+; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF14:%.*]], label [[PRED_STORE_CONTINUE15:%.*]]
+; CHECK: pred.store.if14:
+; CHECK-NEXT: [[TMP13:%.*]] = or disjoint i64 [[OFFSET_IDX7]], 8
+; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = or disjoint i64 [[OFFSET_IDX]], 4
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = load i16, ptr [[NEXT_GEP5]], align 2
+; CHECK-NEXT: [[TMP16:%.*]] = zext i16 [[TMP15]] to i32
+; CHECK-NEXT: [[TMP17:%.*]] = shl nuw nsw i32 [[TMP16]], 7
+; CHECK-NEXT: store i32 [[TMP17]], ptr [[NEXT_GEP10]], align 4
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE15]]
+; CHECK: pred.store.continue15:
+; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[TMP1]], i64 3
+; CHECK-NEXT: br i1 [[TMP18]], label [[PRED_STORE_IF16:%.*]], label [[PRED_STORE_CONTINUE17]]
+; CHECK: pred.store.if16:
+; CHECK-NEXT: [[TMP19:%.*]] = or disjoint i64 [[OFFSET_IDX7]], 12
+; CHECK-NEXT: [[NEXT_GEP11:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP19]]
+; CHECK-NEXT: [[TMP20:%.*]] = or disjoint i64 [[OFFSET_IDX]], 6
+; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP20]]
+; CHECK-NEXT: [[TMP21:%.*]] = load i16, ptr [[NEXT_GEP6]], align 2
; CHECK-NEXT: [[TMP22:%.*]] = zext i16 [[TMP21]] to i32
; CHECK-NEXT: [[TMP23:%.*]] = shl nuw nsw i32 [[TMP22]], 7
-; CHECK-NEXT: store i32 [[TMP23]], ptr [[NEXT_GEP9]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE14]]
-; CHECK: pred.store.continue14:
-; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i1> [[TMP1]], i64 3
-; CHECK-NEXT: br i1 [[TMP24]], label [[PRED_STORE_IF15:%.*]], label [[PRED_STORE_CONTINUE16]]
-; CHECK: pred.store.if15:
-; CHECK-NEXT: [[TMP25:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP26:%.*]] = or disjoint i64 [[TMP25]], 12
-; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP26]]
-; CHECK-NEXT: [[TMP27:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP28:%.*]] = or disjoint i64 [[TMP27]], 6
-; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP28]]
-; CHECK-NEXT: [[TMP29:%.*]] = load i16, ptr [[NEXT_GEP6]], align 2
-; CHECK-NEXT: [[TMP30:%.*]] = zext i16 [[TMP29]] to i32
-; CHECK-NEXT: [[TMP31:%.*]] = shl nuw nsw i32 [[TMP30]], 7
-; CHECK-NEXT: store i32 [[TMP31]], ptr [[NEXT_GEP10]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE16]]
-; CHECK: pred.store.continue16:
+; CHECK-NEXT: store i32 [[TMP23]], ptr [[NEXT_GEP11]], align 4
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE17]]
+; CHECK: pred.store.continue17:
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
-; CHECK-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
+; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br i1 true, label [[TMP34:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br i1 true, label [[TMP26:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[TMP33:%.*]]
-; CHECK: 33:
-; CHECK-NEXT: br i1 poison, label [[TMP34]], label [[TMP33]], !llvm.loop [[LOOP13:![0-9]+]]
-; CHECK: 34:
+; CHECK-NEXT: br label [[TMP25:%.*]]
+; CHECK: 25:
+; CHECK-NEXT: br i1 poison, label [[TMP26]], label [[TMP25]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK: 26:
; CHECK-NEXT: ret void
;
br label %1
diff --git a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
index cc7c1d8..1e23f02 100644
--- a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
+++ b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
@@ -311,15 +311,12 @@ for.end:
; INTER: vector.body
; INTER: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; INTER: %[[I0:.+]] = shl i64 %index, 4
+; INTER: %[[I1:.+]] = or disjoint i64 %[[I0]], 16
+; INTER: %[[I2:.+]] = or disjoint i64 %[[I0]], 32
+; INTER: %[[I3:.+]] = or disjoint i64 %[[I0]], 48
; INTER: %next.gep = getelementptr i8, ptr %a, i64 %[[I0]]
-; INTER: %[[S1:.+]] = shl i64 %index, 4
-; INTER: %[[I1:.+]] = or disjoint i64 %[[S1]], 16
; INTER: %next.gep2 = getelementptr i8, ptr %a, i64 %[[I1]]
-; INTER: %[[S2:.+]] = shl i64 %index, 4
-; INTER: %[[I2:.+]] = or disjoint i64 %[[S2]], 32
; INTER: %next.gep3 = getelementptr i8, ptr %a, i64 %[[I2]]
-; INTER: %[[S3:.+]] = shl i64 %index, 4
-; INTER: %[[I3:.+]] = or disjoint i64 %[[S3]], 48
; INTER: %next.gep4 = getelementptr i8, ptr %a, i64 %[[I3]]
; INTER: br i1 {{.*}}, label %middle.block, label %vector.body
;
@@ -361,15 +358,12 @@ for.end:
; CHECK: vector.body
; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK: [[SHL1:%.+]] = shl i64 %index, 4
+; CHECK: %[[I1:.+]] = or disjoint i64 [[SHL1]], 16
+; CHECK: %[[I2:.+]] = or disjoint i64 [[SHL1]], 32
+; CHECK: %[[I3:.+]] = or disjoint i64 [[SHL1]], 48
; CHECK: %next.gep = getelementptr i8, ptr %a, i64 [[SHL1]]
-; CHECK: [[SHL2:%.+]] = shl i64 %index, 4
-; CHECK: %[[I1:.+]] = or disjoint i64 [[SHL2]], 16
; CHECK: %next.gep2 = getelementptr i8, ptr %a, i64 %[[I1]]
-; CHECK: [[SHL3:%.+]] = shl i64 %index, 4
-; CHECK: %[[I2:.+]] = or disjoint i64 [[SHL3]], 32
; CHECK: %next.gep3 = getelementptr i8, ptr %a, i64 %[[I2]]
-; CHECK: [[SHL4:%.+]] = shl i64 %index, 4
-; CHECK: %[[I3:.+]] = or disjoint i64 [[SHL4]], 48
; CHECK: %next.gep4 = getelementptr i8, ptr %a, i64 %[[I3]]
; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
;
diff --git a/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll b/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll
index 7f20963..2c665a4 100644
--- a/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll
+++ b/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll
@@ -7,7 +7,7 @@ define void @foo(ptr %h) !dbg !4 {
; CHECK-LABEL: define void @foo(
; CHECK-SAME: ptr [[H:%.*]]) !dbg [[DBG4:![0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i64 0, metadata [[META11:![0-9]+]], metadata !DIExpression()), !dbg [[DBG20:![0-9]+]]
+; CHECK-NEXT: tail call void @llvm.dbg.value(metadata i64 0, metadata [[META11:![0-9]+]], metadata !DIExpression()), !dbg [[DBG20:![0-9]+]]
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]], !dbg [[DBG21:![0-9]+]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]], !dbg [[DBG21]]
@@ -27,15 +27,15 @@ define void @foo(ptr %h) !dbg !4 {
; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> <i32 3, i32 3, i32 3, i32 3>, <4 x ptr> [[TMP3]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>), !dbg [[DBG22]]
; CHECK-NEXT: [[TMP4]] = add nuw nsw <4 x i64> [[VEC_PHI]], <i64 1, i64 1, i64 1, i64 1>, !dbg [[DBG24:![0-9]+]]
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i64> [[TMP4]], <i64 5, i64 5, i64 5, i64 5>, !dbg [[DBG25:![0-9]+]]
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP5]], i32 0
-; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP32]], label [[FOR_COND5_PREHEADER1]]
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP5]], i32 0, !dbg [[DBG26:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP32]], label [[FOR_COND5_PREHEADER1]], !dbg [[DBG26]]
; CHECK: for.cond.cleanup32:
-; CHECK-NEXT: [[TMP7:%.*]] = add nuw nsw <4 x i64> [[VEC_IND]], <i64 1, i64 1, i64 1, i64 1>, !dbg [[DBG26:![0-9]+]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq <4 x i64> [[TMP7]], <i64 23, i64 23, i64 23, i64 23>, !dbg [[DBG27:![0-9]+]]
+; CHECK-NEXT: [[TMP7:%.*]] = add nuw nsw <4 x i64> [[VEC_IND]], <i64 1, i64 1, i64 1, i64 1>, !dbg [[DBG27:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq <4 x i64> [[TMP7]], <i64 23, i64 23, i64 23, i64 23>, !dbg [[DBG28:![0-9]+]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], <i64 4, i64 4, i64 4, i64 4>
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 20
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[SCALAR_PH]], !dbg [[DBG21]]
; CHECK: scalar.ph:
@@ -43,8 +43,8 @@ define void @foo(ptr %h) !dbg !4 {
; CHECK-NEXT: br label [[FOR_COND1_PREHEADER:%.*]], !dbg [[DBG21]]
; CHECK: for.cond1.preheader:
; CHECK-NEXT: [[I_023:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC13:%.*]], [[FOR_COND_CLEANUP3:%.*]] ]
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i64 [[I_023]], metadata [[META11]], metadata !DIExpression()), !dbg [[DBG20]]
-; CHECK-NEXT: br label [[FOR_COND5_PREHEADER:%.*]], !dbg [[DBG32:![0-9]+]]
+; CHECK-NEXT: tail call void @llvm.dbg.value(metadata i64 [[I_023]], metadata [[META11]], metadata !DIExpression()), !dbg [[DBG20]]
+; CHECK-NEXT: br label [[FOR_COND5_PREHEADER:%.*]], !dbg [[DBG26]]
; CHECK: for.cond5.preheader:
; CHECK-NEXT: [[L_022:%.*]] = phi i64 [ 0, [[FOR_COND1_PREHEADER]] ], [ [[INC10:%.*]], [[FOR_COND5_PREHEADER]] ]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[H]], i64 [[L_022]]
@@ -57,11 +57,11 @@ define void @foo(ptr %h) !dbg !4 {
; CHECK-NEXT: store i32 3, ptr [[ARRAYIDX_3]], align 4, !dbg [[DBG22]]
; CHECK-NEXT: [[INC10]] = add nuw nsw i64 [[L_022]], 1, !dbg [[DBG24]]
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC10]], 5, !dbg [[DBG25]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP3]], label [[FOR_COND5_PREHEADER]], !dbg [[DBG32]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP3]], label [[FOR_COND5_PREHEADER]], !dbg [[DBG26]]
; CHECK: for.cond.cleanup3:
-; CHECK-NEXT: [[INC13]] = add nuw nsw i64 [[I_023]], 1, !dbg [[DBG26]]
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i64 [[INC13]], metadata [[META11]], metadata !DIExpression()), !dbg [[DBG20]]
-; CHECK-NEXT: [[EXITCOND24_NOT:%.*]] = icmp eq i64 [[INC13]], 23, !dbg [[DBG27]]
+; CHECK-NEXT: [[INC13]] = add nuw nsw i64 [[I_023]], 1, !dbg [[DBG27]]
+; CHECK-NEXT: tail call void @llvm.dbg.value(metadata i64 [[INC13]], metadata [[META11]], metadata !DIExpression()), !dbg [[DBG20]]
+; CHECK-NEXT: [[EXITCOND24_NOT:%.*]] = icmp eq i64 [[INC13]], 23, !dbg [[DBG28]]
; CHECK-NEXT: br i1 [[EXITCOND24_NOT]], label [[EXIT]], label [[FOR_COND1_PREHEADER]], !dbg [[DBG21]], !llvm.loop [[LOOP34:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void, !dbg [[DBG35:![0-9]+]]
@@ -163,14 +163,14 @@ declare void @llvm.dbg.value(metadata, metadata, metadata)
; CHECK: [[META23]] = distinct !DILexicalBlock(scope: [[META18]], file: [[META1]], line: 12, column: 7)
; CHECK: [[DBG24]] = !DILocation(line: 11, column: 32, scope: [[META19]])
; CHECK: [[DBG25]] = !DILocation(line: 11, column: 26, scope: [[META19]])
-; CHECK: [[DBG26]] = !DILocation(line: 10, column: 30, scope: [[META16]])
-; CHECK: [[DBG27]] = !DILocation(line: 10, column: 24, scope: [[META16]])
-; CHECK: [[LOOP28]] = distinct !{[[LOOP28]], [[DBG21]], [[META29:![0-9]+]], [[META30:![0-9]+]], [[META31:![0-9]+]]}
-; CHECK: [[META29]] = !DILocation(line: 13, column: 13, scope: [[META12]])
-; CHECK: [[META30]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META31]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[DBG32]] = !DILocation(line: 11, column: 5, scope: [[META15]])
+; CHECK: [[DBG26]] = !DILocation(line: 11, column: 5, scope: [[META15]])
+; CHECK: [[DBG27]] = !DILocation(line: 10, column: 30, scope: [[META16]])
+; CHECK: [[DBG28]] = !DILocation(line: 10, column: 24, scope: [[META16]])
+; CHECK: [[LOOP29]] = distinct !{[[LOOP29]], [[DBG21]], [[META30:![0-9]+]], [[META31:![0-9]+]], [[META32:![0-9]+]]}
+; CHECK: [[META30]] = !DILocation(line: 13, column: 13, scope: [[META12]])
+; CHECK: [[META31]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META32]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[DBG33]] = !DILocation(line: 13, column: 2, scope: [[META23]])
-; CHECK: [[LOOP34]] = distinct !{[[LOOP34]], [[DBG21]], [[META29]], [[META30]]}
+; CHECK: [[LOOP34]] = distinct !{[[LOOP34]], [[DBG21]], [[META30]], [[META31]]}
; CHECK: [[DBG35]] = !DILocation(line: 14, column: 1, scope: [[DBG4]])
;.
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll
index b451d4b..0a37e5e 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll
@@ -1537,92 +1537,85 @@ define i32 @PR33613(ptr %b, double %j, i32 %d) {
; UNROLL-NO-IC-NEXT: br label [[VECTOR_BODY:%.*]]
; UNROLL-NO-IC: vector.body:
; UNROLL-NO-IC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-IC-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP48:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-IC-NEXT: [[VEC_PHI9:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP49:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x double> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[TMP39:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-IC-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
-; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 1
-; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP3]]
-; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 2
-; UNROLL-NO-IC-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP5]]
-; UNROLL-NO-IC-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 3
-; UNROLL-NO-IC-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP7]]
-; UNROLL-NO-IC-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 4
-; UNROLL-NO-IC-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP9]]
-; UNROLL-NO-IC-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 5
-; UNROLL-NO-IC-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP11]]
-; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 6
-; UNROLL-NO-IC-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP13]]
-; UNROLL-NO-IC-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], 7
-; UNROLL-NO-IC-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP15]]
-; UNROLL-NO-IC-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP17:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP2]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP18:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP3]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP4]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP20:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP5]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP21:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP6]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP22:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP7]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP23:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP8]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP24:%.*]] = load double, ptr [[TMP16]], align 8
-; UNROLL-NO-IC-NEXT: [[TMP25:%.*]] = load double, ptr [[TMP17]], align 8
-; UNROLL-NO-IC-NEXT: [[TMP26:%.*]] = load double, ptr [[TMP18]], align 8
-; UNROLL-NO-IC-NEXT: [[TMP27:%.*]] = load double, ptr [[TMP19]], align 8
+; UNROLL-NO-IC-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP40:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NO-IC-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP41:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x double> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[TMP31:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NO-IC-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 200
+; UNROLL-NO-IC-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
+; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 200
+; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 400
+; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 600
+; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 800
+; UNROLL-NO-IC-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 1000
+; UNROLL-NO-IC-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 1200
+; UNROLL-NO-IC-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 1400
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP0]]
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP3]]
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]]
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP5]]
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP6]]
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP7]]
+; UNROLL-NO-IC-NEXT: [[TMP8:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP3]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP10:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP4]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP5]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP6]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP13:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP7]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP14:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP8]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP15:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP9]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP16:%.*]] = load double, ptr [[TMP8]], align 8
+; UNROLL-NO-IC-NEXT: [[TMP17:%.*]] = load double, ptr [[TMP9]], align 8
+; UNROLL-NO-IC-NEXT: [[TMP18:%.*]] = load double, ptr [[TMP10]], align 8
+; UNROLL-NO-IC-NEXT: [[TMP19:%.*]] = load double, ptr [[TMP11]], align 8
+; UNROLL-NO-IC-NEXT: [[TMP20:%.*]] = insertelement <4 x double> poison, double [[TMP16]], i32 0
+; UNROLL-NO-IC-NEXT: [[TMP21:%.*]] = insertelement <4 x double> [[TMP20]], double [[TMP17]], i32 1
+; UNROLL-NO-IC-NEXT: [[TMP22:%.*]] = insertelement <4 x double> [[TMP21]], double [[TMP18]], i32 2
+; UNROLL-NO-IC-NEXT: [[TMP23:%.*]] = insertelement <4 x double> [[TMP22]], double [[TMP19]], i32 3
+; UNROLL-NO-IC-NEXT: [[TMP24:%.*]] = load double, ptr [[TMP12]], align 8
+; UNROLL-NO-IC-NEXT: [[TMP25:%.*]] = load double, ptr [[TMP13]], align 8
+; UNROLL-NO-IC-NEXT: [[TMP26:%.*]] = load double, ptr [[TMP14]], align 8
+; UNROLL-NO-IC-NEXT: [[TMP27:%.*]] = load double, ptr [[TMP15]], align 8
; UNROLL-NO-IC-NEXT: [[TMP28:%.*]] = insertelement <4 x double> poison, double [[TMP24]], i32 0
; UNROLL-NO-IC-NEXT: [[TMP29:%.*]] = insertelement <4 x double> [[TMP28]], double [[TMP25]], i32 1
; UNROLL-NO-IC-NEXT: [[TMP30:%.*]] = insertelement <4 x double> [[TMP29]], double [[TMP26]], i32 2
-; UNROLL-NO-IC-NEXT: [[TMP31:%.*]] = insertelement <4 x double> [[TMP30]], double [[TMP27]], i32 3
-; UNROLL-NO-IC-NEXT: [[TMP32:%.*]] = load double, ptr [[TMP20]], align 8
-; UNROLL-NO-IC-NEXT: [[TMP33:%.*]] = load double, ptr [[TMP21]], align 8
-; UNROLL-NO-IC-NEXT: [[TMP34:%.*]] = load double, ptr [[TMP22]], align 8
-; UNROLL-NO-IC-NEXT: [[TMP35:%.*]] = load double, ptr [[TMP23]], align 8
-; UNROLL-NO-IC-NEXT: [[TMP36:%.*]] = insertelement <4 x double> poison, double [[TMP32]], i32 0
-; UNROLL-NO-IC-NEXT: [[TMP37:%.*]] = insertelement <4 x double> [[TMP36]], double [[TMP33]], i32 1
-; UNROLL-NO-IC-NEXT: [[TMP38:%.*]] = insertelement <4 x double> [[TMP37]], double [[TMP34]], i32 2
-; UNROLL-NO-IC-NEXT: [[TMP39]] = insertelement <4 x double> [[TMP38]], double [[TMP35]], i32 3
-; UNROLL-NO-IC-NEXT: [[TMP40:%.*]] = shufflevector <4 x double> [[VECTOR_RECUR]], <4 x double> [[TMP31]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; UNROLL-NO-IC-NEXT: [[TMP41:%.*]] = shufflevector <4 x double> [[TMP31]], <4 x double> [[TMP39]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; UNROLL-NO-IC-NEXT: [[TMP42:%.*]] = fmul <4 x double> [[TMP40]], [[TMP31]]
-; UNROLL-NO-IC-NEXT: [[TMP43:%.*]] = fmul <4 x double> [[TMP41]], [[TMP39]]
-; UNROLL-NO-IC-NEXT: [[TMP44:%.*]] = fcmp une <4 x double> [[TMP42]], zeroinitializer
-; UNROLL-NO-IC-NEXT: [[TMP45:%.*]] = fcmp une <4 x double> [[TMP43]], zeroinitializer
-; UNROLL-NO-IC-NEXT: [[TMP46:%.*]] = zext <4 x i1> [[TMP44]] to <4 x i32>
-; UNROLL-NO-IC-NEXT: [[TMP47:%.*]] = zext <4 x i1> [[TMP45]] to <4 x i32>
-; UNROLL-NO-IC-NEXT: [[TMP48]] = add <4 x i32> [[VEC_PHI]], [[TMP46]]
-; UNROLL-NO-IC-NEXT: [[TMP49]] = add <4 x i32> [[VEC_PHI9]], [[TMP47]]
+; UNROLL-NO-IC-NEXT: [[TMP31]] = insertelement <4 x double> [[TMP30]], double [[TMP27]], i32 3
+; UNROLL-NO-IC-NEXT: [[TMP32:%.*]] = shufflevector <4 x double> [[VECTOR_RECUR]], <4 x double> [[TMP23]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; UNROLL-NO-IC-NEXT: [[TMP33:%.*]] = shufflevector <4 x double> [[TMP23]], <4 x double> [[TMP31]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; UNROLL-NO-IC-NEXT: [[TMP34:%.*]] = fmul <4 x double> [[TMP32]], [[TMP23]]
+; UNROLL-NO-IC-NEXT: [[TMP35:%.*]] = fmul <4 x double> [[TMP33]], [[TMP31]]
+; UNROLL-NO-IC-NEXT: [[TMP36:%.*]] = fcmp une <4 x double> [[TMP34]], zeroinitializer
+; UNROLL-NO-IC-NEXT: [[TMP37:%.*]] = fcmp une <4 x double> [[TMP35]], zeroinitializer
+; UNROLL-NO-IC-NEXT: [[TMP38:%.*]] = zext <4 x i1> [[TMP36]] to <4 x i32>
+; UNROLL-NO-IC-NEXT: [[TMP39:%.*]] = zext <4 x i1> [[TMP37]] to <4 x i32>
+; UNROLL-NO-IC-NEXT: [[TMP40]] = add <4 x i32> [[VEC_PHI]], [[TMP38]]
+; UNROLL-NO-IC-NEXT: [[TMP41]] = add <4 x i32> [[VEC_PHI2]], [[TMP39]]
; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
-; UNROLL-NO-IC-NEXT: [[TMP50:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10240
-; UNROLL-NO-IC-NEXT: br i1 [[TMP50]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; UNROLL-NO-IC-NEXT: [[TMP42:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10240
+; UNROLL-NO-IC-NEXT: br i1 [[TMP42]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; UNROLL-NO-IC: middle.block:
-; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP49]], [[TMP48]]
-; UNROLL-NO-IC-NEXT: [[TMP51:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
-; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x double> [[TMP39]], i32 3
+; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP41]], [[TMP40]]
+; UNROLL-NO-IC-NEXT: [[TMP43:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
+; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x double> [[TMP31]], i32 3
; UNROLL-NO-IC-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; UNROLL-NO-IC: scalar.ph:
; UNROLL-NO-IC-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi double [ [[J]], [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-IC-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[B]], [[ENTRY]] ]
; UNROLL-NO-IC-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i32 [ 10240, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
-; UNROLL-NO-IC-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP51]], [[MIDDLE_BLOCK]] ]
+; UNROLL-NO-IC-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP43]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-IC-NEXT: br label [[FOR_BODY:%.*]]
; UNROLL-NO-IC: for.cond.cleanup:
-; UNROLL-NO-IC-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ [[A_1:%.*]], [[FOR_BODY]] ], [ [[TMP51]], [[MIDDLE_BLOCK]] ]
+; UNROLL-NO-IC-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ [[A_1:%.*]], [[FOR_BODY]] ], [ [[TMP43]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-IC-NEXT: ret i32 [[A_1_LCSSA]]
; UNROLL-NO-IC: for.body:
; UNROLL-NO-IC-NEXT: [[B_ADDR_012:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD_PTR:%.*]], [[FOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[I_011:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[A_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[A_1]], [[FOR_BODY]] ]
-; UNROLL-NO-IC-NEXT: [[SCALAR_RECUR:%.*]] = phi double [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP52:%.*]], [[FOR_BODY]] ]
+; UNROLL-NO-IC-NEXT: [[SCALAR_RECUR:%.*]] = phi double [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP44:%.*]], [[FOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[B_ADDR_012]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP52]] = load double, ptr [[ARRAYIDX]], align 8
-; UNROLL-NO-IC-NEXT: [[MUL:%.*]] = fmul double [[SCALAR_RECUR]], [[TMP52]]
+; UNROLL-NO-IC-NEXT: [[TMP44]] = load double, ptr [[ARRAYIDX]], align 8
+; UNROLL-NO-IC-NEXT: [[MUL:%.*]] = fmul double [[SCALAR_RECUR]], [[TMP44]]
; UNROLL-NO-IC-NEXT: [[TOBOOL:%.*]] = fcmp une double [[MUL]], 0.000000e+00
; UNROLL-NO-IC-NEXT: [[INC:%.*]] = zext i1 [[TOBOOL]] to i32
; UNROLL-NO-IC-NEXT: [[A_1]] = add nsw i32 [[A_010]], [[INC]]
@@ -1640,35 +1633,34 @@ define i32 @PR33613(ptr %b, double %j, i32 %d) {
; UNROLL-NO-VF-NEXT: br label [[VECTOR_BODY:%.*]]
; UNROLL-NO-VF: vector.body:
; UNROLL-NO-VF-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-VF-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-VF-NEXT: [[VEC_PHI3:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-VF-NEXT: [[VECTOR_RECUR:%.*]] = phi double [ [[J:%.*]], [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-VF-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; UNROLL-NO-VF-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 200
-; UNROLL-NO-VF-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
-; UNROLL-NO-VF-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 1
-; UNROLL-NO-VF-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 200
-; UNROLL-NO-VF-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP3]]
-; UNROLL-NO-VF-NEXT: [[TMP4:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
-; UNROLL-NO-VF-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP2]], i64 [[IDXPROM]]
-; UNROLL-NO-VF-NEXT: [[TMP6:%.*]] = load double, ptr [[TMP4]], align 8
-; UNROLL-NO-VF-NEXT: [[TMP7]] = load double, ptr [[TMP5]], align 8
-; UNROLL-NO-VF-NEXT: [[TMP8:%.*]] = fmul double [[VECTOR_RECUR]], [[TMP6]]
-; UNROLL-NO-VF-NEXT: [[TMP9:%.*]] = fmul double [[TMP6]], [[TMP7]]
-; UNROLL-NO-VF-NEXT: [[TMP10:%.*]] = fcmp une double [[TMP8]], 0.000000e+00
-; UNROLL-NO-VF-NEXT: [[TMP11:%.*]] = fcmp une double [[TMP9]], 0.000000e+00
-; UNROLL-NO-VF-NEXT: [[TMP12:%.*]] = zext i1 [[TMP10]] to i32
-; UNROLL-NO-VF-NEXT: [[TMP13:%.*]] = zext i1 [[TMP11]] to i32
-; UNROLL-NO-VF-NEXT: [[TMP14]] = add i32 [[VEC_PHI]], [[TMP12]]
-; UNROLL-NO-VF-NEXT: [[TMP15]] = add i32 [[VEC_PHI3]], [[TMP13]]
+; UNROLL-NO-VF-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NO-VF-NEXT: [[VEC_PHI2:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NO-VF-NEXT: [[VECTOR_RECUR:%.*]] = phi double [ [[J:%.*]], [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NO-VF-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 200
+; UNROLL-NO-VF-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
+; UNROLL-NO-VF-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 200
+; UNROLL-NO-VF-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP0]]
+; UNROLL-NO-VF-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; UNROLL-NO-VF-NEXT: [[TMP2:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
+; UNROLL-NO-VF-NEXT: [[TMP3:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP3]], i64 [[IDXPROM]]
+; UNROLL-NO-VF-NEXT: [[TMP4:%.*]] = load double, ptr [[TMP2]], align 8
+; UNROLL-NO-VF-NEXT: [[TMP5]] = load double, ptr [[TMP3]], align 8
+; UNROLL-NO-VF-NEXT: [[TMP6:%.*]] = fmul double [[VECTOR_RECUR]], [[TMP4]]
+; UNROLL-NO-VF-NEXT: [[TMP7:%.*]] = fmul double [[TMP4]], [[TMP5]]
+; UNROLL-NO-VF-NEXT: [[TMP8:%.*]] = fcmp une double [[TMP6]], 0.000000e+00
+; UNROLL-NO-VF-NEXT: [[TMP9:%.*]] = fcmp une double [[TMP7]], 0.000000e+00
+; UNROLL-NO-VF-NEXT: [[TMP10:%.*]] = zext i1 [[TMP8]] to i32
+; UNROLL-NO-VF-NEXT: [[TMP11:%.*]] = zext i1 [[TMP9]] to i32
+; UNROLL-NO-VF-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP10]]
+; UNROLL-NO-VF-NEXT: [[TMP13]] = add i32 [[VEC_PHI2]], [[TMP11]]
; UNROLL-NO-VF-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; UNROLL-NO-VF-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10240
-; UNROLL-NO-VF-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; UNROLL-NO-VF-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10240
+; UNROLL-NO-VF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; UNROLL-NO-VF: middle.block:
-; UNROLL-NO-VF-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP15]], [[TMP14]]
+; UNROLL-NO-VF-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP13]], [[TMP12]]
; UNROLL-NO-VF-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; UNROLL-NO-VF: scalar.ph:
-; UNROLL-NO-VF-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi double [ [[J]], [[ENTRY:%.*]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
+; UNROLL-NO-VF-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi double [ [[J]], [[ENTRY:%.*]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-VF-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[B]], [[ENTRY]] ]
; UNROLL-NO-VF-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i32 [ 10240, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; UNROLL-NO-VF-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
@@ -1680,10 +1672,10 @@ define i32 @PR33613(ptr %b, double %j, i32 %d) {
; UNROLL-NO-VF-NEXT: [[B_ADDR_012:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD_PTR:%.*]], [[FOR_BODY]] ]
; UNROLL-NO-VF-NEXT: [[I_011:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; UNROLL-NO-VF-NEXT: [[A_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[A_1]], [[FOR_BODY]] ]
-; UNROLL-NO-VF-NEXT: [[SCALAR_RECUR:%.*]] = phi double [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP17:%.*]], [[FOR_BODY]] ]
+; UNROLL-NO-VF-NEXT: [[SCALAR_RECUR:%.*]] = phi double [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP15:%.*]], [[FOR_BODY]] ]
; UNROLL-NO-VF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[B_ADDR_012]], i64 [[IDXPROM]]
-; UNROLL-NO-VF-NEXT: [[TMP17]] = load double, ptr [[ARRAYIDX]], align 8
-; UNROLL-NO-VF-NEXT: [[MUL:%.*]] = fmul double [[SCALAR_RECUR]], [[TMP17]]
+; UNROLL-NO-VF-NEXT: [[TMP15]] = load double, ptr [[ARRAYIDX]], align 8
+; UNROLL-NO-VF-NEXT: [[MUL:%.*]] = fmul double [[SCALAR_RECUR]], [[TMP15]]
; UNROLL-NO-VF-NEXT: [[TOBOOL:%.*]] = fcmp une double [[MUL]], 0.000000e+00
; UNROLL-NO-VF-NEXT: [[INC:%.*]] = zext i1 [[TOBOOL]] to i32
; UNROLL-NO-VF-NEXT: [[A_1]] = add nsw i32 [[A_010]], [[INC]]
@@ -1702,61 +1694,58 @@ define i32 @PR33613(ptr %b, double %j, i32 %d) {
; SINK-AFTER-NEXT: br label [[VECTOR_BODY:%.*]]
; SINK-AFTER: vector.body:
; SINK-AFTER-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SINK-AFTER-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
-; SINK-AFTER-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x double> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
-; SINK-AFTER-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; SINK-AFTER-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 200
-; SINK-AFTER-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
-; SINK-AFTER-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 1
-; SINK-AFTER-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 200
-; SINK-AFTER-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP3]]
-; SINK-AFTER-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 2
-; SINK-AFTER-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 200
-; SINK-AFTER-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP5]]
-; SINK-AFTER-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 3
-; SINK-AFTER-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 200
-; SINK-AFTER-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP7]]
-; SINK-AFTER-NEXT: [[TMP8:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
-; SINK-AFTER-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP2]], i64 [[IDXPROM]]
-; SINK-AFTER-NEXT: [[TMP10:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP3]], i64 [[IDXPROM]]
-; SINK-AFTER-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP4]], i64 [[IDXPROM]]
-; SINK-AFTER-NEXT: [[TMP12:%.*]] = load double, ptr [[TMP8]], align 8
-; SINK-AFTER-NEXT: [[TMP13:%.*]] = load double, ptr [[TMP9]], align 8
-; SINK-AFTER-NEXT: [[TMP14:%.*]] = load double, ptr [[TMP10]], align 8
-; SINK-AFTER-NEXT: [[TMP15:%.*]] = load double, ptr [[TMP11]], align 8
-; SINK-AFTER-NEXT: [[TMP16:%.*]] = insertelement <4 x double> poison, double [[TMP12]], i32 0
-; SINK-AFTER-NEXT: [[TMP17:%.*]] = insertelement <4 x double> [[TMP16]], double [[TMP13]], i32 1
-; SINK-AFTER-NEXT: [[TMP18:%.*]] = insertelement <4 x double> [[TMP17]], double [[TMP14]], i32 2
-; SINK-AFTER-NEXT: [[TMP19]] = insertelement <4 x double> [[TMP18]], double [[TMP15]], i32 3
-; SINK-AFTER-NEXT: [[TMP20:%.*]] = shufflevector <4 x double> [[VECTOR_RECUR]], <4 x double> [[TMP19]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; SINK-AFTER-NEXT: [[TMP21:%.*]] = fmul <4 x double> [[TMP20]], [[TMP19]]
-; SINK-AFTER-NEXT: [[TMP22:%.*]] = fcmp une <4 x double> [[TMP21]], zeroinitializer
-; SINK-AFTER-NEXT: [[TMP23:%.*]] = zext <4 x i1> [[TMP22]] to <4 x i32>
-; SINK-AFTER-NEXT: [[TMP24]] = add <4 x i32> [[VEC_PHI]], [[TMP23]]
+; SINK-AFTER-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ]
+; SINK-AFTER-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x double> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
+; SINK-AFTER-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 200
+; SINK-AFTER-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
+; SINK-AFTER-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 200
+; SINK-AFTER-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 400
+; SINK-AFTER-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 600
+; SINK-AFTER-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP0]]
+; SINK-AFTER-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; SINK-AFTER-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; SINK-AFTER-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP3]]
+; SINK-AFTER-NEXT: [[TMP4:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
+; SINK-AFTER-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP2]], i64 [[IDXPROM]]
+; SINK-AFTER-NEXT: [[TMP6:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP3]], i64 [[IDXPROM]]
+; SINK-AFTER-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP4]], i64 [[IDXPROM]]
+; SINK-AFTER-NEXT: [[TMP8:%.*]] = load double, ptr [[TMP4]], align 8
+; SINK-AFTER-NEXT: [[TMP9:%.*]] = load double, ptr [[TMP5]], align 8
+; SINK-AFTER-NEXT: [[TMP10:%.*]] = load double, ptr [[TMP6]], align 8
+; SINK-AFTER-NEXT: [[TMP11:%.*]] = load double, ptr [[TMP7]], align 8
+; SINK-AFTER-NEXT: [[TMP12:%.*]] = insertelement <4 x double> poison, double [[TMP8]], i32 0
+; SINK-AFTER-NEXT: [[TMP13:%.*]] = insertelement <4 x double> [[TMP12]], double [[TMP9]], i32 1
+; SINK-AFTER-NEXT: [[TMP14:%.*]] = insertelement <4 x double> [[TMP13]], double [[TMP10]], i32 2
+; SINK-AFTER-NEXT: [[TMP15]] = insertelement <4 x double> [[TMP14]], double [[TMP11]], i32 3
+; SINK-AFTER-NEXT: [[TMP16:%.*]] = shufflevector <4 x double> [[VECTOR_RECUR]], <4 x double> [[TMP15]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; SINK-AFTER-NEXT: [[TMP17:%.*]] = fmul <4 x double> [[TMP16]], [[TMP15]]
+; SINK-AFTER-NEXT: [[TMP18:%.*]] = fcmp une <4 x double> [[TMP17]], zeroinitializer
+; SINK-AFTER-NEXT: [[TMP19:%.*]] = zext <4 x i1> [[TMP18]] to <4 x i32>
+; SINK-AFTER-NEXT: [[TMP20]] = add <4 x i32> [[VEC_PHI]], [[TMP19]]
; SINK-AFTER-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; SINK-AFTER-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10240
-; SINK-AFTER-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; SINK-AFTER-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10240
+; SINK-AFTER-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; SINK-AFTER: middle.block:
-; SINK-AFTER-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP24]])
-; SINK-AFTER-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x double> [[TMP19]], i32 3
+; SINK-AFTER-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP20]])
+; SINK-AFTER-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x double> [[TMP15]], i32 3
; SINK-AFTER-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; SINK-AFTER: scalar.ph:
; SINK-AFTER-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi double [ [[J]], [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; SINK-AFTER-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[B]], [[ENTRY]] ]
; SINK-AFTER-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i32 [ 10240, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
-; SINK-AFTER-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP26]], [[MIDDLE_BLOCK]] ]
+; SINK-AFTER-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP22]], [[MIDDLE_BLOCK]] ]
; SINK-AFTER-NEXT: br label [[FOR_BODY:%.*]]
; SINK-AFTER: for.cond.cleanup:
-; SINK-AFTER-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ [[A_1:%.*]], [[FOR_BODY]] ], [ [[TMP26]], [[MIDDLE_BLOCK]] ]
+; SINK-AFTER-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ [[A_1:%.*]], [[FOR_BODY]] ], [ [[TMP22]], [[MIDDLE_BLOCK]] ]
; SINK-AFTER-NEXT: ret i32 [[A_1_LCSSA]]
; SINK-AFTER: for.body:
; SINK-AFTER-NEXT: [[B_ADDR_012:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD_PTR:%.*]], [[FOR_BODY]] ]
; SINK-AFTER-NEXT: [[I_011:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; SINK-AFTER-NEXT: [[A_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[A_1]], [[FOR_BODY]] ]
-; SINK-AFTER-NEXT: [[SCALAR_RECUR:%.*]] = phi double [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP27:%.*]], [[FOR_BODY]] ]
+; SINK-AFTER-NEXT: [[SCALAR_RECUR:%.*]] = phi double [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP23:%.*]], [[FOR_BODY]] ]
; SINK-AFTER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[B_ADDR_012]], i64 [[IDXPROM]]
-; SINK-AFTER-NEXT: [[TMP27]] = load double, ptr [[ARRAYIDX]], align 8
-; SINK-AFTER-NEXT: [[MUL:%.*]] = fmul double [[SCALAR_RECUR]], [[TMP27]]
+; SINK-AFTER-NEXT: [[TMP23]] = load double, ptr [[ARRAYIDX]], align 8
+; SINK-AFTER-NEXT: [[MUL:%.*]] = fmul double [[SCALAR_RECUR]], [[TMP23]]
; SINK-AFTER-NEXT: [[TOBOOL:%.*]] = fcmp une double [[MUL]], 0.000000e+00
; SINK-AFTER-NEXT: [[INC:%.*]] = zext i1 [[TOBOOL]] to i32
; SINK-AFTER-NEXT: [[A_1]] = add nsw i32 [[A_010]], [[INC]]
diff --git a/llvm/test/Transforms/LoopVectorize/pointer-induction-unroll.ll b/llvm/test/Transforms/LoopVectorize/pointer-induction-unroll.ll
index 4109477..31d862a 100644
--- a/llvm/test/Transforms/LoopVectorize/pointer-induction-unroll.ll
+++ b/llvm/test/Transforms/LoopVectorize/pointer-induction-unroll.ll
@@ -32,35 +32,36 @@ define void @non_constant_scalar_expansion(i32 %0, ptr %call) {
; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED: vector.body:
; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; STRIDED-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 0
-; STRIDED-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], [[TMP1]]
-; STRIDED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr null, i64 [[TMP5]]
-; STRIDED-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 1
-; STRIDED-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], [[TMP1]]
-; STRIDED-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr null, i64 [[TMP7]]
-; STRIDED-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 2
-; STRIDED-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], [[TMP1]]
-; STRIDED-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr null, i64 [[TMP9]]
-; STRIDED-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 3
-; STRIDED-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], [[TMP1]]
-; STRIDED-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr null, i64 [[TMP11]]
; STRIDED-NEXT: [[DOTCAST:%.*]] = trunc i64 [[INDEX]] to i32
; STRIDED-NEXT: [[OFFSET_IDX:%.*]] = add i32 30, [[DOTCAST]]
-; STRIDED-NEXT: [[TMP12:%.*]] = add i32 [[OFFSET_IDX]], 0
-; STRIDED-NEXT: [[TMP13:%.*]] = add i32 [[OFFSET_IDX]], 1
-; STRIDED-NEXT: [[TMP14:%.*]] = add i32 [[OFFSET_IDX]], 2
-; STRIDED-NEXT: [[TMP15:%.*]] = add i32 [[OFFSET_IDX]], 3
-; STRIDED-NEXT: [[TMP16:%.*]] = getelementptr ptr, ptr [[CALL:%.*]], i32 [[TMP12]]
-; STRIDED-NEXT: [[TMP17:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP13]]
-; STRIDED-NEXT: [[TMP18:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP14]]
-; STRIDED-NEXT: [[TMP19:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP15]]
-; STRIDED-NEXT: store ptr [[NEXT_GEP]], ptr [[TMP16]], align 4
-; STRIDED-NEXT: store ptr [[NEXT_GEP2]], ptr [[TMP17]], align 4
-; STRIDED-NEXT: store ptr [[NEXT_GEP3]], ptr [[TMP18]], align 4
-; STRIDED-NEXT: store ptr [[NEXT_GEP4]], ptr [[TMP19]], align 4
+; STRIDED-NEXT: [[TMP3:%.*]] = add i32 [[OFFSET_IDX]], 0
+; STRIDED-NEXT: [[TMP4:%.*]] = add i32 [[OFFSET_IDX]], 1
+; STRIDED-NEXT: [[TMP5:%.*]] = add i32 [[OFFSET_IDX]], 2
+; STRIDED-NEXT: [[TMP6:%.*]] = add i32 [[OFFSET_IDX]], 3
+; STRIDED-NEXT: [[OFFSET_IDX2:%.*]] = mul i64 [[INDEX]], [[TMP1]]
+; STRIDED-NEXT: [[TMP7:%.*]] = mul i64 0, [[TMP1]]
+; STRIDED-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX2]], [[TMP7]]
+; STRIDED-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP1]]
+; STRIDED-NEXT: [[TMP10:%.*]] = add i64 [[OFFSET_IDX2]], [[TMP9]]
+; STRIDED-NEXT: [[TMP11:%.*]] = mul i64 2, [[TMP1]]
+; STRIDED-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX2]], [[TMP11]]
+; STRIDED-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP1]]
+; STRIDED-NEXT: [[TMP14:%.*]] = add i64 [[OFFSET_IDX2]], [[TMP13]]
+; STRIDED-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr null, i64 [[TMP8]]
+; STRIDED-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr null, i64 [[TMP10]]
+; STRIDED-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr null, i64 [[TMP12]]
+; STRIDED-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr null, i64 [[TMP14]]
+; STRIDED-NEXT: [[TMP19:%.*]] = getelementptr ptr, ptr [[CALL:%.*]], i32 [[TMP3]]
+; STRIDED-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP4]]
+; STRIDED-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP5]]
+; STRIDED-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP6]]
+; STRIDED-NEXT: store ptr [[TMP15]], ptr [[TMP19]], align 4
+; STRIDED-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4
+; STRIDED-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4
+; STRIDED-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 4
; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; STRIDED-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4294967264
-; STRIDED-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; STRIDED-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4294967264
+; STRIDED-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; STRIDED: middle.block:
; STRIDED-NEXT: br i1 false, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; STRIDED: scalar.ph:
@@ -68,13 +69,13 @@ define void @non_constant_scalar_expansion(i32 %0, ptr %call) {
; STRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ null, [[ENTRY]] ]
; STRIDED-NEXT: br label [[FOR_COND:%.*]]
; STRIDED: for.cond:
-; STRIDED-NEXT: [[TMP21:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_COND]] ]
+; STRIDED-NEXT: [[TMP24:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_COND]] ]
; STRIDED-NEXT: [[P_0:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[ADD_PTR:%.*]], [[FOR_COND]] ]
; STRIDED-NEXT: [[ADD_PTR]] = getelementptr i8, ptr [[P_0]], i32 [[MUL]]
-; STRIDED-NEXT: [[ARRAYIDX:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP21]]
+; STRIDED-NEXT: [[ARRAYIDX:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP24]]
; STRIDED-NEXT: store ptr [[P_0]], ptr [[ARRAYIDX]], align 4
-; STRIDED-NEXT: [[INC]] = add i32 [[TMP21]], 1
-; STRIDED-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP21]], 0
+; STRIDED-NEXT: [[INC]] = add i32 [[TMP24]], 1
+; STRIDED-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP24]], 0
; STRIDED-NEXT: br i1 [[TOBOOL_NOT]], label [[FOR_END]], label [[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
; STRIDED: for.end:
; STRIDED-NEXT: ret void
diff --git a/llvm/test/Transforms/LoopVectorize/pointer-induction.ll b/llvm/test/Transforms/LoopVectorize/pointer-induction.ll
index 27f6f7b..3503796 100644
--- a/llvm/test/Transforms/LoopVectorize/pointer-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/pointer-induction.ll
@@ -23,56 +23,53 @@ define void @a(ptr readnone %b) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE10:%.*]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], -1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr null, i64 [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i64 -1
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 -3
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP6]], align 1
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 0, [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr null, i64 [[TMP2]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i64 -1
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 -3
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP5]], align 1
; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i8> [[WIDE_LOAD]], <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq <4 x i8> [[REVERSE]], zeroinitializer
-; CHECK-NEXT: [[TMP8:%.*]] = xor <4 x i1> [[TMP7]], <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP8]], i32 0
-; CHECK-NEXT: br i1 [[TMP9]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq <4 x i8> [[REVERSE]], zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = xor <4 x i1> [[TMP6]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP7]], i32 0
+; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i64 -1
-; CHECK-NEXT: store i8 95, ptr [[TMP10]], align 1
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i64 -1
+; CHECK-NEXT: store i8 95, ptr [[TMP9]], align 1
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
; CHECK: pred.store.continue:
-; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP8]], i32 1
-; CHECK-NEXT: br i1 [[TMP11]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6:%.*]]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP7]], i32 1
+; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6:%.*]]
; CHECK: pred.store.if5:
-; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], -1
-; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr null, i64 [[TMP13]]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP2]], i64 -1
-; CHECK-NEXT: store i8 95, ptr [[TMP14]], align 1
+; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], -1
+; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr null, i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP2]], i64 -1
+; CHECK-NEXT: store i8 95, ptr [[TMP12]], align 1
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]]
; CHECK: pred.store.continue6:
-; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i1> [[TMP8]], i32 2
-; CHECK-NEXT: br i1 [[TMP15]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8:%.*]]
+; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[TMP7]], i32 2
+; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8:%.*]]
; CHECK: pred.store.if7:
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], -1
-; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr null, i64 [[TMP17]]
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP3]], i64 -1
-; CHECK-NEXT: store i8 95, ptr [[TMP18]], align 1
+; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[OFFSET_IDX]], -2
+; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr null, i64 [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP3]], i64 -1
+; CHECK-NEXT: store i8 95, ptr [[TMP15]], align 1
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE8]]
; CHECK: pred.store.continue8:
-; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP8]], i32 3
-; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10]]
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP7]], i32 3
+; CHECK-NEXT: br i1 [[TMP16]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10]]
; CHECK: pred.store.if9:
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], -1
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr null, i64 [[TMP21]]
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP4]], i64 -1
-; CHECK-NEXT: store i8 95, ptr [[TMP22]], align 1
+; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[OFFSET_IDX]], -3
+; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr null, i64 [[TMP17]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP4]], i64 -1
+; CHECK-NEXT: store i8 95, ptr [[TMP18]], align 1
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE10]]
; CHECK: pred.store.continue10:
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
@@ -86,8 +83,8 @@ define void @a(ptr readnone %b) {
; CHECK: for.body:
; CHECK-NEXT: [[C_05:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[C_05]], i64 -1
-; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[INCDEC_PTR]], align 1
-; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i8 [[TMP24]], 0
+; CHECK-NEXT: [[TMP20:%.*]] = load i8, ptr [[INCDEC_PTR]], align 1
+; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i8 [[TMP20]], 0
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[IF_END]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: store i8 95, ptr [[INCDEC_PTR]], align 1
@@ -145,22 +142,22 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias
; CHECK: vector.body:
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START_2]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 8
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> <i64 0, i64 1, i64 2, i64 3>
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[TMP2]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> <i64 0, i64 1, i64 2, i64 3>
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, <4 x ptr> [[TMP3]], i64 1
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr ptr, ptr [[NEXT_GEP]], i32 0
-; CHECK-NEXT: store <4 x ptr> [[TMP4]], ptr [[TMP5]], align 8
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x ptr> [[TMP3]], i32 0
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP6]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP7]], align 1
-; CHECK-NEXT: [[TMP8:%.*]] = add <4 x i8> [[WIDE_LOAD]], <i8 1, i8 1, i8 1, i8 1>
-; CHECK-NEXT: store <4 x i8> [[TMP8]], ptr [[TMP7]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, <4 x ptr> [[TMP1]], i64 1
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr ptr, ptr [[NEXT_GEP]], i32 0
+; CHECK-NEXT: store <4 x ptr> [[TMP3]], ptr [[TMP4]], align 8
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x ptr> [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP6]], align 1
+; CHECK-NEXT: [[TMP7:%.*]] = add <4 x i8> [[WIDE_LOAD]], <i8 1, i8 1, i8 1, i8 1>
+; CHECK-NEXT: store <4 x i8> [[TMP7]], ptr [[TMP6]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 4
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
index 3ba5782..873f636 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
@@ -850,8 +850,8 @@ define float @cond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]]
; CHECK: pred.load.continue6:
; CHECK-NEXT: [[TMP24:%.*]] = phi <4 x float> [ [[TMP19]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP23]], [[PRED_LOAD_IF5]] ]
-; CHECK-NEXT: [[TMP25:%.*]] = select <4 x i1> [[TMP4]], <4 x float> [[TMP24]], <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>
-; CHECK-NEXT: [[PREDPHI:%.*]] = fadd fast <4 x float> [[VEC_PHI]], [[TMP25]]
+; CHECK-NEXT: [[TMP25:%.*]] = fadd fast <4 x float> [[TMP24]], [[VEC_PHI]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP4]], <4 x float> [[TMP25]], <4 x float> [[VEC_PHI]]
; CHECK-NEXT: [[TMP26:%.*]] = fcmp fast oeq <4 x float> [[WIDE_LOAD]], <float 7.000000e+00, float 7.000000e+00, float 7.000000e+00, float 7.000000e+00>
; CHECK-NEXT: [[TMP27:%.*]] = extractelement <4 x i1> [[TMP26]], i64 0
; CHECK-NEXT: br i1 [[TMP27]], label [[PRED_LOAD_IF7:%.*]], label [[PRED_LOAD_CONTINUE8:%.*]]
@@ -889,8 +889,8 @@ define float @cond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE14]]
; CHECK: pred.load.continue14:
; CHECK-NEXT: [[TMP46:%.*]] = phi <4 x float> [ [[TMP41]], [[PRED_LOAD_CONTINUE12]] ], [ [[TMP45]], [[PRED_LOAD_IF13]] ]
-; CHECK-NEXT: [[TMP47:%.*]] = select <4 x i1> [[TMP26]], <4 x float> [[TMP46]], <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>
-; CHECK-NEXT: [[PREDPHI15]] = fadd fast <4 x float> [[PREDPHI]], [[TMP47]]
+; CHECK-NEXT: [[TMP47:%.*]] = fadd fast <4 x float> [[TMP46]], [[PREDPHI]]
+; CHECK-NEXT: [[PREDPHI15]] = select <4 x i1> [[TMP26]], <4 x float> [[TMP47]], <4 x float> [[PREDPHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP48]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
index b1c5ccb..e79f098 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
@@ -65,7 +65,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP2:![0-9]+]]
+; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: ._crit_edge:
; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ poison, [[DOTLR_PH]] ], [ [[TMP26]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
@@ -1173,7 +1173,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE6:%.*]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_LOAD_CONTINUE6]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 1000, [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[PRED_LOAD_CONTINUE6]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 1000, [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[PRED_LOAD_CONTINUE6]] ]
; CHECK-NEXT: [[TMP0:%.*]] = icmp ult <4 x i64> [[VEC_IND]], <i64 257, i64 257, i64 257, i64 257>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
@@ -1216,11 +1216,11 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP23:%.*]] = phi <4 x i32> [ [[TMP17]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP22]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP24:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP23]], <4 x i32> <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
; CHECK-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP24]])
-; CHECK-NEXT: [[TMP26]] = call i32 @llvm.smin.i32(i32 [[TMP25]], i32 [[VEC_PHI]])
+; CHECK-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smin.i32(i32 [[TMP25]], i32 [[VEC_PHI]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], <i64 4, i64 4, i64 4, i64 4>
-; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
-; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
+; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
@@ -1228,7 +1228,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) {
; CHECK: for.body:
; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK: for.end:
-; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ poison, [[FOR_BODY]] ], [ [[TMP26]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ poison, [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
;
entry:
@@ -1260,7 +1260,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE6:%.*]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_LOAD_CONTINUE6]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 1000, [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[PRED_LOAD_CONTINUE6]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 1000, [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[PRED_LOAD_CONTINUE6]] ]
; CHECK-NEXT: [[TMP0:%.*]] = icmp ult <4 x i64> [[VEC_IND]], <i64 257, i64 257, i64 257, i64 257>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
@@ -1303,11 +1303,11 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP23:%.*]] = phi <4 x i32> [ [[TMP17]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP22]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP24:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP23]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[TMP24]])
-; CHECK-NEXT: [[TMP26]] = call i32 @llvm.umax.i32(i32 [[TMP25]], i32 [[VEC_PHI]])
+; CHECK-NEXT: [[RDX_MINMAX]] = call i32 @llvm.umax.i32(i32 [[TMP25]], i32 [[VEC_PHI]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], <i64 4, i64 4, i64 4, i64 4>
-; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
-; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
+; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
@@ -1315,7 +1315,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) {
; CHECK: for.body:
; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; CHECK: for.end:
-; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ poison, [[FOR_BODY]] ], [ [[TMP26]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ poison, [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
;
entry:
@@ -1351,25 +1351,25 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) {
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ [[TMP0]], [[VECTOR_PH]] ], [ [[PREDPHI3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP3]], align 4
-; CHECK-NEXT: [[TMP5:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
-; CHECK-NEXT: [[TMP6:%.*]] = fcmp ule <4 x float> [[WIDE_LOAD1]], <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
-; CHECK-NEXT: [[TMP8:%.*]] = and <4 x i1> [[TMP5]], [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD]], <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
-; CHECK-NEXT: [[TMP9:%.*]] = and <4 x i1> [[TMP8]], [[TMP7]]
-; CHECK-NEXT: [[TMP10:%.*]] = xor <4 x i1> [[TMP7]], <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: [[TMP11:%.*]] = and <4 x i1> [[TMP8]], [[TMP10]]
-; CHECK-NEXT: [[TMP12:%.*]] = xor <4 x i1> [[TMP5]], <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: [[PREDPHI_V:%.*]] = select <4 x i1> [[TMP9]], <4 x float> [[WIDE_LOAD1]], <4 x float> [[WIDE_LOAD]]
-; CHECK-NEXT: [[TMP13:%.*]] = select <4 x i1> [[TMP12]], <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i1> [[TMP11]]
-; CHECK-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP13]], <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, <4 x float> [[PREDPHI_V]]
-; CHECK-NEXT: [[PREDPHI3]] = fadd fast <4 x float> [[VEC_PHI]], [[PREDPHI2]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
+; CHECK-NEXT: [[TMP4:%.*]] = fcmp ule <4 x float> [[WIDE_LOAD1]], <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
+; CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD]], <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+; CHECK-NEXT: [[TMP7:%.*]] = and <4 x i1> [[TMP5]], [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = xor <4 x i1> [[TMP6]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: [[TMP9:%.*]] = and <4 x i1> [[TMP5]], [[TMP8]]
+; CHECK-NEXT: [[TMP10:%.*]] = xor <4 x i1> [[TMP3]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: [[PREDPHI_V:%.*]] = select <4 x i1> [[TMP7]], <4 x float> [[WIDE_LOAD1]], <4 x float> [[WIDE_LOAD]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = fadd fast <4 x float> [[VEC_PHI]], [[PREDPHI_V]]
+; CHECK-NEXT: [[TMP11:%.*]] = select <4 x i1> [[TMP10]], <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i1> [[TMP9]]
+; CHECK-NEXT: [[PREDPHI3]] = select <4 x i1> [[TMP11]], <4 x float> [[VEC_PHI]], <4 x float> [[PREDPHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
-; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[TMP15:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[PREDPHI3]])
+; CHECK-NEXT: [[TMP13:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[PREDPHI3]])
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
@@ -1386,7 +1386,7 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) {
; CHECK: for.inc:
; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP29:![0-9]+]]
; CHECK: for.end:
-; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi float [ poison, [[FOR_INC]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi float [ poison, [[FOR_INC]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[SUM_1_LCSSA]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
index d852411..204d021 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
@@ -690,16 +690,16 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) {
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP4:%.*]] = fcmp ule <4 x float> [[WIDE_LOAD1]], <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
-; CHECK-NEXT: [[TMP6:%.*]] = and <4 x i1> [[TMP3]], [[TMP4]]
-; CHECK-NEXT: [[TMP5:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD]], <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
-; CHECK-NEXT: [[TMP7:%.*]] = and <4 x i1> [[TMP6]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = xor <4 x i1> [[TMP5]], <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: [[TMP9:%.*]] = and <4 x i1> [[TMP6]], [[TMP8]]
+; CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD]], <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+; CHECK-NEXT: [[TMP7:%.*]] = and <4 x i1> [[TMP5]], [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = xor <4 x i1> [[TMP6]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: [[TMP9:%.*]] = and <4 x i1> [[TMP5]], [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = xor <4 x i1> [[TMP3]], <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT: [[PREDPHI_V:%.*]] = select <4 x i1> [[TMP7]], <4 x float> [[WIDE_LOAD1]], <4 x float> [[WIDE_LOAD]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = fadd fast <4 x float> [[VEC_PHI]], [[PREDPHI_V]]
; CHECK-NEXT: [[TMP11:%.*]] = select <4 x i1> [[TMP10]], <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i1> [[TMP9]]
-; CHECK-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP11]], <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, <4 x float> [[PREDPHI_V]]
-; CHECK-NEXT: [[PREDPHI3]] = fadd fast <4 x float> [[VEC_PHI]], [[PREDPHI2]]
+; CHECK-NEXT: [[PREDPHI3]] = select <4 x i1> [[TMP11]], <4 x float> [[VEC_PHI]], <4 x float> [[PREDPHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
@@ -1354,9 +1354,8 @@ define i32 @predicated_or_dominates_reduction(ptr %b) {
; CHECK: pred.load.continue6:
; CHECK-NEXT: [[TMP43:%.*]] = phi <4 x i32> [ [[TMP37]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP42]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP44:%.*]] = icmp ne <4 x i32> [[TMP43]], zeroinitializer
-; CHECK-NEXT: [[TMP45:%.*]] = select <4 x i1> [[TMP19]], <4 x i1> [[TMP44]], <4 x i1> zeroinitializer
; CHECK-NEXT: [[TMP46:%.*]] = xor <4 x i1> [[TMP19]], <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: [[TMP47:%.*]] = or <4 x i1> [[TMP45]], [[TMP46]]
+; CHECK-NEXT: [[TMP47:%.*]] = select <4 x i1> [[TMP46]], <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i1> [[TMP44]]
; CHECK-NEXT: [[TMP48:%.*]] = bitcast <4 x i1> [[TMP47]] to i4
; CHECK-NEXT: [[TMP49:%.*]] = call i4 @llvm.ctpop.i4(i4 [[TMP48]]), !range [[RNG42:![0-9]+]]
; CHECK-NEXT: [[TMP50:%.*]] = zext nneg i4 [[TMP49]] to i32
diff --git a/llvm/test/Transforms/LoopVectorize/reduction.ll b/llvm/test/Transforms/LoopVectorize/reduction.ll
index ba82bac..a47a385 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction.ll
@@ -761,16 +761,16 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) {
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP4:%.*]] = fcmp ule <4 x float> [[WIDE_LOAD1]], <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
-; CHECK-NEXT: [[TMP6:%.*]] = and <4 x i1> [[TMP3]], [[TMP4]]
-; CHECK-NEXT: [[TMP5:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD]], <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
-; CHECK-NEXT: [[TMP7:%.*]] = and <4 x i1> [[TMP6]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = xor <4 x i1> [[TMP5]], <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: [[TMP9:%.*]] = and <4 x i1> [[TMP6]], [[TMP8]]
+; CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD]], <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+; CHECK-NEXT: [[TMP7:%.*]] = and <4 x i1> [[TMP5]], [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = xor <4 x i1> [[TMP6]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: [[TMP9:%.*]] = and <4 x i1> [[TMP5]], [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = xor <4 x i1> [[TMP3]], <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT: [[PREDPHI_V:%.*]] = select <4 x i1> [[TMP7]], <4 x float> [[WIDE_LOAD1]], <4 x float> [[WIDE_LOAD]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = fadd fast <4 x float> [[VEC_PHI]], [[PREDPHI_V]]
; CHECK-NEXT: [[TMP11:%.*]] = select <4 x i1> [[TMP10]], <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i1> [[TMP9]]
-; CHECK-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP11]], <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, <4 x float> [[PREDPHI_V]]
-; CHECK-NEXT: [[PREDPHI3]] = fadd fast <4 x float> [[VEC_PHI]], [[PREDPHI2]]
+; CHECK-NEXT: [[PREDPHI3]] = select <4 x i1> [[TMP11]], <4 x float> [[VEC_PHI]], <4 x float> [[PREDPHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
index 0c659a5..92ca77b 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
@@ -92,6 +92,7 @@ define void @VF1-VPWidenCanonicalIVRecipeExe(ptr %ptr1) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE12:%.*]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
; CHECK-NEXT: [[VEC_IV:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[VEC_IV4:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT: [[VEC_IV5:%.*]] = add i64 [[INDEX]], 2
@@ -102,39 +103,35 @@ define void @VF1-VPWidenCanonicalIVRecipeExe(ptr %ptr1) {
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i64 [[VEC_IV6]], 14
; CHECK-NEXT: br i1 [[TMP0]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP4]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[NEXT_GEP]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
; CHECK: pred.store.continue:
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8:%.*]]
; CHECK: pred.store.if7:
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 8
-; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 8
+; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP5]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[NEXT_GEP1]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE8]]
; CHECK: pred.store.continue8:
; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10:%.*]]
; CHECK: pred.store.if9:
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 8
-; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP9]]
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 16
+; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP6]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[NEXT_GEP2]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE10]]
; CHECK: pred.store.continue10:
; CHECK-NEXT: br i1 [[TMP3]], label [[PRED_STORE_IF11:%.*]], label [[PRED_STORE_CONTINUE12]]
; CHECK: pred.store.if11:
-; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 8
-; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 24
+; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP7]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[NEXT_GEP3]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE12]]
; CHECK: pred.store.continue12:
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
index 1dddbfe..ca9dfdc 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
@@ -23,11 +23,12 @@ define void @test_tc_less_than_16(ptr %A, i64 %N) {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[CAN_IV_NEXT:%.+]]>
-; CHECK-NEXT: EMIT ir<%p.src> = WIDEN-POINTER-INDUCTION ir<%A>, 1
-; CHECK-NEXT: vp<[[VPTR:%.]]> = vector-pointer ir<%p.src>
+; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
+; CHECK-NEXT: EMIT vp<[[PADD:%.+]]> = ptradd ir<%A>, vp<[[STEPS]]>
+; CHECK-NEXT: vp<[[VPTR:%.]]> = vector-pointer vp<[[PADD]]>
; CHECK-NEXT: WIDEN ir<%l> = load vp<[[VPTR]]>
; CHECK-NEXT: WIDEN ir<%add> = add nsw ir<%l>, ir<10>
-; CHECK-NEXT: vp<[[VPTR2:%.+]]> = vector-pointer ir<%p.src>
+; CHECK-NEXT: vp<[[VPTR2:%.+]]> = vector-pointer vp<[[PADD]]>
; CHECK-NEXT: WIDEN store vp<[[VPTR2]]>, ir<%add>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT]]> = add nuw vp<[[CAN_IV:%.+]]>, vp<[[VFxUF]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]>, vp<[[VTC]]>
@@ -54,11 +55,12 @@ define void @test_tc_less_than_16(ptr %A, i64 %N) {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[CAN_IV_NEXT:%.+]]>
-; CHECK-NEXT: EMIT ir<%p.src> = WIDEN-POINTER-INDUCTION ir<%A>, 1
-; CHECK-NEXT: vp<[[VPTR:%.]]> = vector-pointer ir<%p.src>
+; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
+; CHECK-NEXT: EMIT vp<[[PADD:%.+]]> = ptradd ir<%A>, vp<[[STEPS]]>
+; CHECK-NEXT: vp<[[VPTR:%.]]> = vector-pointer vp<[[PADD]]>
; CHECK-NEXT: WIDEN ir<%l> = load vp<[[VPTR]]>
; CHECK-NEXT: WIDEN ir<%add> = add nsw ir<%l>, ir<10>
-; CHECK-NEXT: vp<[[VPTR2:%.+]]> = vector-pointer ir<%p.src>
+; CHECK-NEXT: vp<[[VPTR2:%.+]]> = vector-pointer vp<[[PADD]]>
; CHECK-NEXT: WIDEN store vp<[[VPTR2]]>, ir<%add>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT]]> = add nuw vp<[[CAN_IV:%.+]]>, vp<[[VFxUF]]>
; CHECK-NEXT: EMIT branch-on-cond ir<true>
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll b/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
index 9b9c3e7..89b3a6d 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
@@ -1113,8 +1113,10 @@ define void @ptr_induction_remove_dead_recipe(ptr %start, ptr %end) {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
-; CHECK-NEXT: EMIT ir<%ptr.iv> = WIDEN-POINTER-INDUCTION ir<%start>, -1
-; CHECK-NEXT: CLONE ir<%ptr.iv.next> = getelementptr inbounds ir<%ptr.iv>, ir<-1>
+; CHECK-NEXT: vp<[[DEV_IV:%.+]]> = DERIVED-IV ir<0> + vp<%3> * ir<-1>
+; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[DEV_IV]]>, ir<-1>
+; CHECK-NEXT: EMIT vp<[[PTR_IV:%.+]]> = ptradd ir<%start>, vp<[[STEPS]]>
+; CHECK-NEXT: CLONE ir<%ptr.iv.next> = getelementptr inbounds vp<[[PTR_IV]]>, ir<-1>
; CHECK-NEXT: vp<[[VEC_PTR:%.+]]> = vector-pointer (reverse) ir<%ptr.iv.next>
; CHECK-NEXT: WIDEN ir<%l> = load vp<[[VEC_PTR]]>
; CHECK-NEXT: WIDEN ir<%c.1> = icmp eq ir<%l>, ir<0>
@@ -1127,7 +1129,7 @@ define void @ptr_induction_remove_dead_recipe(ptr %start, ptr %end) {
; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
-; CHECK-NEXT: REPLICATE ir<%ptr.iv.next> = getelementptr inbounds ir<%ptr.iv>, ir<-1>
+; CHECK-NEXT: REPLICATE ir<%ptr.iv.next> = getelementptr inbounds vp<[[PTR_IV]]>, ir<-1>
; CHECK-NEXT: REPLICATE store ir<95>, ir<%ptr.iv.next>
; CHECK-NEXT: Successor(s): pred.store.continue
; CHECK-EMPTY:
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll
index ef8665b..bdd0c6f 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll
@@ -6,15 +6,11 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; Tests to make sure no loads are introduced after a lifetime.end by multiply
; fusion.
-; FIXME: Currently the tests are mis-compiled, with loads being introduced after
-; llvm.lifetime.end calls.
-
define void @lifetime_for_first_arg_before_multiply(ptr noalias %B, ptr noalias %C) {
; CHECK-LABEL: @lifetime_for_first_arg_before_multiply(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca <4 x double>, align 32
; CHECK-NEXT: call void @init(ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A]], i64 0
; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 8
; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[TMP0]], i64 2
@@ -77,6 +73,7 @@ define void @lifetime_for_first_arg_before_multiply(ptr noalias %B, ptr noalias
; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
; CHECK-NEXT: ret void
;
entry:
@@ -95,7 +92,6 @@ define void @lifetime_for_second_arg_before_multiply(ptr noalias %A, ptr noalias
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B:%.*]] = alloca <4 x double>, align 32
; CHECK-NEXT: call void @init(ptr [[B]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A:%.*]], i64 0
; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 8
; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[TMP0]], i64 2
@@ -158,6 +154,7 @@ define void @lifetime_for_second_arg_before_multiply(ptr noalias %A, ptr noalias
; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B]])
; CHECK-NEXT: ret void
;
entry:
@@ -177,7 +174,6 @@ define void @lifetime_for_first_arg_before_multiply_load_from_offset(ptr noalias
; CHECK-NEXT: [[A:%.*]] = alloca <8 x double>, align 64
; CHECK-NEXT: call void @init(ptr [[A]])
; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr i8, ptr [[A]], i64 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[GEP_8]], i64 0
; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 8
; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[TMP0]], i64 2
@@ -240,6 +236,7 @@ define void @lifetime_for_first_arg_before_multiply_load_from_offset(ptr noalias
; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
; CHECK-NEXT: ret void
;
entry:
@@ -261,7 +258,6 @@ define void @lifetime_for_first_arg_before_multiply_lifetime_does_not_dominate(p
; CHECK-NEXT: call void @init(ptr [[A]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
; CHECK: then:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A]], i64 0
@@ -352,7 +348,6 @@ define void @lifetime_for_second_arg_before_multiply_lifetime_does_not_dominate(
; CHECK-NEXT: call void @init(ptr [[B]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
; CHECK: then:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A:%.*]], i64 0
@@ -441,10 +436,9 @@ define void @lifetime_for_ptr_first_arg_before_multiply(ptr noalias %A, ptr noal
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
; CHECK: then:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A:%.*]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A]], i64 0
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A:%.*]], i64 0
; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 8
; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[TMP0]], i64 2
; CHECK-NEXT: [[COL_LOAD1:%.*]] = load <2 x double>, ptr [[VEC_GEP]], align 8
@@ -528,15 +522,104 @@ define void @lifetime_for_both_ptr_args_before_multiply(ptr noalias %A, ptr noal
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
; CHECK: then:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B:%.*]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A:%.*]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A]], i64 0
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A:%.*]], i64 0
; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 8
; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[TMP0]], i64 2
; CHECK-NEXT: [[COL_LOAD1:%.*]] = load <2 x double>, ptr [[VEC_GEP]], align 8
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[B]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[B:%.*]], i64 0
+; CHECK-NEXT: [[COL_LOAD2:%.*]] = load <2 x double>, ptr [[TMP1]], align 8
+; CHECK-NEXT: [[VEC_GEP3:%.*]] = getelementptr double, ptr [[TMP1]], i64 2
+; CHECK-NEXT: [[COL_LOAD4:%.*]] = load <2 x double>, ptr [[VEC_GEP3]], align 8
+; CHECK-NEXT: [[BLOCK:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x double> poison, double [[TMP2]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = fmul contract <1 x double> [[BLOCK]], [[SPLAT_SPLAT]]
+; CHECK-NEXT: [[BLOCK5:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <1 x double> poison, double [[TMP4]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT6]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK5]], <1 x double> [[SPLAT_SPLAT7]], <1 x double> [[TMP3]])
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <1 x double> [[TMP5]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x double> zeroinitializer, <2 x double> [[TMP6]], <2 x i32> <i32 2, i32 1>
+; CHECK-NEXT: [[BLOCK8:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[BLOCK9:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT10:%.*]] = insertelement <1 x double> poison, double [[TMP8]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT11:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT10]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = fmul contract <1 x double> [[BLOCK9]], [[SPLAT_SPLAT11]]
+; CHECK-NEXT: [[BLOCK12:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <1 x double> poison, double [[TMP10]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT14:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT13]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK12]], <1 x double> [[SPLAT_SPLAT14]], <1 x double> [[TMP9]])
+; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <1 x double> [[TMP11]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> [[TMP12]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[BLOCK15:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT16:%.*]] = insertelement <1 x double> poison, double [[TMP14]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT17:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT16]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = fmul contract <1 x double> [[BLOCK15]], [[SPLAT_SPLAT17]]
+; CHECK-NEXT: [[BLOCK18:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT19:%.*]] = insertelement <1 x double> poison, double [[TMP16]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT20:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT19]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK18]], <1 x double> [[SPLAT_SPLAT20]], <1 x double> [[TMP15]])
+; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <1 x double> [[TMP17]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <2 x double> zeroinitializer, <2 x double> [[TMP18]], <2 x i32> <i32 2, i32 1>
+; CHECK-NEXT: [[BLOCK21:%.*]] = shufflevector <2 x double> [[TMP19]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[BLOCK22:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP20:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT23:%.*]] = insertelement <1 x double> poison, double [[TMP20]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT24:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT23]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = fmul contract <1 x double> [[BLOCK22]], [[SPLAT_SPLAT24]]
+; CHECK-NEXT: [[BLOCK25:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT26:%.*]] = insertelement <1 x double> poison, double [[TMP22]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT27:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT26]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP23:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK25]], <1 x double> [[SPLAT_SPLAT27]], <1 x double> [[TMP21]])
+; CHECK-NEXT: [[TMP24:%.*]] = shufflevector <1 x double> [[TMP23]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP25:%.*]] = shufflevector <2 x double> [[TMP19]], <2 x double> [[TMP24]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[C1:%.*]], i64 0
+; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
+; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
+; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
+; CHECK-NEXT: ret void
+;
+entry:
+ %a = load <4 x double>, ptr %A, align 8
+ %b = load <4 x double>, ptr %B, align 8
+ br i1 %c.0, label %then, label %exit
+
+then:
+ call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(i64 -1, ptr %A)
+ br label %exit
+
+exit:
+ %m = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
+ store <4 x double> %m, ptr %C, align 8
+ ret void
+}
+
+define void @multiple_unrelated_lifetimes(ptr noalias %A, ptr noalias %B, ptr noalias %C, i1 %c.0) {
+; CHECK-LABEL: @multiple_unrelated_lifetimes(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ALLOC_1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[ALLOC_2:%.*]] = alloca i32, align 4
+; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
+; CHECK: then:
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ALLOC_1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ALLOC_2]])
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A:%.*]], i64 0
+; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 8
+; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[TMP0]], i64 2
+; CHECK-NEXT: [[COL_LOAD1:%.*]] = load <2 x double>, ptr [[VEC_GEP]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[B:%.*]], i64 0
; CHECK-NEXT: [[COL_LOAD2:%.*]] = load <2 x double>, ptr [[TMP1]], align 8
; CHECK-NEXT: [[VEC_GEP3:%.*]] = getelementptr double, ptr [[TMP1]], i64 2
; CHECK-NEXT: [[COL_LOAD4:%.*]] = load <2 x double>, ptr [[VEC_GEP3]], align 8
@@ -597,13 +680,17 @@ define void @lifetime_for_both_ptr_args_before_multiply(ptr noalias %A, ptr noal
; CHECK-NEXT: ret void
;
entry:
+ %alloc.1 = alloca i32
+ %alloc.2 = alloca i32
%a = load <4 x double>, ptr %A, align 8
%b = load <4 x double>, ptr %B, align 8
br i1 %c.0, label %then, label %exit
then:
call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(i64 -1, ptr %alloc.1)
call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(i64 -1, ptr %alloc.2)
br label %exit
exit:
@@ -618,7 +705,6 @@ define void @lifetime_for_ptr_select_before_multiply(ptr noalias %A, ptr noalias
; CHECK-NEXT: [[P:%.*]] = select i1 [[C_0:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]
; CHECK-NEXT: br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
; CHECK: then:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[P]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[P]], i64 0
@@ -701,6 +787,374 @@ exit:
ret void
}
+define void @lifetimes_for_args_in_different_blocks(ptr noalias %B, ptr noalias %C, i1 %c.0) {
+; CHECK-LABEL: @lifetimes_for_args_in_different_blocks(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A:%.*]] = alloca <4 x double>, align 32
+; CHECK-NEXT: call void @init(ptr [[A]])
+; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
+; CHECK: then:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A]], i64 0
+; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 8
+; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[TMP0]], i64 2
+; CHECK-NEXT: [[COL_LOAD1:%.*]] = load <2 x double>, ptr [[VEC_GEP]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[B:%.*]], i64 0
+; CHECK-NEXT: [[COL_LOAD2:%.*]] = load <2 x double>, ptr [[TMP1]], align 8
+; CHECK-NEXT: [[VEC_GEP3:%.*]] = getelementptr double, ptr [[TMP1]], i64 2
+; CHECK-NEXT: [[COL_LOAD4:%.*]] = load <2 x double>, ptr [[VEC_GEP3]], align 8
+; CHECK-NEXT: [[BLOCK:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x double> poison, double [[TMP2]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = fmul contract <1 x double> [[BLOCK]], [[SPLAT_SPLAT]]
+; CHECK-NEXT: [[BLOCK5:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <1 x double> poison, double [[TMP4]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT6]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK5]], <1 x double> [[SPLAT_SPLAT7]], <1 x double> [[TMP3]])
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <1 x double> [[TMP5]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x double> zeroinitializer, <2 x double> [[TMP6]], <2 x i32> <i32 2, i32 1>
+; CHECK-NEXT: [[BLOCK8:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[BLOCK9:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT10:%.*]] = insertelement <1 x double> poison, double [[TMP8]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT11:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT10]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = fmul contract <1 x double> [[BLOCK9]], [[SPLAT_SPLAT11]]
+; CHECK-NEXT: [[BLOCK12:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <1 x double> poison, double [[TMP10]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT14:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT13]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK12]], <1 x double> [[SPLAT_SPLAT14]], <1 x double> [[TMP9]])
+; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <1 x double> [[TMP11]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> [[TMP12]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[BLOCK15:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT16:%.*]] = insertelement <1 x double> poison, double [[TMP14]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT17:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT16]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = fmul contract <1 x double> [[BLOCK15]], [[SPLAT_SPLAT17]]
+; CHECK-NEXT: [[BLOCK18:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT19:%.*]] = insertelement <1 x double> poison, double [[TMP16]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT20:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT19]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK18]], <1 x double> [[SPLAT_SPLAT20]], <1 x double> [[TMP15]])
+; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <1 x double> [[TMP17]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <2 x double> zeroinitializer, <2 x double> [[TMP18]], <2 x i32> <i32 2, i32 1>
+; CHECK-NEXT: [[BLOCK21:%.*]] = shufflevector <2 x double> [[TMP19]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[BLOCK22:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP20:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT23:%.*]] = insertelement <1 x double> poison, double [[TMP20]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT24:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT23]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = fmul contract <1 x double> [[BLOCK22]], [[SPLAT_SPLAT24]]
+; CHECK-NEXT: [[BLOCK25:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT26:%.*]] = insertelement <1 x double> poison, double [[TMP22]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT27:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT26]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP23:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK25]], <1 x double> [[SPLAT_SPLAT27]], <1 x double> [[TMP21]])
+; CHECK-NEXT: [[TMP24:%.*]] = shufflevector <1 x double> [[TMP23]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP25:%.*]] = shufflevector <2 x double> [[TMP19]], <2 x double> [[TMP24]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[C1:%.*]], i64 0
+; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
+; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
+; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: ret void
+;
+entry:
+ %A = alloca <4 x double>
+ call void @init(ptr %A)
+ br i1 %c.0, label %then, label %exit
+
+then:
+ %a = load <4 x double>, ptr %A, align 8
+ %b = load <4 x double>, ptr %B, align 8
+ %m = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
+ store <4 x double> %m, ptr %C, align 8
+ br label %exit
+
+exit:
+ call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(i64 -1, ptr %B)
+ ret void
+}
+
+define void @lifetimes_for_args_in_different_blocks2(ptr noalias %B, ptr noalias %C, i1 %c.0) {
+; CHECK-LABEL: @lifetimes_for_args_in_different_blocks2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A:%.*]] = alloca <4 x double>, align 32
+; CHECK-NEXT: call void @init(ptr [[A]])
+; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
+; CHECK: then:
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B:%.*]])
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A]], i64 0
+; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 8
+; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[TMP0]], i64 2
+; CHECK-NEXT: [[COL_LOAD1:%.*]] = load <2 x double>, ptr [[VEC_GEP]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[B]], i64 0
+; CHECK-NEXT: [[COL_LOAD2:%.*]] = load <2 x double>, ptr [[TMP1]], align 8
+; CHECK-NEXT: [[VEC_GEP3:%.*]] = getelementptr double, ptr [[TMP1]], i64 2
+; CHECK-NEXT: [[COL_LOAD4:%.*]] = load <2 x double>, ptr [[VEC_GEP3]], align 8
+; CHECK-NEXT: [[BLOCK:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x double> poison, double [[TMP2]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = fmul contract <1 x double> [[BLOCK]], [[SPLAT_SPLAT]]
+; CHECK-NEXT: [[BLOCK5:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <1 x double> poison, double [[TMP4]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT6]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK5]], <1 x double> [[SPLAT_SPLAT7]], <1 x double> [[TMP3]])
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <1 x double> [[TMP5]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x double> zeroinitializer, <2 x double> [[TMP6]], <2 x i32> <i32 2, i32 1>
+; CHECK-NEXT: [[BLOCK8:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[BLOCK9:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT10:%.*]] = insertelement <1 x double> poison, double [[TMP8]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT11:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT10]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = fmul contract <1 x double> [[BLOCK9]], [[SPLAT_SPLAT11]]
+; CHECK-NEXT: [[BLOCK12:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <1 x double> poison, double [[TMP10]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT14:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT13]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK12]], <1 x double> [[SPLAT_SPLAT14]], <1 x double> [[TMP9]])
+; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <1 x double> [[TMP11]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> [[TMP12]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[BLOCK15:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT16:%.*]] = insertelement <1 x double> poison, double [[TMP14]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT17:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT16]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = fmul contract <1 x double> [[BLOCK15]], [[SPLAT_SPLAT17]]
+; CHECK-NEXT: [[BLOCK18:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT19:%.*]] = insertelement <1 x double> poison, double [[TMP16]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT20:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT19]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK18]], <1 x double> [[SPLAT_SPLAT20]], <1 x double> [[TMP15]])
+; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <1 x double> [[TMP17]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <2 x double> zeroinitializer, <2 x double> [[TMP18]], <2 x i32> <i32 2, i32 1>
+; CHECK-NEXT: [[BLOCK21:%.*]] = shufflevector <2 x double> [[TMP19]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[BLOCK22:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP20:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT23:%.*]] = insertelement <1 x double> poison, double [[TMP20]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT24:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT23]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = fmul contract <1 x double> [[BLOCK22]], [[SPLAT_SPLAT24]]
+; CHECK-NEXT: [[BLOCK25:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT26:%.*]] = insertelement <1 x double> poison, double [[TMP22]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT27:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT26]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP23:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK25]], <1 x double> [[SPLAT_SPLAT27]], <1 x double> [[TMP21]])
+; CHECK-NEXT: [[TMP24:%.*]] = shufflevector <1 x double> [[TMP23]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP25:%.*]] = shufflevector <2 x double> [[TMP19]], <2 x double> [[TMP24]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[C1:%.*]], i64 0
+; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
+; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
+; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
+; CHECK-NEXT: ret void
+;
+entry:
+ %A = alloca <4 x double>
+ call void @init(ptr %A)
+ br i1 %c.0, label %then, label %exit
+
+then:
+ call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(i64 -1, ptr %B)
+ br label %exit
+
+exit:
+ %a = load <4 x double>, ptr %A, align 8
+ %b = load <4 x double>, ptr %B, align 8
+ %m = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
+ store <4 x double> %m, ptr %C, align 8
+ ret void
+}
+
+define void @lifetimes_for_args_load0_in_different_block(ptr noalias %B, ptr noalias %C, i1 %c.0) {
+; CHECK-LABEL: @lifetimes_for_args_load0_in_different_block(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A:%.*]] = alloca <4 x double>, align 32
+; CHECK-NEXT: call void @init(ptr [[A]])
+; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
+; CHECK: then:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A]], i64 0
+; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 8
+; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[TMP0]], i64 2
+; CHECK-NEXT: [[COL_LOAD1:%.*]] = load <2 x double>, ptr [[VEC_GEP]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[B:%.*]], i64 0
+; CHECK-NEXT: [[COL_LOAD2:%.*]] = load <2 x double>, ptr [[TMP1]], align 8
+; CHECK-NEXT: [[VEC_GEP3:%.*]] = getelementptr double, ptr [[TMP1]], i64 2
+; CHECK-NEXT: [[COL_LOAD4:%.*]] = load <2 x double>, ptr [[VEC_GEP3]], align 8
+; CHECK-NEXT: [[BLOCK:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x double> poison, double [[TMP2]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = fmul contract <1 x double> [[BLOCK]], [[SPLAT_SPLAT]]
+; CHECK-NEXT: [[BLOCK5:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <1 x double> poison, double [[TMP4]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT6]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK5]], <1 x double> [[SPLAT_SPLAT7]], <1 x double> [[TMP3]])
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <1 x double> [[TMP5]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x double> zeroinitializer, <2 x double> [[TMP6]], <2 x i32> <i32 2, i32 1>
+; CHECK-NEXT: [[BLOCK8:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[BLOCK9:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT10:%.*]] = insertelement <1 x double> poison, double [[TMP8]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT11:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT10]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = fmul contract <1 x double> [[BLOCK9]], [[SPLAT_SPLAT11]]
+; CHECK-NEXT: [[BLOCK12:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <1 x double> poison, double [[TMP10]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT14:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT13]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK12]], <1 x double> [[SPLAT_SPLAT14]], <1 x double> [[TMP9]])
+; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <1 x double> [[TMP11]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> [[TMP12]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[BLOCK15:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT16:%.*]] = insertelement <1 x double> poison, double [[TMP14]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT17:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT16]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = fmul contract <1 x double> [[BLOCK15]], [[SPLAT_SPLAT17]]
+; CHECK-NEXT: [[BLOCK18:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT19:%.*]] = insertelement <1 x double> poison, double [[TMP16]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT20:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT19]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK18]], <1 x double> [[SPLAT_SPLAT20]], <1 x double> [[TMP15]])
+; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <1 x double> [[TMP17]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <2 x double> zeroinitializer, <2 x double> [[TMP18]], <2 x i32> <i32 2, i32 1>
+; CHECK-NEXT: [[BLOCK21:%.*]] = shufflevector <2 x double> [[TMP19]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[BLOCK22:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP20:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT23:%.*]] = insertelement <1 x double> poison, double [[TMP20]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT24:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT23]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = fmul contract <1 x double> [[BLOCK22]], [[SPLAT_SPLAT24]]
+; CHECK-NEXT: [[BLOCK25:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT26:%.*]] = insertelement <1 x double> poison, double [[TMP22]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT27:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT26]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP23:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK25]], <1 x double> [[SPLAT_SPLAT27]], <1 x double> [[TMP21]])
+; CHECK-NEXT: [[TMP24:%.*]] = shufflevector <1 x double> [[TMP23]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP25:%.*]] = shufflevector <2 x double> [[TMP19]], <2 x double> [[TMP24]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[C1:%.*]], i64 0
+; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
+; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
+; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ %A = alloca <4 x double>
+ call void @init(ptr %A)
+ %a = load <4 x double>, ptr %A, align 8
+ call void @llvm.lifetime.end(i64 -1, ptr %A)
+ br i1 %c.0, label %then, label %exit
+
+then:
+ %b = load <4 x double>, ptr %B, align 8
+ %m = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
+ store <4 x double> %m, ptr %C, align 8
+ br label %exit
+
+exit:
+ call void @llvm.lifetime.end(i64 -1, ptr %B)
+ ret void
+}
+
+define void @lifetimes_for_args_load1_in_different_block(ptr noalias %B, ptr noalias %C, i1 %c.0) {
+; CHECK-LABEL: @lifetimes_for_args_load1_in_different_block(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A:%.*]] = alloca <4 x double>, align 32
+; CHECK-NEXT: call void @init(ptr [[A]])
+; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
+; CHECK: then:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A]], i64 0
+; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 8
+; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[TMP0]], i64 2
+; CHECK-NEXT: [[COL_LOAD1:%.*]] = load <2 x double>, ptr [[VEC_GEP]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[B:%.*]], i64 0
+; CHECK-NEXT: [[COL_LOAD2:%.*]] = load <2 x double>, ptr [[TMP1]], align 8
+; CHECK-NEXT: [[VEC_GEP3:%.*]] = getelementptr double, ptr [[TMP1]], i64 2
+; CHECK-NEXT: [[COL_LOAD4:%.*]] = load <2 x double>, ptr [[VEC_GEP3]], align 8
+; CHECK-NEXT: [[BLOCK:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x double> poison, double [[TMP2]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = fmul contract <1 x double> [[BLOCK]], [[SPLAT_SPLAT]]
+; CHECK-NEXT: [[BLOCK5:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <1 x double> poison, double [[TMP4]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT6]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK5]], <1 x double> [[SPLAT_SPLAT7]], <1 x double> [[TMP3]])
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <1 x double> [[TMP5]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x double> zeroinitializer, <2 x double> [[TMP6]], <2 x i32> <i32 2, i32 1>
+; CHECK-NEXT: [[BLOCK8:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[BLOCK9:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT10:%.*]] = insertelement <1 x double> poison, double [[TMP8]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT11:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT10]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = fmul contract <1 x double> [[BLOCK9]], [[SPLAT_SPLAT11]]
+; CHECK-NEXT: [[BLOCK12:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x double> [[COL_LOAD2]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <1 x double> poison, double [[TMP10]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT14:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT13]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK12]], <1 x double> [[SPLAT_SPLAT14]], <1 x double> [[TMP9]])
+; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <1 x double> [[TMP11]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> [[TMP12]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[BLOCK15:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT16:%.*]] = insertelement <1 x double> poison, double [[TMP14]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT17:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT16]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = fmul contract <1 x double> [[BLOCK15]], [[SPLAT_SPLAT17]]
+; CHECK-NEXT: [[BLOCK18:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT19:%.*]] = insertelement <1 x double> poison, double [[TMP16]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT20:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT19]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK18]], <1 x double> [[SPLAT_SPLAT20]], <1 x double> [[TMP15]])
+; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <1 x double> [[TMP17]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <2 x double> zeroinitializer, <2 x double> [[TMP18]], <2 x i32> <i32 2, i32 1>
+; CHECK-NEXT: [[BLOCK21:%.*]] = shufflevector <2 x double> [[TMP19]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[BLOCK22:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP20:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT23:%.*]] = insertelement <1 x double> poison, double [[TMP20]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT24:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT23]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = fmul contract <1 x double> [[BLOCK22]], [[SPLAT_SPLAT24]]
+; CHECK-NEXT: [[BLOCK25:%.*]] = shufflevector <2 x double> [[COL_LOAD1]], <2 x double> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x double> [[COL_LOAD4]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT26:%.*]] = insertelement <1 x double> poison, double [[TMP22]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLAT27:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT26]], <1 x double> poison, <1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP23:%.*]] = call contract <1 x double> @llvm.fmuladd.v1f64(<1 x double> [[BLOCK25]], <1 x double> [[SPLAT_SPLAT27]], <1 x double> [[TMP21]])
+; CHECK-NEXT: [[TMP24:%.*]] = shufflevector <1 x double> [[TMP23]], <1 x double> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT: [[TMP25:%.*]] = shufflevector <2 x double> [[TMP19]], <2 x double> [[TMP24]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[C1:%.*]], i64 0
+; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
+; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
+; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ %A = alloca <4 x double>
+ call void @init(ptr %A)
+ %b = load <4 x double>, ptr %B, align 8
+ call void @llvm.lifetime.end(i64 -1, ptr %B)
+ br i1 %c.0, label %then, label %exit
+
+then:
+ %a = load <4 x double>, ptr %A, align 8
+ %m = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
+ store <4 x double> %m, ptr %C, align 8
+ br label %exit
+
+exit:
+ call void @llvm.lifetime.end(i64 -1, ptr %A)
+ ret void
+}
+
declare void @init(ptr)
declare void @llvm.lifetime.end(i64, ptr)
diff --git a/llvm/test/Transforms/MergeFunc/constexpr.ll b/llvm/test/Transforms/MergeFunc/constexpr.ll
index 9fb7806..3946fd6 100644
--- a/llvm/test/Transforms/MergeFunc/constexpr.ll
+++ b/llvm/test/Transforms/MergeFunc/constexpr.ll
@@ -66,9 +66,9 @@ define i64 @f8() unnamed_addr {
define ptr @f10() unnamed_addr {
; CHECK-LABEL: define ptr @f10() unnamed_addr {
-; CHECK-NEXT: ret ptr getelementptr ([4 x i32], ptr @g1, i64 0, inrange i64 1)
+; CHECK-NEXT: ret ptr getelementptr inrange(0, 4) ([4 x i32], ptr @g1, i64 0, i64 1)
;
- ret ptr getelementptr ([4 x i32], ptr @g1, i64 0, inrange i64 1)
+ ret ptr getelementptr inrange(0, 4) ([4 x i32], ptr @g1, i64 0, i64 1)
}
define ptr @f11() unnamed_addr {
@@ -80,7 +80,7 @@ define ptr @f11() unnamed_addr {
define ptr @f12() unnamed_addr {
; CHECK-LABEL: define ptr @f12() unnamed_addr {
-; CHECK-NEXT: ret ptr getelementptr ([4 x i32], ptr @g1, inrange i64 0, i64 1)
+; CHECK-NEXT: ret ptr getelementptr inrange(-4, 12) ([4 x i32], ptr @g1, i64 0, i64 1)
;
- ret ptr getelementptr ([4 x i32], ptr @g1, inrange i64 0, i64 1)
+ ret ptr getelementptr inrange(-4, 12) ([4 x i32], ptr @g1, i64 0, i64 1)
}
diff --git a/llvm/test/Transforms/NewGVN/2007-07-25-DominatedLoop.ll b/llvm/test/Transforms/NewGVN/2007-07-25-DominatedLoop.ll
index 978f061..6f0ef19 100644
--- a/llvm/test/Transforms/NewGVN/2007-07-25-DominatedLoop.ll
+++ b/llvm/test/Transforms/NewGVN/2007-07-25-DominatedLoop.ll
@@ -1,86 +1,87 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn | llvm-dis
- %struct.PerlInterpreter = type { i8 }
+ %struct.PerlInterpreter = type { i8 }
@PL_sv_count = external global i32 ; <ptr> [#uses=2]
define void @perl_destruct(ptr %sv_interp) {
entry:
- br i1 false, label %cond_next25, label %cond_true16
+ br i1 false, label %cond_next25, label %cond_true16
cond_true16: ; preds = %entry
- ret void
+ ret void
cond_next25: ; preds = %entry
- br i1 false, label %cond_next33, label %cond_true32
+ br i1 false, label %cond_next33, label %cond_true32
cond_true32: ; preds = %cond_next25
- ret void
+ ret void
cond_next33: ; preds = %cond_next25
- br i1 false, label %cond_next61, label %cond_true.i46
+ br i1 false, label %cond_next61, label %cond_true.i46
cond_true.i46: ; preds = %cond_next33
- ret void
+ ret void
cond_next61: ; preds = %cond_next33
- br i1 false, label %cond_next69, label %cond_true66
+ br i1 false, label %cond_next69, label %cond_true66
cond_true66: ; preds = %cond_next61
- ret void
+ ret void
cond_next69: ; preds = %cond_next61
- br i1 false, label %Perl_safefree.exit52, label %cond_true.i50
+ br i1 false, label %Perl_safefree.exit52, label %cond_true.i50
cond_true.i50: ; preds = %cond_next69
- ret void
+ ret void
Perl_safefree.exit52: ; preds = %cond_next69
- br i1 false, label %cond_next80, label %cond_true77
+ br i1 false, label %cond_next80, label %cond_true77
cond_true77: ; preds = %Perl_safefree.exit52
- ret void
+ ret void
cond_next80: ; preds = %Perl_safefree.exit52
- br i1 false, label %Perl_safefree.exit56, label %cond_true.i54
+ br i1 false, label %Perl_safefree.exit56, label %cond_true.i54
cond_true.i54: ; preds = %cond_next80
- ret void
+ ret void
Perl_safefree.exit56: ; preds = %cond_next80
- br i1 false, label %Perl_safefree.exit60, label %cond_true.i58
+ br i1 false, label %Perl_safefree.exit60, label %cond_true.i58
cond_true.i58: ; preds = %Perl_safefree.exit56
- ret void
+ ret void
Perl_safefree.exit60: ; preds = %Perl_safefree.exit56
- br i1 false, label %Perl_safefree.exit64, label %cond_true.i62
+ br i1 false, label %Perl_safefree.exit64, label %cond_true.i62
cond_true.i62: ; preds = %Perl_safefree.exit60
- ret void
+ ret void
Perl_safefree.exit64: ; preds = %Perl_safefree.exit60
- br i1 false, label %Perl_safefree.exit68, label %cond_true.i66
+ br i1 false, label %Perl_safefree.exit68, label %cond_true.i66
cond_true.i66: ; preds = %Perl_safefree.exit64
- ret void
+ ret void
Perl_safefree.exit68: ; preds = %Perl_safefree.exit64
- br i1 false, label %cond_next150, label %cond_true23.i
+ br i1 false, label %cond_next150, label %cond_true23.i
cond_true23.i: ; preds = %Perl_safefree.exit68
- ret void
+ ret void
cond_next150: ; preds = %Perl_safefree.exit68
- %tmp16092 = load i32, ptr @PL_sv_count, align 4 ; <i32> [#uses=0]
- br label %cond_next165
+ %tmp16092 = load i32, ptr @PL_sv_count, align 4 ; <i32> [#uses=0]
+ br label %cond_next165
bb157: ; preds = %cond_next165
- %tmp158 = load i32, ptr @PL_sv_count, align 4 ; <i32> [#uses=0]
- br label %cond_next165
+ %tmp158 = load i32, ptr @PL_sv_count, align 4 ; <i32> [#uses=0]
+ br label %cond_next165
cond_next165: ; preds = %bb157, %cond_next150
- br i1 false, label %bb171, label %bb157
+ br i1 false, label %bb171, label %bb157
bb171: ; preds = %cond_next165
- ret void
+ ret void
}
diff --git a/llvm/test/Transforms/NewGVN/2007-07-25-InfiniteLoop.ll b/llvm/test/Transforms/NewGVN/2007-07-25-InfiniteLoop.ll
index abb6fbe..5202a2b 100644
--- a/llvm/test/Transforms/NewGVN/2007-07-25-InfiniteLoop.ll
+++ b/llvm/test/Transforms/NewGVN/2007-07-25-InfiniteLoop.ll
@@ -1,15 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
- %struct.INT2 = type { i32, i32 }
+ %struct.INT2 = type { i32, i32 }
@blkshifts = external global ptr ; <ptr> [#uses=2]
define i32 @xcompact() {
+; CHECK-LABEL: define i32 @xcompact() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: store ptr null, ptr @blkshifts, align 4
+; CHECK-NEXT: br label [[BB:%.*]]
+; CHECK: bb:
+; CHECK-NEXT: br label [[BB]]
+;
entry:
- store ptr null, ptr @blkshifts, align 4
- br label %bb
+ store ptr null, ptr @blkshifts, align 4
+ br label %bb
bb: ; preds = %bb, %entry
- %tmp10 = load ptr, ptr @blkshifts, align 4 ; <ptr> [#uses=0]
-; CHECK-NOT: %tmp10
- br label %bb
+ %tmp10 = load ptr, ptr @blkshifts, align 4 ; <ptr> [#uses=0]
+ br label %bb
}
diff --git a/llvm/test/Transforms/NewGVN/2007-07-25-Loop.ll b/llvm/test/Transforms/NewGVN/2007-07-25-Loop.ll
index 336f390..2ee599c1 100644
--- a/llvm/test/Transforms/NewGVN/2007-07-25-Loop.ll
+++ b/llvm/test/Transforms/NewGVN/2007-07-25-Loop.ll
@@ -1,15 +1,16 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn | llvm-dis
- %struct.s_segment_inf = type { float, i32, i16, i16, float, float, i32, float, float }
+ %struct.s_segment_inf = type { float, i32, i16, i16, float, float, i32, float, float }
define void @print_arch(ptr %arch_file, i32 %route_type, i64 %det_routing_arch.0.0, i64 %det_routing_arch.0.1, i64 %det_routing_arch.0.2, i64 %det_routing_arch.0.3, i64 %det_routing_arch.0.4, ptr %segment_inf, i64 %timing_inf.0.0, i64 %timing_inf.0.1, i64 %timing_inf.0.2, i64 %timing_inf.0.3, i64 %timing_inf.0.4, i32 %timing_inf.1) {
entry:
- br i1 false, label %bb278, label %bb344
+ br i1 false, label %bb278, label %bb344
bb278: ; preds = %bb278, %entry
- br i1 false, label %bb278, label %bb344
+ br i1 false, label %bb278, label %bb344
bb344: ; preds = %bb278, %entry
- %tmp38758 = load i16, ptr null, align 2 ; <i16> [#uses=0]
- ret void
+ %tmp38758 = load i16, ptr null, align 2 ; <i16> [#uses=0]
+ ret void
}
diff --git a/llvm/test/Transforms/NewGVN/2007-07-25-NestedLoop.ll b/llvm/test/Transforms/NewGVN/2007-07-25-NestedLoop.ll
index c46f2b7..e7461c2 100644
--- a/llvm/test/Transforms/NewGVN/2007-07-25-NestedLoop.ll
+++ b/llvm/test/Transforms/NewGVN/2007-07-25-NestedLoop.ll
@@ -1,38 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn | llvm-dis
- %struct.TypHeader = type { i32, ptr, [3 x i8], i8 }
+ %struct.TypHeader = type { i32, ptr, [3 x i8], i8 }
define ptr @LtRec(ptr %hdL, ptr %hdR) {
entry:
- br i1 false, label %bb556.preheader, label %bb534.preheader
+ br i1 false, label %bb556.preheader, label %bb534.preheader
bb534.preheader: ; preds = %entry
- ret ptr null
+ ret ptr null
bb556.preheader: ; preds = %entry
- %tmp56119 = getelementptr %struct.TypHeader, ptr %hdR, i32 0, i32 0 ; <ptr> [#uses=1]
- %tmp56220 = load i32, ptr %tmp56119 ; <i32> [#uses=0]
- br i1 false, label %bb.nph23, label %bb675.preheader
+ %tmp56119 = getelementptr %struct.TypHeader, ptr %hdR, i32 0, i32 0 ; <ptr> [#uses=1]
+ %tmp56220 = load i32, ptr %tmp56119 ; <i32> [#uses=0]
+ br i1 false, label %bb.nph23, label %bb675.preheader
bb.nph23: ; preds = %bb556.preheader
- ret ptr null
+ ret ptr null
bb656: ; preds = %bb675.outer, %bb656
- %tmp678 = load i32, ptr %tmp677 ; <i32> [#uses=0]
- br i1 false, label %bb684, label %bb656
+ %tmp678 = load i32, ptr %tmp677 ; <i32> [#uses=0]
+ br i1 false, label %bb684, label %bb656
bb684: ; preds = %bb675.outer, %bb656
- br i1 false, label %bb924.preheader, label %bb675.outer
+ br i1 false, label %bb924.preheader, label %bb675.outer
bb675.outer: ; preds = %bb675.preheader, %bb684
- %tmp67812 = load i32, ptr %tmp67711 ; <i32> [#uses=0]
- br i1 false, label %bb684, label %bb656
+ %tmp67812 = load i32, ptr %tmp67711 ; <i32> [#uses=0]
+ br i1 false, label %bb684, label %bb656
bb675.preheader: ; preds = %bb556.preheader
- %tmp67711 = getelementptr %struct.TypHeader, ptr %hdR, i32 0, i32 0 ; <ptr> [#uses=1]
- %tmp677 = getelementptr %struct.TypHeader, ptr %hdR, i32 0, i32 0 ; <ptr> [#uses=1]
- br label %bb675.outer
+ %tmp67711 = getelementptr %struct.TypHeader, ptr %hdR, i32 0, i32 0 ; <ptr> [#uses=1]
+ %tmp677 = getelementptr %struct.TypHeader, ptr %hdR, i32 0, i32 0 ; <ptr> [#uses=1]
+ br label %bb675.outer
bb924.preheader: ; preds = %bb684
- ret ptr null
+ ret ptr null
}
diff --git a/llvm/test/Transforms/NewGVN/2007-07-25-SinglePredecessor.ll b/llvm/test/Transforms/NewGVN/2007-07-25-SinglePredecessor.ll
index 0b0597f..6fafce3 100644
--- a/llvm/test/Transforms/NewGVN/2007-07-25-SinglePredecessor.ll
+++ b/llvm/test/Transforms/NewGVN/2007-07-25-SinglePredecessor.ll
@@ -1,29 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn | llvm-dis
- %struct.ggBRDF = type { ptr }
- %struct.ggBox3 = type { %struct.ggPoint3, %struct.ggPoint3 }
- %struct.ggMaterialRecord = type { %struct.ggPoint2, %struct.ggBox3, %struct.ggBox3, %struct.ggSpectrum, %struct.ggSpectrum, %struct.ggSpectrum, ptr, i32, i32, i32, i32 }
- %struct.ggONB3 = type { %struct.ggPoint3, %struct.ggPoint3, %struct.ggPoint3 }
- %struct.ggPoint2 = type { [2 x double] }
- %struct.ggPoint3 = type { [3 x double] }
- %struct.ggSpectrum = type { [8 x float] }
- %struct.mrViewingHitRecord = type { double, %struct.ggPoint3, %struct.ggONB3, %struct.ggPoint2, double, %struct.ggSpectrum, %struct.ggSpectrum, i32, i32, i32, i32 }
- %struct.mrXEllipticalCylinder = type { %struct.ggBRDF, float, float, float, float, float, float }
+ %struct.ggBRDF = type { ptr }
+ %struct.ggBox3 = type { %struct.ggPoint3, %struct.ggPoint3 }
+ %struct.ggMaterialRecord = type { %struct.ggPoint2, %struct.ggBox3, %struct.ggBox3, %struct.ggSpectrum, %struct.ggSpectrum, %struct.ggSpectrum, ptr, i32, i32, i32, i32 }
+ %struct.ggONB3 = type { %struct.ggPoint3, %struct.ggPoint3, %struct.ggPoint3 }
+ %struct.ggPoint2 = type { [2 x double] }
+ %struct.ggPoint3 = type { [3 x double] }
+ %struct.ggSpectrum = type { [8 x float] }
+ %struct.mrViewingHitRecord = type { double, %struct.ggPoint3, %struct.ggONB3, %struct.ggPoint2, double, %struct.ggSpectrum, %struct.ggSpectrum, i32, i32, i32, i32 }
+ %struct.mrXEllipticalCylinder = type { %struct.ggBRDF, float, float, float, float, float, float }
define i32 @_ZNK21mrZEllipticalCylinder10viewingHitERK6ggRay3dddR18mrViewingHitRecordR16ggMaterialRecord(ptr %this, ptr %ray, double %unnamed_arg, double %tmin, double %tmax, ptr %VHR, ptr %unnamed_arg2) {
entry:
- %tmp80.i = getelementptr %struct.mrViewingHitRecord, ptr %VHR, i32 0, i32 1, i32 0, i32 0 ; <ptr> [#uses=1]
- store double 0.000000e+00, ptr %tmp80.i
- br i1 false, label %return, label %cond_next.i
+ %tmp80.i = getelementptr %struct.mrViewingHitRecord, ptr %VHR, i32 0, i32 1, i32 0, i32 0 ; <ptr> [#uses=1]
+ store double 0.000000e+00, ptr %tmp80.i
+ br i1 false, label %return, label %cond_next.i
cond_next.i: ; preds = %entry
- br i1 false, label %return, label %cond_true
+ br i1 false, label %return, label %cond_true
cond_true: ; preds = %cond_next.i
- %tmp3.i8 = getelementptr %struct.mrViewingHitRecord, ptr %VHR, i32 0, i32 1, i32 0, i32 0 ; <ptr> [#uses=1]
- %tmp46 = load double, ptr %tmp3.i8 ; <double> [#uses=0]
- ret i32 1
+ %tmp3.i8 = getelementptr %struct.mrViewingHitRecord, ptr %VHR, i32 0, i32 1, i32 0, i32 0 ; <ptr> [#uses=1]
+ %tmp46 = load double, ptr %tmp3.i8 ; <double> [#uses=0]
+ ret i32 1
return: ; preds = %cond_next.i, %entry
- ret i32 0
+ ret i32 0
}
diff --git a/llvm/test/Transforms/NewGVN/2007-07-26-NonRedundant.ll b/llvm/test/Transforms/NewGVN/2007-07-26-NonRedundant.ll
index 8d3bfcd..a64901e 100644
--- a/llvm/test/Transforms/NewGVN/2007-07-26-NonRedundant.ll
+++ b/llvm/test/Transforms/NewGVN/2007-07-26-NonRedundant.ll
@@ -1,16 +1,17 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn | llvm-dis
@bsLive = external global i32 ; <ptr> [#uses=2]
define i32 @bsR(i32 %n) {
entry:
- br i1 false, label %cond_next, label %bb19
+ br i1 false, label %cond_next, label %bb19
cond_next: ; preds = %entry
- store i32 0, ptr @bsLive, align 4
- br label %bb19
+ store i32 0, ptr @bsLive, align 4
+ br label %bb19
bb19: ; preds = %cond_next, %entry
- %tmp29 = load i32, ptr @bsLive, align 4 ; <i32> [#uses=0]
- ret i32 0
+ %tmp29 = load i32, ptr @bsLive, align 4 ; <i32> [#uses=0]
+ ret i32 0
}
diff --git a/llvm/test/Transforms/NewGVN/2007-07-26-PhiErasure.ll b/llvm/test/Transforms/NewGVN/2007-07-26-PhiErasure.ll
index 22d6432..46f9b84 100644
--- a/llvm/test/Transforms/NewGVN/2007-07-26-PhiErasure.ll
+++ b/llvm/test/Transforms/NewGVN/2007-07-26-PhiErasure.ll
@@ -20,7 +20,7 @@ define i32 @reload(ptr %first, i32 %global, ptr %dumpfile) {
; CHECK: cond_next2943:
; CHECK-NEXT: br i1 false, label [[BB2982_PREHEADER:%.*]], label [[BB2928]]
; CHECK: bb2982.preheader:
-; CHECK-NEXT: store i8 poison, ptr null
+; CHECK-NEXT: store i8 poison, ptr null, align 1
; CHECK-NEXT: ret i32 poison
;
cond_next2835.1: ; preds = %cond_next2861
diff --git a/llvm/test/Transforms/NewGVN/2007-07-30-PredIDom.ll b/llvm/test/Transforms/NewGVN/2007-07-30-PredIDom.ll
index 59da31c..c708460 100644
--- a/llvm/test/Transforms/NewGVN/2007-07-30-PredIDom.ll
+++ b/llvm/test/Transforms/NewGVN/2007-07-30-PredIDom.ll
@@ -1,274 +1,275 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn | llvm-dis
- %"struct.Block::$_16" = type { i32 }
- %struct.Exp = type { ptr, i32, i32, i32, ptr, ptr, %"struct.Exp::$_10", %"struct.Block::$_16", %"struct.Exp::$_12" }
- %"struct.Exp::$_10" = type { ptr }
- %"struct.Exp::$_12" = type { ptr }
- %struct.Exp_ = type { i32, i32, i32, i32, ptr }
- %struct.Id = type { ptr, i32, i32, i32, %"struct.Id::$_13" }
- %"struct.Id::$_13" = type { double }
+ %"struct.Block::$_16" = type { i32 }
+ %struct.Exp = type { ptr, i32, i32, i32, ptr, ptr, %"struct.Exp::$_10", %"struct.Block::$_16", %"struct.Exp::$_12" }
+ %"struct.Exp::$_10" = type { ptr }
+ %"struct.Exp::$_12" = type { ptr }
+ %struct.Exp_ = type { i32, i32, i32, i32, ptr }
+ %struct.Id = type { ptr, i32, i32, i32, %"struct.Id::$_13" }
+ %"struct.Id::$_13" = type { double }
define ptr @_ZN3Exp8toStringEj(ptr %this, i32 %nextpc) {
entry:
- switch i32 0, label %bb970 [
- i32 1, label %bb
- i32 2, label %bb39
- i32 3, label %bb195
- i32 4, label %bb270
- i32 5, label %bb418
- i32 6, label %bb633
- i32 7, label %bb810
- i32 8, label %bb882
- i32 9, label %bb925
- ]
+ switch i32 0, label %bb970 [
+ i32 1, label %bb
+ i32 2, label %bb39
+ i32 3, label %bb195
+ i32 4, label %bb270
+ i32 5, label %bb418
+ i32 6, label %bb633
+ i32 7, label %bb810
+ i32 8, label %bb882
+ i32 9, label %bb925
+ ]
bb: ; preds = %entry
- store ptr null, ptr null
- br label %return
+ store ptr null, ptr null
+ br label %return
bb39: ; preds = %entry
- br i1 false, label %cond_true, label %cond_false132
+ br i1 false, label %cond_true, label %cond_false132
cond_true: ; preds = %bb39
- br i1 false, label %cond_true73, label %cond_false
+ br i1 false, label %cond_true73, label %cond_false
cond_true73: ; preds = %cond_true
- br i1 false, label %cond_true108, label %cond_next
+ br i1 false, label %cond_true108, label %cond_next
cond_true108: ; preds = %cond_true73
- br label %cond_next
+ br label %cond_next
cond_next: ; preds = %cond_true108, %cond_true73
- br label %cond_next131
+ br label %cond_next131
cond_false: ; preds = %cond_true
- br label %cond_next131
+ br label %cond_next131
cond_next131: ; preds = %cond_false, %cond_next
- br label %cond_next141
+ br label %cond_next141
cond_false132: ; preds = %bb39
- br label %cond_next141
+ br label %cond_next141
cond_next141: ; preds = %cond_false132, %cond_next131
- br i1 false, label %cond_true169, label %cond_false175
+ br i1 false, label %cond_true169, label %cond_false175
cond_true169: ; preds = %cond_next141
- br label %cond_next181
+ br label %cond_next181
cond_false175: ; preds = %cond_next141
- br label %cond_next181
+ br label %cond_next181
cond_next181: ; preds = %cond_false175, %cond_true169
- br i1 false, label %cond_true189, label %cond_next191
+ br i1 false, label %cond_true189, label %cond_next191
cond_true189: ; preds = %cond_next181
- br label %cond_next191
+ br label %cond_next191
cond_next191: ; preds = %cond_true189, %cond_next181
- store ptr null, ptr null
- br label %return
+ store ptr null, ptr null
+ br label %return
bb195: ; preds = %entry
- br i1 false, label %cond_true248, label %cond_false250
+ br i1 false, label %cond_true248, label %cond_false250
cond_true248: ; preds = %bb195
- br label %cond_next252
+ br label %cond_next252
cond_false250: ; preds = %bb195
- br label %cond_next252
+ br label %cond_next252
cond_next252: ; preds = %cond_false250, %cond_true248
- br i1 false, label %cond_true265, label %cond_next267
+ br i1 false, label %cond_true265, label %cond_next267
cond_true265: ; preds = %cond_next252
- br label %cond_next267
+ br label %cond_next267
cond_next267: ; preds = %cond_true265, %cond_next252
- store ptr null, ptr null
- br label %return
+ store ptr null, ptr null
+ br label %return
bb270: ; preds = %entry
- br i1 false, label %cond_true338, label %cond_false340
+ br i1 false, label %cond_true338, label %cond_false340
cond_true338: ; preds = %bb270
- br label %cond_next342
+ br label %cond_next342
cond_false340: ; preds = %bb270
- br label %cond_next342
+ br label %cond_next342
cond_next342: ; preds = %cond_false340, %cond_true338
- br i1 false, label %cond_true362, label %cond_false364
+ br i1 false, label %cond_true362, label %cond_false364
cond_true362: ; preds = %cond_next342
- br label %cond_next366
+ br label %cond_next366
cond_false364: ; preds = %cond_next342
- br label %cond_next366
+ br label %cond_next366
cond_next366: ; preds = %cond_false364, %cond_true362
- br i1 false, label %cond_true393, label %cond_next395
+ br i1 false, label %cond_true393, label %cond_next395
cond_true393: ; preds = %cond_next366
- br label %cond_next395
+ br label %cond_next395
cond_next395: ; preds = %cond_true393, %cond_next366
- br i1 false, label %cond_true406, label %cond_next408
+ br i1 false, label %cond_true406, label %cond_next408
cond_true406: ; preds = %cond_next395
- br label %cond_next408
+ br label %cond_next408
cond_next408: ; preds = %cond_true406, %cond_next395
- br i1 false, label %cond_true413, label %cond_next415
+ br i1 false, label %cond_true413, label %cond_next415
cond_true413: ; preds = %cond_next408
- br label %cond_next415
+ br label %cond_next415
cond_next415: ; preds = %cond_true413, %cond_next408
- store ptr null, ptr null
- br label %return
+ store ptr null, ptr null
+ br label %return
bb418: ; preds = %entry
- br i1 false, label %cond_true512, label %cond_false514
+ br i1 false, label %cond_true512, label %cond_false514
cond_true512: ; preds = %bb418
- br label %cond_next516
+ br label %cond_next516
cond_false514: ; preds = %bb418
- br label %cond_next516
+ br label %cond_next516
cond_next516: ; preds = %cond_false514, %cond_true512
- br i1 false, label %cond_true536, label %cond_false538
+ br i1 false, label %cond_true536, label %cond_false538
cond_true536: ; preds = %cond_next516
- br label %cond_next540
+ br label %cond_next540
cond_false538: ; preds = %cond_next516
- br label %cond_next540
+ br label %cond_next540
cond_next540: ; preds = %cond_false538, %cond_true536
- br i1 false, label %cond_true560, label %cond_false562
+ br i1 false, label %cond_true560, label %cond_false562
cond_true560: ; preds = %cond_next540
- br label %cond_next564
+ br label %cond_next564
cond_false562: ; preds = %cond_next540
- br label %cond_next564
+ br label %cond_next564
cond_next564: ; preds = %cond_false562, %cond_true560
- br i1 false, label %cond_true597, label %cond_next599
+ br i1 false, label %cond_true597, label %cond_next599
cond_true597: ; preds = %cond_next564
- br label %cond_next599
+ br label %cond_next599
cond_next599: ; preds = %cond_true597, %cond_next564
- br i1 false, label %cond_true614, label %cond_next616
+ br i1 false, label %cond_true614, label %cond_next616
cond_true614: ; preds = %cond_next599
- br label %cond_next616
+ br label %cond_next616
cond_next616: ; preds = %cond_true614, %cond_next599
- br i1 false, label %cond_true621, label %cond_next623
+ br i1 false, label %cond_true621, label %cond_next623
cond_true621: ; preds = %cond_next616
- br label %cond_next623
+ br label %cond_next623
cond_next623: ; preds = %cond_true621, %cond_next616
- br i1 false, label %cond_true628, label %cond_next630
+ br i1 false, label %cond_true628, label %cond_next630
cond_true628: ; preds = %cond_next623
- br label %cond_next630
+ br label %cond_next630
cond_next630: ; preds = %cond_true628, %cond_next623
- store ptr null, ptr null
- br label %return
+ store ptr null, ptr null
+ br label %return
bb633: ; preds = %entry
- br i1 false, label %cond_true667, label %cond_next669
+ br i1 false, label %cond_true667, label %cond_next669
cond_true667: ; preds = %bb633
- br label %cond_next669
+ br label %cond_next669
cond_next669: ; preds = %cond_true667, %bb633
- br i1 false, label %cond_true678, label %cond_next791
+ br i1 false, label %cond_true678, label %cond_next791
cond_true678: ; preds = %cond_next669
- br label %bb735
+ br label %bb735
bb679: ; preds = %bb735
- br i1 false, label %cond_true729, label %cond_next731
+ br i1 false, label %cond_true729, label %cond_next731
cond_true729: ; preds = %bb679
- br label %cond_next731
+ br label %cond_next731
cond_next731: ; preds = %cond_true729, %bb679
- br label %bb735
+ br label %bb735
bb735: ; preds = %cond_next731, %cond_true678
- br i1 false, label %bb679, label %bb743
+ br i1 false, label %bb679, label %bb743
bb743: ; preds = %bb735
- br i1 false, label %cond_true788, label %cond_next790
+ br i1 false, label %cond_true788, label %cond_next790
cond_true788: ; preds = %bb743
- br label %cond_next790
+ br label %cond_next790
cond_next790: ; preds = %cond_true788, %bb743
- br label %cond_next791
+ br label %cond_next791
cond_next791: ; preds = %cond_next790, %cond_next669
- br i1 false, label %cond_true805, label %cond_next807
+ br i1 false, label %cond_true805, label %cond_next807
cond_true805: ; preds = %cond_next791
- br label %cond_next807
+ br label %cond_next807
cond_next807: ; preds = %cond_true805, %cond_next791
- store ptr null, ptr null
- br label %return
+ store ptr null, ptr null
+ br label %return
bb810: ; preds = %entry
- br i1 false, label %cond_true870, label %cond_next872
+ br i1 false, label %cond_true870, label %cond_next872
cond_true870: ; preds = %bb810
- br label %cond_next872
+ br label %cond_next872
cond_next872: ; preds = %cond_true870, %bb810
- br i1 false, label %cond_true877, label %cond_next879
+ br i1 false, label %cond_true877, label %cond_next879
cond_true877: ; preds = %cond_next872
- br label %cond_next879
+ br label %cond_next879
cond_next879: ; preds = %cond_true877, %cond_next872
- store ptr null, ptr null
- br label %return
+ store ptr null, ptr null
+ br label %return
bb882: ; preds = %entry
- br i1 false, label %cond_true920, label %cond_next922
+ br i1 false, label %cond_true920, label %cond_next922
cond_true920: ; preds = %bb882
- br label %cond_next922
+ br label %cond_next922
cond_next922: ; preds = %cond_true920, %bb882
- store ptr null, ptr null
- br label %return
+ store ptr null, ptr null
+ br label %return
bb925: ; preds = %entry
- br i1 false, label %cond_true965, label %cond_next967
+ br i1 false, label %cond_true965, label %cond_next967
cond_true965: ; preds = %bb925
- br label %cond_next967
+ br label %cond_next967
cond_next967: ; preds = %cond_true965, %bb925
- store ptr null, ptr null
- br label %return
+ store ptr null, ptr null
+ br label %return
bb970: ; preds = %entry
- unreachable
- ; No predecessors!
- store ptr null, ptr null
- br label %return
+ unreachable
+ ; No predecessors!
+ store ptr null, ptr null
+ br label %return
return: ; preds = %0, %cond_next967, %cond_next922, %cond_next879, %cond_next807, %cond_next630, %cond_next415, %cond_next267, %cond_next191, %bb
- %retval980 = load ptr, ptr null ; <ptr> [#uses=1]
- ret ptr %retval980
+ %retval980 = load ptr, ptr null ; <ptr> [#uses=1]
+ ret ptr %retval980
}
diff --git a/llvm/test/Transforms/NewGVN/2007-07-31-RedundantPhi.ll b/llvm/test/Transforms/NewGVN/2007-07-31-RedundantPhi.ll
index 934fffc..5411bcc 100644
--- a/llvm/test/Transforms/NewGVN/2007-07-31-RedundantPhi.ll
+++ b/llvm/test/Transforms/NewGVN/2007-07-31-RedundantPhi.ll
@@ -1,23 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
@img_width = external global i16 ; <ptr> [#uses=2]
define i32 @smpUMHEXBipredIntegerPelBlockMotionSearch(ptr %cur_pic, i16 signext %ref, i32 %list, i32 %pic_pix_x, i32 %pic_pix_y, i32 %blocktype, i16 signext %pred_mv_x1, i16 signext %pred_mv_y1, i16 signext %pred_mv_x2, i16 signext %pred_mv_y2, ptr %mv_x, ptr %mv_y, ptr %s_mv_x, ptr %s_mv_y, i32 %search_range, i32 %min_mcost, i32 %lambda_factor) {
+; CHECK-LABEL: define i32 @smpUMHEXBipredIntegerPelBlockMotionSearch(
+; CHECK-SAME: ptr [[CUR_PIC:%.*]], i16 signext [[REF:%.*]], i32 [[LIST:%.*]], i32 [[PIC_PIX_X:%.*]], i32 [[PIC_PIX_Y:%.*]], i32 [[BLOCKTYPE:%.*]], i16 signext [[PRED_MV_X1:%.*]], i16 signext [[PRED_MV_Y1:%.*]], i16 signext [[PRED_MV_X2:%.*]], i16 signext [[PRED_MV_Y2:%.*]], ptr [[MV_X:%.*]], ptr [[MV_Y:%.*]], ptr [[S_MV_X:%.*]], ptr [[S_MV_Y:%.*]], i32 [[SEARCH_RANGE:%.*]], i32 [[MIN_MCOST:%.*]], i32 [[LAMBDA_FACTOR:%.*]]) {
+; CHECK-NEXT: cond_next143:
+; CHECK-NEXT: store i16 0, ptr @img_width, align 2
+; CHECK-NEXT: br i1 false, label [[COND_NEXT449:%.*]], label [[COND_FALSE434:%.*]]
+; CHECK: cond_false434:
+; CHECK-NEXT: br label [[COND_NEXT449]]
+; CHECK: cond_next449:
+; CHECK-NEXT: br i1 false, label [[COND_NEXT698:%.*]], label [[COND_FALSE470:%.*]]
+; CHECK: cond_false470:
+; CHECK-NEXT: br label [[COND_NEXT698]]
+; CHECK: cond_next698:
+; CHECK-NEXT: ret i32 0
+;
cond_next143: ; preds = %entry
- store i16 0, ptr @img_width, align 2
- br i1 false, label %cond_next449, label %cond_false434
+ store i16 0, ptr @img_width, align 2
+ br i1 false, label %cond_next449, label %cond_false434
cond_false434: ; preds = %cond_true415
- br label %cond_next449
+ br label %cond_next449
cond_next449: ; preds = %cond_false434, %cond_true415
- br i1 false, label %cond_next698, label %cond_false470
+ br i1 false, label %cond_next698, label %cond_false470
cond_false470: ; preds = %cond_next449
- br label %cond_next698
+ br label %cond_next698
cond_next698: ; preds = %cond_true492
- %tmp701 = load i16, ptr @img_width, align 2 ; <i16> [#uses=0]
-; CHECK-NOT: %tmp701 =
- ret i32 0
+ %tmp701 = load i16, ptr @img_width, align 2 ; <i16> [#uses=0]
+ ret i32 0
}
diff --git a/llvm/test/Transforms/NewGVN/2008-02-13-NewPHI.ll b/llvm/test/Transforms/NewGVN/2008-02-13-NewPHI.ll
index b2440fb..49d6de7 100644
--- a/llvm/test/Transforms/NewGVN/2008-02-13-NewPHI.ll
+++ b/llvm/test/Transforms/NewGVN/2008-02-13-NewPHI.ll
@@ -1,22 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn
; PR2032
define i32 @sscal(i32 %n, double %sa1, ptr %sx, i32 %incx) {
entry:
- %sx_addr = alloca ptr ; <ptr> [#uses=3]
- store ptr %sx, ptr %sx_addr, align 4
- br label %bb33
+ %sx_addr = alloca ptr ; <ptr> [#uses=3]
+ store ptr %sx, ptr %sx_addr, align 4
+ br label %bb33
bb: ; preds = %bb33
- %tmp27 = load ptr, ptr %sx_addr, align 4 ; <ptr> [#uses=1]
- store float 0.000000e+00, ptr %tmp27, align 4
- store ptr null, ptr %sx_addr, align 4
- br label %bb33
+ %tmp27 = load ptr, ptr %sx_addr, align 4 ; <ptr> [#uses=1]
+ store float 0.000000e+00, ptr %tmp27, align 4
+ store ptr null, ptr %sx_addr, align 4
+ br label %bb33
bb33: ; preds = %bb, %entry
- br i1 false, label %bb, label %return
+ br i1 false, label %bb, label %return
return: ; preds = %bb33
- %retval59 = load i32, ptr null, align 4 ; <i32> [#uses=1]
- ret i32 %retval59
+ %retval59 = load i32, ptr null, align 4 ; <i32> [#uses=1]
+ ret i32 %retval59
}
diff --git a/llvm/test/Transforms/NewGVN/2008-07-02-Unreachable.ll b/llvm/test/Transforms/NewGVN/2008-07-02-Unreachable.ll
index 0c1891e..cf591d7 100644
--- a/llvm/test/Transforms/NewGVN/2008-07-02-Unreachable.ll
+++ b/llvm/test/Transforms/NewGVN/2008-07-02-Unreachable.ll
@@ -1,36 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
; PR2503
@g_3 = external global i8 ; <ptr> [#uses=2]
define i8 @func_1(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: define i8 @func_1(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], [[Y]]
+; CHECK-NEXT: br i1 [[CMP]], label [[IFELSE:%.*]], label [[IFTHEN:%.*]]
+; CHECK: ifthen:
+; CHECK-NEXT: br label [[IFEND:%.*]]
+; CHECK: ifelse:
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr @g_3, align 1
+; CHECK-NEXT: store i8 [[TMP3]], ptr [[A]], align 1
+; CHECK-NEXT: br label [[AFTERFOR:%.*]]
+; CHECK: forcond:
+; CHECK-NEXT: store i8 poison, ptr null, align 1
+; CHECK-NEXT: br i1 false, label [[AFTERFOR]], label [[FORBODY:%.*]]
+; CHECK: forbody:
+; CHECK-NEXT: store i8 poison, ptr null, align 1
+; CHECK-NEXT: br label [[FORINC:%.*]]
+; CHECK: forinc:
+; CHECK-NEXT: store i8 poison, ptr null, align 1
+; CHECK-NEXT: br label [[FORCOND:%.*]]
+; CHECK: afterfor:
+; CHECK-NEXT: ret i8 [[TMP3]]
+; CHECK: ifend:
+; CHECK-NEXT: ret i8 0
+;
entry:
%A = alloca i8
- %cmp = icmp eq i32 %x, %y
- br i1 %cmp, label %ifelse, label %ifthen
+ %cmp = icmp eq i32 %x, %y
+ br i1 %cmp, label %ifelse, label %ifthen
ifthen: ; preds = %entry
- br label %ifend
+ br label %ifend
ifelse: ; preds = %entry
- %tmp3 = load i8, ptr @g_3 ; <i8> [#uses=0]
- store i8 %tmp3, ptr %A
- br label %afterfor
+ %tmp3 = load i8, ptr @g_3 ; <i8> [#uses=0]
+ store i8 %tmp3, ptr %A
+ br label %afterfor
forcond: ; preds = %forinc
- br i1 false, label %afterfor, label %forbody
+ br i1 false, label %afterfor, label %forbody
forbody: ; preds = %forcond
- br label %forinc
+ br label %forinc
forinc: ; preds = %forbody
- br label %forcond
+ br label %forcond
afterfor: ; preds = %forcond, %forcond.thread
- %tmp10 = load i8, ptr @g_3 ; <i8> [#uses=0]
- ret i8 %tmp10
-; CHECK: ret i8 %tmp3
+ %tmp10 = load i8, ptr @g_3 ; <i8> [#uses=0]
+ ret i8 %tmp10
ifend: ; preds = %afterfor, %ifthen
- ret i8 0
+ ret i8 0
}
diff --git a/llvm/test/Transforms/NewGVN/2008-12-09-SelfRemove.ll b/llvm/test/Transforms/NewGVN/2008-12-09-SelfRemove.ll
index a2e252d..95cb229 100644
--- a/llvm/test/Transforms/NewGVN/2008-12-09-SelfRemove.ll
+++ b/llvm/test/Transforms/NewGVN/2008-12-09-SelfRemove.ll
@@ -1,38 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin9.5"
- %struct.anon = type { ptr, i32 }
- %struct.d_print_info = type { i32, ptr, i32, i32, ptr, ptr, i32 }
- %struct.d_print_mod = type { ptr, ptr, i32, ptr }
- %struct.d_print_template = type { ptr, ptr }
- %struct.demangle_component = type { i32, { %struct.anon } }
+ %struct.anon = type { ptr, i32 }
+ %struct.d_print_info = type { i32, ptr, i32, i32, ptr, ptr, i32 }
+ %struct.d_print_mod = type { ptr, ptr, i32, ptr }
+ %struct.d_print_template = type { ptr, ptr }
+ %struct.demangle_component = type { i32, { %struct.anon } }
define void @d_print_mod_list(ptr %dpi, ptr %mods, i32 %suffix) nounwind {
+; CHECK-LABEL: define void @d_print_mod_list(
+; CHECK-SAME: ptr [[DPI:%.*]], ptr [[MODS:%.*]], i32 [[SUFFIX:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_D_PRINT_INFO:%.*]], ptr [[DPI]], i32 0, i32 1
+; CHECK-NEXT: br i1 false, label [[RETURN:%.*]], label [[BB:%.*]]
+; CHECK: bb:
+; CHECK-NEXT: br label [[BB21:%.*]]
+; CHECK: bb21:
+; CHECK-NEXT: br label [[BB21]]
+; CHECK: return:
+; CHECK-NEXT: store i8 poison, ptr null, align 1
+; CHECK-NEXT: ret void
+;
entry:
- %0 = getelementptr %struct.d_print_info, ptr %dpi, i32 0, i32 1 ; <ptr> [#uses=1]
- br i1 false, label %return, label %bb
+ %0 = getelementptr %struct.d_print_info, ptr %dpi, i32 0, i32 1 ; <ptr> [#uses=1]
+ br i1 false, label %return, label %bb
bb: ; preds = %entry
- %1 = load ptr, ptr %0, align 4 ; <ptr> [#uses=0]
- %2 = getelementptr %struct.d_print_info, ptr %dpi, i32 0, i32 1 ; <ptr> [#uses=0]
- br label %bb21
+ %1 = load ptr, ptr %0, align 4 ; <ptr> [#uses=0]
+ %2 = getelementptr %struct.d_print_info, ptr %dpi, i32 0, i32 1 ; <ptr> [#uses=0]
+ br label %bb21
bb21: ; preds = %bb21, %bb
- br label %bb21
+ br label %bb21
return: ; preds = %entry
- ret void
+ ret void
}
-; CHECK: define void @d_print_mod_list(ptr %dpi, ptr %mods, i32 %suffix) #0 {
-; CHECK: entry:
-; CHECK: %0 = getelementptr %struct.d_print_info, ptr %dpi, i32 0, i32 1
-; CHECK: br i1 false, label %return, label %bb
-; CHECK: bb:
-; CHECK: br label %bb21
-; CHECK: bb21:
-; CHECK: br label %bb21
-; CHECK: return:
-; CHECK: ret void
-; CHECK: }
diff --git a/llvm/test/Transforms/NewGVN/2008-12-12-RLE-Crash.ll b/llvm/test/Transforms/NewGVN/2008-12-12-RLE-Crash.ll
index bb51f72..3df35a1 100644
--- a/llvm/test/Transforms/NewGVN/2008-12-12-RLE-Crash.ll
+++ b/llvm/test/Transforms/NewGVN/2008-12-12-RLE-Crash.ll
@@ -1,35 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn | llvm-dis
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin7"
define i32 @main(i32 %argc, ptr %argv) nounwind {
entry:
- br label %bb84
+ br label %bb84
bb41: ; preds = %bb82
- %tmp = load i8, ptr %opt.0, align 1 ; <i8> [#uses=0]
- %tmp1 = getelementptr i8, ptr %opt.0, i32 1 ; <ptr> [#uses=2]
- switch i32 0, label %bb81 [
- i32 102, label %bb82
- i32 110, label %bb79
- i32 118, label %bb80
- ]
+ %tmp = load i8, ptr %opt.0, align 1 ; <i8> [#uses=0]
+ %tmp1 = getelementptr i8, ptr %opt.0, i32 1 ; <ptr> [#uses=2]
+ switch i32 0, label %bb81 [
+ i32 102, label %bb82
+ i32 110, label %bb79
+ i32 118, label %bb80
+ ]
bb79: ; preds = %bb41
- br label %bb82
+ br label %bb82
bb80: ; preds = %bb41
- ret i32 0
+ ret i32 0
bb81: ; preds = %bb41
- ret i32 1
+ ret i32 1
bb82: ; preds = %bb84, %bb79, %bb41
- %opt.0 = phi ptr [ %tmp3, %bb84 ], [ %tmp1, %bb79 ], [ %tmp1, %bb41 ] ; <ptr> [#uses=3]
- %tmp2 = load i8, ptr %opt.0, align 1 ; <i8> [#uses=0]
- br i1 false, label %bb84, label %bb41
+ %opt.0 = phi ptr [ %tmp3, %bb84 ], [ %tmp1, %bb79 ], [ %tmp1, %bb41 ] ; <ptr> [#uses=3]
+ %tmp2 = load i8, ptr %opt.0, align 1 ; <i8> [#uses=0]
+ br i1 false, label %bb84, label %bb41
bb84: ; preds = %bb82, %entry
- %tmp3 = getelementptr i8, ptr null, i32 1 ; <ptr> [#uses=1]
- br label %bb82
+ %tmp3 = getelementptr i8, ptr null, i32 1 ; <ptr> [#uses=1]
+ br label %bb82
}
diff --git a/llvm/test/Transforms/NewGVN/2008-12-14-rle-reanalyze.ll b/llvm/test/Transforms/NewGVN/2008-12-14-rle-reanalyze.ll
index 38d1240..8213556 100644
--- a/llvm/test/Transforms/NewGVN/2008-12-14-rle-reanalyze.ll
+++ b/llvm/test/Transforms/NewGVN/2008-12-14-rle-reanalyze.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn | llvm-dis
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin7"
@@ -5,14 +6,14 @@ target triple = "i386-apple-darwin7"
define i32 @Quiesce(i32 %alpha, i32 %beta, i32 %wtm, i32 %ply) nounwind {
entry:
- br label %bb22
+ br label %bb22
bb22: ; preds = %bb23, %bb22, %entry
- br i1 false, label %bb23, label %bb22
+ br i1 false, label %bb23, label %bb22
bb23: ; preds = %bb23, %bb22
- %sortv.233 = phi ptr [ @sort_value, %bb22 ], [ %sortv.2, %bb23 ] ; <ptr> [#uses=1]
- %0 = load i32, ptr %sortv.233, align 4 ; <i32> [#uses=0]
- %sortv.2 = getelementptr [256 x i32], ptr @sort_value, i32 0, i32 0 ; <ptr> [#uses=1]
- br i1 false, label %bb23, label %bb22
+ %sortv.233 = phi ptr [ @sort_value, %bb22 ], [ %sortv.2, %bb23 ] ; <ptr> [#uses=1]
+ %0 = load i32, ptr %sortv.233, align 4 ; <i32> [#uses=0]
+ %sortv.2 = getelementptr [256 x i32], ptr @sort_value, i32 0, i32 0 ; <ptr> [#uses=1]
+ br i1 false, label %bb23, label %bb22
}
diff --git a/llvm/test/Transforms/NewGVN/2008-12-15-CacheVisited.ll b/llvm/test/Transforms/NewGVN/2008-12-15-CacheVisited.ll
index 0b1761d..ff9ecce 100644
--- a/llvm/test/Transforms/NewGVN/2008-12-15-CacheVisited.ll
+++ b/llvm/test/Transforms/NewGVN/2008-12-15-CacheVisited.ll
@@ -1,28 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn | llvm-dis
; Cached results must be added to and verified against the visited sets.
; PR3217
define fastcc void @gen_field_die(ptr %decl) nounwind {
entry:
- br i1 false, label %bb203, label %bb202
+ br i1 false, label %bb203, label %bb202
bb202: ; preds = %entry
- unreachable
+ unreachable
bb203: ; preds = %entry
- %tmp = getelementptr i32, ptr %decl, i32 1 ; <ptr> [#uses=1]
- %tmp1 = load i32, ptr %tmp, align 4 ; <i32> [#uses=0]
- br i1 false, label %bb207, label %bb204
+ %tmp = getelementptr i32, ptr %decl, i32 1 ; <ptr> [#uses=1]
+ %tmp1 = load i32, ptr %tmp, align 4 ; <i32> [#uses=0]
+ br i1 false, label %bb207, label %bb204
bb204: ; preds = %bb203
- %tmp2 = getelementptr i32, ptr %decl, i32 1 ; <ptr> [#uses=1]
- br label %bb208
+ %tmp2 = getelementptr i32, ptr %decl, i32 1 ; <ptr> [#uses=1]
+ br label %bb208
bb207: ; preds = %bb203
- br label %bb208
+ br label %bb208
bb208: ; preds = %bb207, %bb204
- %iftmp.1374.0.in = phi ptr [ null, %bb207 ], [ %tmp2, %bb204 ] ; <ptr> [#uses=1]
- %iftmp.1374.0 = load i32, ptr %iftmp.1374.0.in ; <i32> [#uses=0]
- unreachable
+ %iftmp.1374.0.in = phi ptr [ null, %bb207 ], [ %tmp2, %bb204 ] ; <ptr> [#uses=1]
+ %iftmp.1374.0 = load i32, ptr %iftmp.1374.0.in ; <i32> [#uses=0]
+ unreachable
}
diff --git a/llvm/test/Transforms/NewGVN/2009-01-21-SortInvalidation.ll b/llvm/test/Transforms/NewGVN/2009-01-21-SortInvalidation.ll
index 8631cbd..1f0a32a 100644
--- a/llvm/test/Transforms/NewGVN/2009-01-21-SortInvalidation.ll
+++ b/llvm/test/Transforms/NewGVN/2009-01-21-SortInvalidation.ll
@@ -1,55 +1,56 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn | llvm-dis
; PR3358
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"
- %struct.re_pattern_buffer = type { ptr, i64, i64, i64, ptr, ptr, i64, i8 }
- %struct.re_registers = type { i32, ptr, ptr }
+ %struct.re_pattern_buffer = type { ptr, i64, i64, i64, ptr, ptr, i64, i8 }
+ %struct.re_registers = type { i32, ptr, ptr }
define fastcc i32 @byte_re_match_2_internal(ptr nocapture %bufp, ptr %string1, i32 %size1, ptr %string2, i32 %size2, i32 %pos, ptr %regs, i32 %stop) nounwind {
entry:
- br label %bb159
+ br label %bb159
succeed_label: ; preds = %bb159
- ret i32 0
+ ret i32 0
bb159: ; preds = %bb664, %bb554, %bb159, %bb159, %bb159, %entry
- %d.0 = phi ptr [ null, %entry ], [ %d.0, %bb159 ], [ %d.0, %bb554 ], [ %d.0, %bb159 ], [ %d.0, %bb159 ], [ %d.12, %bb664 ] ; <ptr> [#uses=5]
- switch i32 0, label %bb661 [
- i32 0, label %bb159
- i32 1, label %succeed_label
- i32 13, label %bb159
- i32 14, label %bb159
- i32 16, label %bb411
- i32 24, label %bb622
- i32 28, label %bb543
- ]
+ %d.0 = phi ptr [ null, %entry ], [ %d.0, %bb159 ], [ %d.0, %bb554 ], [ %d.0, %bb159 ], [ %d.0, %bb159 ], [ %d.12, %bb664 ] ; <ptr> [#uses=5]
+ switch i32 0, label %bb661 [
+ i32 0, label %bb159
+ i32 1, label %succeed_label
+ i32 13, label %bb159
+ i32 14, label %bb159
+ i32 16, label %bb411
+ i32 24, label %bb622
+ i32 28, label %bb543
+ ]
bb411: ; preds = %bb411, %bb159
- br label %bb411
+ br label %bb411
bb543: ; preds = %bb159
- br i1 false, label %bb549, label %bb550
+ br i1 false, label %bb549, label %bb550
bb549: ; preds = %bb543
- br label %bb554
+ br label %bb554
bb550: ; preds = %bb543
- br i1 false, label %bb554, label %bb552
+ br i1 false, label %bb554, label %bb552
bb552: ; preds = %bb550
- %0 = load i8, ptr %d.0, align 8 ; <i8> [#uses=0]
- br label %bb554
+ %0 = load i8, ptr %d.0, align 8 ; <i8> [#uses=0]
+ br label %bb554
bb554: ; preds = %bb552, %bb550, %bb549
- br i1 false, label %bb159, label %bb661
+ br i1 false, label %bb159, label %bb661
bb622: ; preds = %bb622, %bb159
- br label %bb622
+ br label %bb622
bb661: ; preds = %bb554, %bb159
- %d.12 = select i1 false, ptr null, ptr null ; <ptr> [#uses=1]
- br label %bb664
+ %d.12 = select i1 false, ptr null, ptr null ; <ptr> [#uses=1]
+ br label %bb664
bb664: ; preds = %bb664, %bb661
- br i1 false, label %bb159, label %bb664
+ br i1 false, label %bb159, label %bb664
}
diff --git a/llvm/test/Transforms/NewGVN/2009-01-22-SortInvalidation.ll b/llvm/test/Transforms/NewGVN/2009-01-22-SortInvalidation.ll
index d8871700d..25f73dc 100644
--- a/llvm/test/Transforms/NewGVN/2009-01-22-SortInvalidation.ll
+++ b/llvm/test/Transforms/NewGVN/2009-01-22-SortInvalidation.ll
@@ -1,100 +1,101 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn | llvm-dis
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin7"
- %struct..4sPragmaType = type { ptr, i32 }
- %struct.AggInfo = type { i8, i8, i32, ptr, i32, ptr, i32, i32, i32, ptr, i32, i32 }
- %struct.AggInfo_col = type { ptr, i32, i32, i32, i32, ptr }
- %struct.AggInfo_func = type { ptr, ptr, i32, i32 }
- %struct.AuxData = type { ptr, ptr }
- %struct.Bitvec = type { i32, i32, i32, { [125 x i32] } }
- %struct.BtCursor = type { ptr, ptr, ptr, ptr, ptr, ptr, i32, ptr, i32, %struct.CellInfo, i8, i8, ptr, i64, i32, i8, ptr }
- %struct.BtLock = type { ptr, i32, i8, ptr }
- %struct.BtShared = type { ptr, ptr, ptr, ptr, i8, i8, i8, i8, i8, i8, i8, i8, i32, i16, i16, i32, i32, i32, i32, i8, i32, ptr, ptr, ptr, %struct.BusyHandler, i32, ptr, ptr, ptr }
- %struct.Btree = type { ptr, ptr, i8, i8, i8, i32, ptr, ptr }
- %struct.BtreeMutexArray = type { i32, [11 x ptr] }
- %struct.BusyHandler = type { ptr, ptr, i32 }
- %struct.CellInfo = type { ptr, i64, i32, i32, i16, i16, i16, i16 }
- %struct.CollSeq = type { ptr, i8, i8, ptr, ptr, ptr }
- %struct.Column = type { ptr, ptr, ptr, ptr, i8, i8, i8, i8 }
- %struct.Context = type { i64, i32, %struct.Fifo }
- %struct.CountCtx = type { i64 }
- %struct.Cursor = type { ptr, i32, i64, i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i64, ptr, i32, ptr, i64, ptr, ptr, i32, i64, ptr, ptr, i32, i32, ptr, ptr, ptr }
- %struct.Db = type { ptr, ptr, i8, i8, ptr, ptr, ptr }
- %struct.Expr = type { i8, i8, i16, ptr, ptr, ptr, ptr, %struct..4sPragmaType, %struct..4sPragmaType, i32, i32, ptr, i32, i32, ptr, ptr, i32 }
- %struct.ExprList = type { i32, i32, i32, ptr }
- %struct.ExprList_item = type { ptr, ptr, i8, i8, i8 }
- %struct.FKey = type { ptr, ptr, ptr, ptr, i32, ptr, i8, i8, i8, i8 }
- %struct.Fifo = type { i32, ptr, ptr }
- %struct.FifoPage = type { i32, i32, i32, ptr, [1 x i64] }
- %struct.FuncDef = type { i16, i8, i8, i8, ptr, ptr, ptr, ptr, ptr, [1 x i8] }
- %struct.Hash = type { i8, i8, i32, i32, ptr, ptr }
- %struct.HashElem = type { ptr, ptr, ptr, ptr, i32 }
- %struct.IdList = type { ptr, i32, i32 }
- %struct.Index = type { ptr, i32, ptr, ptr, ptr, i32, i8, i8, ptr, ptr, ptr, ptr, ptr }
- %struct.KeyInfo = type { ptr, i8, i8, i8, i32, ptr, [1 x ptr] }
- %struct.Mem = type { %struct.CountCtx, double, ptr, ptr, i32, i16, i8, i8, ptr }
- %struct.MemPage = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i16, i16, i16, i16, i16, i16, [5 x %struct._OvflCell], ptr, ptr, ptr, i32, ptr }
- %struct.Module = type { ptr, ptr, ptr, ptr }
- %struct.Op = type { i8, i8, i8, i8, i32, i32, i32, { i32 } }
- %struct.Pager = type { ptr, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, %struct.PagerLruList, ptr, ptr, ptr, i64, i64, i64, i64, i64, i32, ptr, ptr, i32, ptr, ptr, [16 x i8] }
- %struct.PagerLruLink = type { ptr, ptr }
- %struct.PagerLruList = type { ptr, ptr, ptr }
- %struct.Parse = type { ptr, i32, ptr, ptr, i8, i8, i8, i8, i8, i8, i8, [8 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [12 x i32], i32, ptr, i32, i32, i32, i32, i32, ptr, i8, %struct..4sPragmaType, %struct..4sPragmaType, %struct..4sPragmaType, ptr, ptr, ptr, ptr, ptr, ptr, %struct..4sPragmaType, i8, ptr, i32 }
- %struct.PgHdr = type { ptr, i32, ptr, ptr, %struct.PagerLruLink, ptr, i8, i8, i8, i8, i8, i16, ptr, ptr, ptr }
- %struct.Schema = type { i32, %struct.Hash, %struct.Hash, %struct.Hash, %struct.Hash, ptr, i8, i8, i16, i32, ptr }
- %struct.Select = type { ptr, i8, i8, i8, i8, i8, i8, i8, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32, [3 x i32] }
- %struct.SrcList = type { i16, i16, [1 x %struct.SrcList_item] }
- %struct.SrcList_item = type { ptr, ptr, ptr, ptr, ptr, i8, i8, i32, ptr, ptr, i64 }
- %struct.Table = type { ptr, i32, ptr, i32, ptr, i32, ptr, i32, ptr, ptr, ptr, ptr, i32, i8, i8, i8, i8, i8, i8, i8, ptr, ptr, i32, ptr, ptr }
- %struct.TableLock = type { i32, i32, i8, ptr }
- %struct.Trigger = type { ptr, ptr, i8, i8, ptr, ptr, %struct..4sPragmaType, ptr, ptr, ptr, ptr }
- %struct.TriggerStack = type { ptr, i32, i32, i32, i32, i32, i32, ptr, ptr }
- %struct.TriggerStep = type { i32, i32, ptr, ptr, %struct..4sPragmaType, ptr, ptr, ptr, ptr, ptr }
- %struct.Vdbe = type { ptr, ptr, ptr, i32, i32, ptr, i32, i32, ptr, ptr, ptr, i32, ptr, i32, ptr, ptr, i32, i32, i32, ptr, i32, i32, %struct.Fifo, i32, i32, ptr, i32, i32, i32, i32, i32, [25 x i32], i32, i32, ptr, ptr, ptr, i8, i8, i8, i8, i8, i8, i32, i64, i32, %struct.BtreeMutexArray, i32, ptr, i32 }
- %struct.VdbeFunc = type { ptr, i32, [1 x %struct.AuxData] }
- %struct._OvflCell = type { ptr, i16 }
- %struct._ht = type { i32, ptr }
- %struct.anon = type { double }
- %struct.sColMap = type { i32, ptr }
- %struct.sqlite3 = type { ptr, i32, ptr, i32, i32, i32, i32, i8, i8, i8, i8, i32, ptr, i64, i64, i32, i32, i32, ptr, %struct.sqlite3InitInfo, i32, ptr, ptr, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, %struct.anon, ptr, ptr, ptr, ptr, i32, %struct.Hash, ptr, ptr, i32, %struct.Hash, %struct.Hash, %struct.BusyHandler, i32, [2 x %struct.Db], i8 }
- %struct.sqlite3InitInfo = type { i32, i32, i8 }
- %struct.sqlite3_context = type { ptr, ptr, %struct.Mem, ptr, i32, ptr }
- %struct.sqlite3_file = type { ptr }
- %struct.sqlite3_index_constraint = type { i32, i8, i8, i32 }
- %struct.sqlite3_index_constraint_usage = type { i32, i8 }
- %struct.sqlite3_index_info = type { i32, ptr, i32, ptr, ptr, i32, ptr, i32, i32, double }
- %struct.sqlite3_io_methods = type { i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
- %struct.sqlite3_module = type { i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
- %struct.sqlite3_mutex = type opaque
- %struct.sqlite3_vfs = type { i32, i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
- %struct.sqlite3_vtab = type { ptr, i32, ptr }
- %struct.sqlite3_vtab_cursor = type { ptr }
+ %struct..4sPragmaType = type { ptr, i32 }
+ %struct.AggInfo = type { i8, i8, i32, ptr, i32, ptr, i32, i32, i32, ptr, i32, i32 }
+ %struct.AggInfo_col = type { ptr, i32, i32, i32, i32, ptr }
+ %struct.AggInfo_func = type { ptr, ptr, i32, i32 }
+ %struct.AuxData = type { ptr, ptr }
+ %struct.Bitvec = type { i32, i32, i32, { [125 x i32] } }
+ %struct.BtCursor = type { ptr, ptr, ptr, ptr, ptr, ptr, i32, ptr, i32, %struct.CellInfo, i8, i8, ptr, i64, i32, i8, ptr }
+ %struct.BtLock = type { ptr, i32, i8, ptr }
+ %struct.BtShared = type { ptr, ptr, ptr, ptr, i8, i8, i8, i8, i8, i8, i8, i8, i32, i16, i16, i32, i32, i32, i32, i8, i32, ptr, ptr, ptr, %struct.BusyHandler, i32, ptr, ptr, ptr }
+ %struct.Btree = type { ptr, ptr, i8, i8, i8, i32, ptr, ptr }
+ %struct.BtreeMutexArray = type { i32, [11 x ptr] }
+ %struct.BusyHandler = type { ptr, ptr, i32 }
+ %struct.CellInfo = type { ptr, i64, i32, i32, i16, i16, i16, i16 }
+ %struct.CollSeq = type { ptr, i8, i8, ptr, ptr, ptr }
+ %struct.Column = type { ptr, ptr, ptr, ptr, i8, i8, i8, i8 }
+ %struct.Context = type { i64, i32, %struct.Fifo }
+ %struct.CountCtx = type { i64 }
+ %struct.Cursor = type { ptr, i32, i64, i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i64, ptr, i32, ptr, i64, ptr, ptr, i32, i64, ptr, ptr, i32, i32, ptr, ptr, ptr }
+ %struct.Db = type { ptr, ptr, i8, i8, ptr, ptr, ptr }
+ %struct.Expr = type { i8, i8, i16, ptr, ptr, ptr, ptr, %struct..4sPragmaType, %struct..4sPragmaType, i32, i32, ptr, i32, i32, ptr, ptr, i32 }
+ %struct.ExprList = type { i32, i32, i32, ptr }
+ %struct.ExprList_item = type { ptr, ptr, i8, i8, i8 }
+ %struct.FKey = type { ptr, ptr, ptr, ptr, i32, ptr, i8, i8, i8, i8 }
+ %struct.Fifo = type { i32, ptr, ptr }
+ %struct.FifoPage = type { i32, i32, i32, ptr, [1 x i64] }
+ %struct.FuncDef = type { i16, i8, i8, i8, ptr, ptr, ptr, ptr, ptr, [1 x i8] }
+ %struct.Hash = type { i8, i8, i32, i32, ptr, ptr }
+ %struct.HashElem = type { ptr, ptr, ptr, ptr, i32 }
+ %struct.IdList = type { ptr, i32, i32 }
+ %struct.Index = type { ptr, i32, ptr, ptr, ptr, i32, i8, i8, ptr, ptr, ptr, ptr, ptr }
+ %struct.KeyInfo = type { ptr, i8, i8, i8, i32, ptr, [1 x ptr] }
+ %struct.Mem = type { %struct.CountCtx, double, ptr, ptr, i32, i16, i8, i8, ptr }
+ %struct.MemPage = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i16, i16, i16, i16, i16, i16, [5 x %struct._OvflCell], ptr, ptr, ptr, i32, ptr }
+ %struct.Module = type { ptr, ptr, ptr, ptr }
+ %struct.Op = type { i8, i8, i8, i8, i32, i32, i32, { i32 } }
+ %struct.Pager = type { ptr, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, %struct.PagerLruList, ptr, ptr, ptr, i64, i64, i64, i64, i64, i32, ptr, ptr, i32, ptr, ptr, [16 x i8] }
+ %struct.PagerLruLink = type { ptr, ptr }
+ %struct.PagerLruList = type { ptr, ptr, ptr }
+ %struct.Parse = type { ptr, i32, ptr, ptr, i8, i8, i8, i8, i8, i8, i8, [8 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [12 x i32], i32, ptr, i32, i32, i32, i32, i32, ptr, i8, %struct..4sPragmaType, %struct..4sPragmaType, %struct..4sPragmaType, ptr, ptr, ptr, ptr, ptr, ptr, %struct..4sPragmaType, i8, ptr, i32 }
+ %struct.PgHdr = type { ptr, i32, ptr, ptr, %struct.PagerLruLink, ptr, i8, i8, i8, i8, i8, i16, ptr, ptr, ptr }
+ %struct.Schema = type { i32, %struct.Hash, %struct.Hash, %struct.Hash, %struct.Hash, ptr, i8, i8, i16, i32, ptr }
+ %struct.Select = type { ptr, i8, i8, i8, i8, i8, i8, i8, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32, [3 x i32] }
+ %struct.SrcList = type { i16, i16, [1 x %struct.SrcList_item] }
+ %struct.SrcList_item = type { ptr, ptr, ptr, ptr, ptr, i8, i8, i32, ptr, ptr, i64 }
+ %struct.Table = type { ptr, i32, ptr, i32, ptr, i32, ptr, i32, ptr, ptr, ptr, ptr, i32, i8, i8, i8, i8, i8, i8, i8, ptr, ptr, i32, ptr, ptr }
+ %struct.TableLock = type { i32, i32, i8, ptr }
+ %struct.Trigger = type { ptr, ptr, i8, i8, ptr, ptr, %struct..4sPragmaType, ptr, ptr, ptr, ptr }
+ %struct.TriggerStack = type { ptr, i32, i32, i32, i32, i32, i32, ptr, ptr }
+ %struct.TriggerStep = type { i32, i32, ptr, ptr, %struct..4sPragmaType, ptr, ptr, ptr, ptr, ptr }
+ %struct.Vdbe = type { ptr, ptr, ptr, i32, i32, ptr, i32, i32, ptr, ptr, ptr, i32, ptr, i32, ptr, ptr, i32, i32, i32, ptr, i32, i32, %struct.Fifo, i32, i32, ptr, i32, i32, i32, i32, i32, [25 x i32], i32, i32, ptr, ptr, ptr, i8, i8, i8, i8, i8, i8, i32, i64, i32, %struct.BtreeMutexArray, i32, ptr, i32 }
+ %struct.VdbeFunc = type { ptr, i32, [1 x %struct.AuxData] }
+ %struct._OvflCell = type { ptr, i16 }
+ %struct._ht = type { i32, ptr }
+ %struct.anon = type { double }
+ %struct.sColMap = type { i32, ptr }
+ %struct.sqlite3 = type { ptr, i32, ptr, i32, i32, i32, i32, i8, i8, i8, i8, i32, ptr, i64, i64, i32, i32, i32, ptr, %struct.sqlite3InitInfo, i32, ptr, ptr, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, %struct.anon, ptr, ptr, ptr, ptr, i32, %struct.Hash, ptr, ptr, i32, %struct.Hash, %struct.Hash, %struct.BusyHandler, i32, [2 x %struct.Db], i8 }
+ %struct.sqlite3InitInfo = type { i32, i32, i8 }
+ %struct.sqlite3_context = type { ptr, ptr, %struct.Mem, ptr, i32, ptr }
+ %struct.sqlite3_file = type { ptr }
+ %struct.sqlite3_index_constraint = type { i32, i8, i8, i32 }
+ %struct.sqlite3_index_constraint_usage = type { i32, i8 }
+ %struct.sqlite3_index_info = type { i32, ptr, i32, ptr, ptr, i32, ptr, i32, i32, double }
+ %struct.sqlite3_io_methods = type { i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
+ %struct.sqlite3_module = type { i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
+ %struct.sqlite3_mutex = type opaque
+ %struct.sqlite3_vfs = type { i32, i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
+ %struct.sqlite3_vtab = type { ptr, i32, ptr }
+ %struct.sqlite3_vtab_cursor = type { ptr }
define fastcc void @sqlite3Insert(ptr %pParse, ptr %pTabList, ptr %pList, ptr %pSelect, ptr %pColumn, i32 %onError) nounwind {
entry:
- br i1 false, label %bb54, label %bb69.loopexit
+ br i1 false, label %bb54, label %bb69.loopexit
bb54: ; preds = %entry
- br label %bb69.loopexit
+ br label %bb69.loopexit
bb59: ; preds = %bb63.preheader
- %0 = load ptr, ptr %3, align 4 ; <ptr> [#uses=0]
- br label %bb65
+ %0 = load ptr, ptr %3, align 4 ; <ptr> [#uses=0]
+ br label %bb65
bb65: ; preds = %bb63.preheader, %bb59
- %1 = load ptr, ptr %4, align 4 ; <ptr> [#uses=0]
- br i1 false, label %bb67, label %bb63.preheader
+ %1 = load ptr, ptr %4, align 4 ; <ptr> [#uses=0]
+ br i1 false, label %bb67, label %bb63.preheader
bb67: ; preds = %bb65
- %2 = getelementptr %struct.IdList, ptr %pColumn, i32 0, i32 0 ; <ptr> [#uses=0]
- unreachable
+ %2 = getelementptr %struct.IdList, ptr %pColumn, i32 0, i32 0 ; <ptr> [#uses=0]
+ unreachable
bb69.loopexit: ; preds = %bb54, %entry
- %3 = getelementptr %struct.IdList, ptr %pColumn, i32 0, i32 0 ; <ptr> [#uses=1]
- %4 = getelementptr %struct.IdList, ptr %pColumn, i32 0, i32 0 ; <ptr> [#uses=1]
- br label %bb63.preheader
+ %3 = getelementptr %struct.IdList, ptr %pColumn, i32 0, i32 0 ; <ptr> [#uses=1]
+ %4 = getelementptr %struct.IdList, ptr %pColumn, i32 0, i32 0 ; <ptr> [#uses=1]
+ br label %bb63.preheader
bb63.preheader: ; preds = %bb69.loopexit, %bb65
- br i1 false, label %bb59, label %bb65
+ br i1 false, label %bb59, label %bb65
}
diff --git a/llvm/test/Transforms/NewGVN/2009-03-10-PREOnVoid.ll b/llvm/test/Transforms/NewGVN/2009-03-10-PREOnVoid.ll
index 6aa79e0..83177e5 100644
--- a/llvm/test/Transforms/NewGVN/2009-03-10-PREOnVoid.ll
+++ b/llvm/test/Transforms/NewGVN/2009-03-10-PREOnVoid.ll
@@ -1,21 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn -disable-output
; PR3775
; ModuleID = 'bugpoint-reduced-simplified.bc'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
target triple = "i386-pc-linux-gnu"
- %llvm.dbg.anchor.type = type { i32, i32 }
- %"struct.__gnu_cxx::hash<ptr>" = type <{ i8 }>
- %struct.__sched_param = type { i32 }
- %struct._pthread_descr_struct = type opaque
- %struct.pthread_attr_t = type { i32, i32, %struct.__sched_param, i32, i32, i32, i32, ptr, i32 }
- %struct.pthread_mutex_t = type { i32, i32, ptr, i32, %llvm.dbg.anchor.type }
- %"struct.std::_Rb_tree<ptr,std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > >,std::_Select1st<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > >,std::less<ptr>,std::allocator<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > > >" = type { %"struct.std::_Rb_tree<ptr,std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > >,std::_Select1st<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > >,std::less<ptr>,std::allocator<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > > >::_Rb_tree_impl<std::less<ptr>,false>" }
- %"struct.std::_Rb_tree<ptr,std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > >,std::_Select1st<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > >,std::less<ptr>,std::allocator<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > > >::_Rb_tree_impl<std::less<ptr>,false>" = type { %"struct.__gnu_cxx::hash<ptr>", %"struct.std::_Rb_tree_node_base", i32 }
- %"struct.std::_Rb_tree_iterator<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > >" = type { ptr }
- %"struct.std::_Rb_tree_node_base" = type { i32, ptr, ptr, ptr }
- %"struct.std::pair<std::_Rb_tree_iterator<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > >,bool>" = type { %"struct.std::_Rb_tree_iterator<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > >", i8 }
- %"struct.std::pair<ptr const,ptr>" = type { ptr, ptr }
+ %llvm.dbg.anchor.type = type { i32, i32 }
+ %"struct.__gnu_cxx::hash<ptr>" = type <{ i8 }>
+ %struct.__sched_param = type { i32 }
+ %struct._pthread_descr_struct = type opaque
+ %struct.pthread_attr_t = type { i32, i32, %struct.__sched_param, i32, i32, i32, i32, ptr, i32 }
+ %struct.pthread_mutex_t = type { i32, i32, ptr, i32, %llvm.dbg.anchor.type }
+ %"struct.std::_Rb_tree<ptr,std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > >,std::_Select1st<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > >,std::less<ptr>,std::allocator<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > > >" = type { %"struct.std::_Rb_tree<ptr,std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > >,std::_Select1st<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > >,std::less<ptr>,std::allocator<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > > >::_Rb_tree_impl<std::less<ptr>,false>" }
+ %"struct.std::_Rb_tree<ptr,std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > >,std::_Select1st<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > >,std::less<ptr>,std::allocator<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > > >::_Rb_tree_impl<std::less<ptr>,false>" = type { %"struct.__gnu_cxx::hash<ptr>", %"struct.std::_Rb_tree_node_base", i32 }
+ %"struct.std::_Rb_tree_iterator<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > >" = type { ptr }
+ %"struct.std::_Rb_tree_node_base" = type { i32, ptr, ptr, ptr }
+ %"struct.std::pair<std::_Rb_tree_iterator<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > >,bool>" = type { %"struct.std::_Rb_tree_iterator<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > >", i8 }
+ %"struct.std::pair<ptr const,ptr>" = type { ptr, ptr }
@_ZL20__gthrw_pthread_oncePiPFvvE = weak alias i32 (ptr, ptr), ptr @pthread_once ; <ptr> [#uses=0]
@_ZL27__gthrw_pthread_getspecificj = weak alias ptr (i32), ptr @pthread_getspecific ; <ptr> [#uses=0]
@@ -36,75 +37,75 @@ declare fastcc void @_ZNSt10_Select1stISt4pairIKPvS1_EEC1Ev() nounwind readnone
define fastcc void @_ZNSt8_Rb_treeIPvSt4pairIKS0_S0_ESt10_Select1stIS3_ESt4lessIS0_ESaIS3_EE16_M_insert_uniqueERKS3_(ptr noalias nocapture sret(%"struct.std::pair<std::_Rb_tree_iterator<std::pair<ptr const, std::vector<ShadowInfo, std::allocator<ShadowInfo> > > >,bool>") %agg.result, ptr %this, ptr %__v) nounwind {
entry:
- br i1 false, label %bb7, label %bb
+ br i1 false, label %bb7, label %bb
bb: ; preds = %bb, %entry
- br i1 false, label %bb5, label %bb
+ br i1 false, label %bb5, label %bb
bb5: ; preds = %bb
- call fastcc void @_ZNSt10_Select1stISt4pairIKPvS1_EEC1Ev() nounwind
- br i1 false, label %bb11, label %bb7
+ call fastcc void @_ZNSt10_Select1stISt4pairIKPvS1_EEC1Ev() nounwind
+ br i1 false, label %bb11, label %bb7
bb7: ; preds = %bb5, %entry
- br label %bb11
+ br label %bb11
bb11: ; preds = %bb7, %bb5
- call fastcc void @_ZNSt10_Select1stISt4pairIKPvS1_EEC1Ev() nounwind
- unreachable
+ call fastcc void @_ZNSt10_Select1stISt4pairIKPvS1_EEC1Ev() nounwind
+ unreachable
}
define i32 @pthread_once(ptr, ptr) {
- ret i32 0
+ ret i32 0
}
define ptr @pthread_getspecific(i32) {
- ret ptr null
+ ret ptr null
}
define i32 @pthread_setspecific(i32, ptr) {
- ret i32 0
+ ret i32 0
}
define i32 @pthread_create(ptr, ptr, ptr, ptr) {
- ret i32 0
+ ret i32 0
}
define i32 @pthread_cancel(i32) {
- ret i32 0
+ ret i32 0
}
define i32 @pthread_mutex_lock(ptr) {
- ret i32 0
+ ret i32 0
}
define i32 @pthread_mutex_trylock(ptr) {
- ret i32 0
+ ret i32 0
}
define i32 @pthread_mutex_unlock(ptr) {
- ret i32 0
+ ret i32 0
}
define i32 @pthread_mutex_init(ptr, ptr) {
- ret i32 0
+ ret i32 0
}
define i32 @pthread_key_create(ptr, ptr) {
- ret i32 0
+ ret i32 0
}
define i32 @pthread_key_delete(i32) {
- ret i32 0
+ ret i32 0
}
define i32 @pthread_mutexattr_init(ptr) {
- ret i32 0
+ ret i32 0
}
define i32 @pthread_mutexattr_settype(ptr, i32) {
- ret i32 0
+ ret i32 0
}
define i32 @pthread_mutexattr_destroy(ptr) {
- ret i32 0
+ ret i32 0
}
diff --git a/llvm/test/Transforms/NewGVN/2009-07-13-MemDepSortFail.ll b/llvm/test/Transforms/NewGVN/2009-07-13-MemDepSortFail.ll
index 24ad185..9694ae4 100644
--- a/llvm/test/Transforms/NewGVN/2009-07-13-MemDepSortFail.ll
+++ b/llvm/test/Transforms/NewGVN/2009-07-13-MemDepSortFail.ll
@@ -1,67 +1,68 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn | llvm-dis
; PR4256
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
target triple = "i386-pc-linux-gnu"
- %llvm.dbg.anchor.type = type { i32, i32 }
- %struct.cset = type { ptr, i8, i8, i32, ptr }
- %struct.lmat = type { ptr, i32, ptr, ptr, ptr, ptr, ptr, ptr, i32, ptr, ptr, ptr, ptr, ptr }
- %struct.re_guts = type { ptr, ptr, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr, i32, i32, i32, i32, [1 x i8] }
+ %llvm.dbg.anchor.type = type { i32, i32 }
+ %struct.cset = type { ptr, i8, i8, i32, ptr }
+ %struct.lmat = type { ptr, i32, ptr, ptr, ptr, ptr, ptr, ptr, i32, ptr, ptr, ptr, ptr, ptr }
+ %struct.re_guts = type { ptr, ptr, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr, i32, i32, i32, i32, [1 x i8] }
define ptr @lbackref(ptr %m, ptr %start, ptr %stop, i32 %startst, i32 %stopst, i32 %lev, i32 %rec) nounwind {
entry:
- br label %bb63
+ br label %bb63
bb: ; preds = %bb63
- switch i32 0, label %bb62 [
- i32 268435456, label %bb2
- i32 805306368, label %bb9
- i32 -1610612736, label %bb51
- ]
+ switch i32 0, label %bb62 [
+ i32 268435456, label %bb2
+ i32 805306368, label %bb9
+ i32 -1610612736, label %bb51
+ ]
bb2: ; preds = %bb
- br label %bb62
+ br label %bb62
bb9: ; preds = %bb
- %0 = load i8, ptr %sp.1, align 1 ; <i8> [#uses=0]
- br label %bb62
+ %0 = load i8, ptr %sp.1, align 1 ; <i8> [#uses=0]
+ br label %bb62
bb51: ; preds = %bb
- %1 = load i8, ptr %sp.1, align 1 ; <i8> [#uses=0]
- ret ptr null
+ %1 = load i8, ptr %sp.1, align 1 ; <i8> [#uses=0]
+ ret ptr null
bb62: ; preds = %bb9, %bb2, %bb
- br label %bb63
+ br label %bb63
bb63: ; preds = %bb84, %bb69, %bb62, %entry
- %sp.1 = phi ptr [ null, %bb62 ], [ %sp.1.lcssa, %bb84 ], [ %start, %entry ], [ %sp.1.lcssa, %bb69 ] ; <ptr> [#uses=3]
- br i1 false, label %bb, label %bb65
+ %sp.1 = phi ptr [ null, %bb62 ], [ %sp.1.lcssa, %bb84 ], [ %start, %entry ], [ %sp.1.lcssa, %bb69 ] ; <ptr> [#uses=3]
+ br i1 false, label %bb, label %bb65
bb65: ; preds = %bb63
- %sp.1.lcssa = phi ptr [ %sp.1, %bb63 ] ; <ptr> [#uses=4]
- br i1 false, label %bb66, label %bb69
+ %sp.1.lcssa = phi ptr [ %sp.1, %bb63 ] ; <ptr> [#uses=4]
+ br i1 false, label %bb66, label %bb69
bb66: ; preds = %bb65
- ret ptr null
+ ret ptr null
bb69: ; preds = %bb65
- switch i32 0, label %bb108.loopexit2.loopexit.loopexit [
- i32 1342177280, label %bb63
- i32 1476395008, label %bb84
- i32 1879048192, label %bb104
- i32 2013265920, label %bb93
- ]
+ switch i32 0, label %bb108.loopexit2.loopexit.loopexit [
+ i32 1342177280, label %bb63
+ i32 1476395008, label %bb84
+ i32 1879048192, label %bb104
+ i32 2013265920, label %bb93
+ ]
bb84: ; preds = %bb69
- %2 = tail call ptr @lbackref(ptr %m, ptr %sp.1.lcssa, ptr %stop, i32 0, i32 %stopst, i32 0, i32 0) nounwind ; <ptr> [#uses=0]
- br label %bb63
+ %2 = tail call ptr @lbackref(ptr %m, ptr %sp.1.lcssa, ptr %stop, i32 0, i32 %stopst, i32 0, i32 0) nounwind ; <ptr> [#uses=0]
+ br label %bb63
bb93: ; preds = %bb69
- ret ptr null
+ ret ptr null
bb104: ; preds = %bb69
- %sp.1.lcssa.lcssa33 = phi ptr [ %sp.1.lcssa, %bb69 ] ; <ptr> [#uses=0]
- unreachable
+ %sp.1.lcssa.lcssa33 = phi ptr [ %sp.1.lcssa, %bb69 ] ; <ptr> [#uses=0]
+ unreachable
bb108.loopexit2.loopexit.loopexit: ; preds = %bb69
- ret ptr null
+ ret ptr null
}
diff --git a/llvm/test/Transforms/NewGVN/2009-11-12-MemDepMallocBitCast.ll b/llvm/test/Transforms/NewGVN/2009-11-12-MemDepMallocBitCast.ll
index 3eda7ca..c49f651 100644
--- a/llvm/test/Transforms/NewGVN/2009-11-12-MemDepMallocBitCast.ll
+++ b/llvm/test/Transforms/NewGVN/2009-11-12-MemDepMallocBitCast.ll
@@ -1,14 +1,19 @@
-; Test to make sure malloc's bitcast does not block detection of a store
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; Test to make sure malloc's bitcast does not block detection of a store
; to aliased memory; GVN should not optimize away the load in this program.
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
define i64 @test() {
+; CHECK-LABEL: define i64 @test() {
+; CHECK-NEXT: [[TMP1:%.*]] = tail call ptr @malloc(i64 mul (i64 ptrtoint (ptr getelementptr (i64, ptr null, i64 1) to i64), i64 4))
+; CHECK-NEXT: store i8 42, ptr [[TMP1]], align 1
+; CHECK-NEXT: [[Y:%.*]] = load i64, ptr [[TMP1]], align 4
+; CHECK-NEXT: ret i64 [[Y]]
+;
%1 = tail call ptr @malloc(i64 mul (i64 4, i64 ptrtoint (ptr getelementptr (i64, ptr null, i64 1) to i64))) ; <ptr> [#uses=2]
store i8 42, ptr %1
%Y = load i64, ptr %1 ; <i64> [#uses=1]
ret i64 %Y
-; CHECK: %Y = load i64, ptr %1
-; CHECK: ret i64 %Y
}
declare noalias ptr @malloc(i64)
diff --git a/llvm/test/Transforms/NewGVN/2010-03-31-RedundantPHIs.ll b/llvm/test/Transforms/NewGVN/2010-03-31-RedundantPHIs.ll
index 321f3cf..c6fc7b9 100644
--- a/llvm/test/Transforms/NewGVN/2010-03-31-RedundantPHIs.ll
+++ b/llvm/test/Transforms/NewGVN/2010-03-31-RedundantPHIs.ll
@@ -1,9 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
; CHECK-NOT: load
; CHECK-NOT: phi
define ptr @cat(ptr %s1, ...) nounwind {
+; CHECK-LABEL: define ptr @cat(
+; CHECK-SAME: ptr [[S1:%.*]], ...) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 undef, label [[BB:%.*]], label [[BB3:%.*]]
+; CHECK: bb:
+; CHECK-NEXT: unreachable
+; CHECK: bb3:
+; CHECK-NEXT: store ptr undef, ptr undef, align 4
+; CHECK-NEXT: br i1 undef, label [[BB5:%.*]], label [[BB6:%.*]]
+; CHECK: bb5:
+; CHECK-NEXT: unreachable
+; CHECK: bb6:
+; CHECK-NEXT: br label [[BB12:%.*]]
+; CHECK: bb8:
+; CHECK-NEXT: br i1 undef, label [[BB9:%.*]], label [[BB10:%.*]]
+; CHECK: bb9:
+; CHECK-NEXT: br label [[BB11:%.*]]
+; CHECK: bb10:
+; CHECK-NEXT: br label [[BB11]]
+; CHECK: bb11:
+; CHECK-NEXT: br label [[BB12]]
+; CHECK: bb12:
+; CHECK-NEXT: br i1 undef, label [[BB8:%.*]], label [[BB13:%.*]]
+; CHECK: bb13:
+; CHECK-NEXT: ret ptr undef
+;
entry:
br i1 undef, label %bb, label %bb3
diff --git a/llvm/test/Transforms/NewGVN/2010-05-08-OneBit.ll b/llvm/test/Transforms/NewGVN/2010-05-08-OneBit.ll
index 0d2d45a..0a121ff 100644
--- a/llvm/test/Transforms/NewGVN/2010-05-08-OneBit.ll
+++ b/llvm/test/Transforms/NewGVN/2010-05-08-OneBit.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn
; PR7052
@@ -12,7 +13,7 @@ entry:
l117.i.i: ; preds = %entry
invoke fastcc void @foo()
- to label %.noexc5 unwind label %landing_pad
+ to label %.noexc5 unwind label %landing_pad
.noexc5: ; preds = %l117.i.i
unreachable
@@ -22,7 +23,7 @@ k121.i.i: ; preds = %entry
l129.i.i: ; preds = %k121.i.i
invoke fastcc void @foo()
- to label %.noexc7 unwind label %landing_pad
+ to label %.noexc7 unwind label %landing_pad
.noexc7: ; preds = %l129.i.i
unreachable
@@ -34,7 +35,7 @@ k133.i.i: ; preds = %k121.i.i
l147.i.i: ; preds = %k133.i.i
invoke fastcc void @foo()
- to label %.noexc10 unwind label %landing_pad
+ to label %.noexc10 unwind label %landing_pad
.noexc10: ; preds = %l147.i.i
unreachable
@@ -44,10 +45,10 @@ k151.i.i: ; preds = %k133.i.i
landing_pad: ; preds = %l147.i.i, %l129.i.i, %l117.i.i
%exn = landingpad {ptr, i32}
- cleanup
+ cleanup
switch i32 undef, label %fin [
- i32 1, label %catch1
- i32 2, label %catch
+ i32 1, label %catch1
+ i32 2, label %catch
]
fin: ; preds = %landing_pad
diff --git a/llvm/test/Transforms/NewGVN/2010-11-13-Simplify.ll b/llvm/test/Transforms/NewGVN/2010-11-13-Simplify.ll
index b06570b..3d12783 100644
--- a/llvm/test/Transforms/NewGVN/2010-11-13-Simplify.ll
+++ b/llvm/test/Transforms/NewGVN/2010-11-13-Simplify.ll
@@ -1,9 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
declare i32 @foo(i32) readnone
define i1 @bar() {
-; CHECK-LABEL: @bar(
+; CHECK-LABEL: define i1 @bar() {
+; CHECK-NEXT: [[A:%.*]] = call i32 @foo(i32 0) #[[ATTR0:[0-9]+]]
+; CHECK-NEXT: [[X:%.*]] = call i32 @foo(i32 [[A]]) #[[ATTR0]]
+; CHECK-NEXT: ret i1 true
+;
%a = call i32 @foo (i32 0) readnone
%b = call i32 @foo (i32 0) readnone
%c = and i32 %a, %b
@@ -11,5 +16,4 @@ define i1 @bar() {
%y = call i32 @foo (i32 %c) readnone
%z = icmp eq i32 %x, %y
ret i1 %z
-; CHECK: ret i1 true
-}
+}
diff --git a/llvm/test/Transforms/NewGVN/2011-04-27-phioperands.ll b/llvm/test/Transforms/NewGVN/2011-04-27-phioperands.ll
index 3e8a5d8..c039422 100644
--- a/llvm/test/Transforms/NewGVN/2011-04-27-phioperands.ll
+++ b/llvm/test/Transforms/NewGVN/2011-04-27-phioperands.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -disable-output < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-f128:128:128-n8:16:32:64"
@@ -64,10 +65,10 @@ doemit.exit76.i:
"<bb 64>.i":
switch i32 undef, label %"<bb 5>" [
- i32 42, label %"<L54>.i"
- i32 43, label %"<L55>.i"
- i32 63, label %"<L56>.i"
- i32 123, label %"<bb 5>.i258.i"
+ i32 42, label %"<L54>.i"
+ i32 43, label %"<L55>.i"
+ i32 63, label %"<L56>.i"
+ i32 123, label %"<bb 5>.i258.i"
]
"<L54>.i":
@@ -93,14 +94,14 @@ doemit.exit127.i:
"<bb 5>":
switch i32 undef, label %"<L39>.i" [
- i32 36, label %"<L19>.i"
- i32 94, label %"<L18>.i"
- i32 124, label %"<L98>.i"
- i32 42, label %"<L99>.i"
- i32 43, label %"<L99>.i"
- i32 46, label %"<L24>.i"
- i32 63, label %"<L99>.i"
- i32 91, label %"<L28>.i"
- i32 92, label %"<L29>.i"
+ i32 36, label %"<L19>.i"
+ i32 94, label %"<L18>.i"
+ i32 124, label %"<L98>.i"
+ i32 42, label %"<L99>.i"
+ i32 43, label %"<L99>.i"
+ i32 46, label %"<L24>.i"
+ i32 63, label %"<L99>.i"
+ i32 91, label %"<L28>.i"
+ i32 92, label %"<L29>.i"
]
}
diff --git a/llvm/test/Transforms/NewGVN/2011-07-07-MatchIntrinsicExtract.ll b/llvm/test/Transforms/NewGVN/2011-07-07-MatchIntrinsicExtract.ll
index c547e8f..444385d 100644
--- a/llvm/test/Transforms/NewGVN/2011-07-07-MatchIntrinsicExtract.ll
+++ b/llvm/test/Transforms/NewGVN/2011-07-07-MatchIntrinsicExtract.ll
@@ -1,9 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
;
%0 = type { i64, i1 }
define i64 @test1(i64 %a, i64 %b) nounwind ssp {
+; CHECK-LABEL: define i64 @test1(
+; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[A]], i64 [[B]])
+; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [[TMP0]] poison, i64 [[TMP1]], 0
+; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
+; CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[TMP0]] [[TMP2]], i1 [[TMP3]], 1
+; CHECK-NEXT: [[UADD_0:%.*]] = extractvalue [[TMP0]] [[TMP4]], 0
+; CHECK-NEXT: [[ADD2:%.*]] = add i64 [[TMP1]], [[UADD_0]]
+; CHECK-NEXT: ret i64 [[ADD2]]
+;
entry:
%uadd = tail call %0 @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%uadd.0 = extractvalue %0 %uadd, 0
@@ -12,11 +25,20 @@ entry:
ret i64 %add2
}
-; CHECK-LABEL: @test1(
-; CHECK-NOT: add1
-; CHECK: ret
define i64 @test2(i64 %a, i64 %b) nounwind ssp {
+; CHECK-LABEL: define i64 @test2(
+; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[A]], i64 [[B]])
+; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [[TMP0]] poison, i64 [[TMP1]], 0
+; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
+; CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[TMP0]] [[TMP2]], i1 [[TMP3]], 1
+; CHECK-NEXT: [[USUB_0:%.*]] = extractvalue [[TMP0]] [[TMP4]], 0
+; CHECK-NEXT: [[ADD2:%.*]] = add i64 [[TMP1]], [[USUB_0]]
+; CHECK-NEXT: ret i64 [[ADD2]]
+;
entry:
%usub = tail call %0 @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
%usub.0 = extractvalue %0 %usub, 0
@@ -25,11 +47,20 @@ entry:
ret i64 %add2
}
-; CHECK-LABEL: @test2(
-; CHECK-NOT: sub1
-; CHECK: ret
define i64 @test3(i64 %a, i64 %b) nounwind ssp {
+; CHECK-LABEL: define i64 @test3(
+; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A]], i64 [[B]])
+; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [[TMP0]] poison, i64 [[TMP1]], 0
+; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
+; CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[TMP0]] [[TMP2]], i1 [[TMP3]], 1
+; CHECK-NEXT: [[UMUL_0:%.*]] = extractvalue [[TMP0]] [[TMP4]], 0
+; CHECK-NEXT: [[ADD2:%.*]] = add i64 [[TMP1]], [[UMUL_0]]
+; CHECK-NEXT: ret i64 [[ADD2]]
+;
entry:
%umul = tail call %0 @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
%umul.0 = extractvalue %0 %umul, 0
@@ -38,11 +69,20 @@ entry:
ret i64 %add2
}
-; CHECK-LABEL: @test3(
-; CHECK-NOT: mul1
-; CHECK: ret
define i64 @test4(i64 %a, i64 %b) nounwind ssp {
+; CHECK-LABEL: define i64 @test4(
+; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 [[A]], i64 [[B]])
+; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [[TMP0]] poison, i64 [[TMP1]], 0
+; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
+; CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[TMP0]] [[TMP2]], i1 [[TMP3]], 1
+; CHECK-NEXT: [[SADD_0:%.*]] = extractvalue [[TMP0]] [[TMP4]], 0
+; CHECK-NEXT: [[ADD2:%.*]] = add i64 [[TMP1]], [[SADD_0]]
+; CHECK-NEXT: ret i64 [[ADD2]]
+;
entry:
%sadd = tail call %0 @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
%sadd.0 = extractvalue %0 %sadd, 0
@@ -51,11 +91,20 @@ entry:
ret i64 %add2
}
-; CHECK-LABEL: @test4(
-; CHECK-NOT: add1
-; CHECK: ret
define i64 @test5(i64 %a, i64 %b) nounwind ssp {
+; CHECK-LABEL: define i64 @test5(
+; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 [[A]], i64 [[B]])
+; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [[TMP0]] poison, i64 [[TMP1]], 0
+; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
+; CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[TMP0]] [[TMP2]], i1 [[TMP3]], 1
+; CHECK-NEXT: [[SSUB_0:%.*]] = extractvalue [[TMP0]] [[TMP4]], 0
+; CHECK-NEXT: [[ADD2:%.*]] = add i64 [[TMP1]], [[SSUB_0]]
+; CHECK-NEXT: ret i64 [[ADD2]]
+;
entry:
%ssub = tail call %0 @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
%ssub.0 = extractvalue %0 %ssub, 0
@@ -64,11 +113,20 @@ entry:
ret i64 %add2
}
-; CHECK-LABEL: @test5(
-; CHECK-NOT: sub1
-; CHECK: ret
define i64 @test6(i64 %a, i64 %b) nounwind ssp {
+; CHECK-LABEL: define i64 @test6(
+; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 [[A]], i64 [[B]])
+; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [[TMP0]] poison, i64 [[TMP1]], 0
+; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
+; CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[TMP0]] [[TMP2]], i1 [[TMP3]], 1
+; CHECK-NEXT: [[SMUL_0:%.*]] = extractvalue [[TMP0]] [[TMP4]], 0
+; CHECK-NEXT: [[ADD2:%.*]] = add i64 [[TMP1]], [[SMUL_0]]
+; CHECK-NEXT: ret i64 [[ADD2]]
+;
entry:
%smul = tail call %0 @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
%smul.0 = extractvalue %0 %smul, 0
@@ -77,9 +135,6 @@ entry:
ret i64 %add2
}
-; CHECK-LABEL: @test6(
-; CHECK-NOT: mul1
-; CHECK: ret
declare void @exit(i32) noreturn
declare %0 @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
diff --git a/llvm/test/Transforms/NewGVN/2011-09-07-TypeIdFor.ll b/llvm/test/Transforms/NewGVN/2011-09-07-TypeIdFor.ll
index 46e3c28..675e7da 100644
--- a/llvm/test/Transforms/NewGVN/2011-09-07-TypeIdFor.ll
+++ b/llvm/test/Transforms/NewGVN/2011-09-07-TypeIdFor.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
%struct.__fundamental_type_info_pseudo = type { %struct.__type_info_pseudo }
%struct.__type_info_pseudo = type { ptr, ptr }
@@ -18,26 +19,70 @@ declare void @__cxa_end_catch()
declare i32 @__gxx_personality_v0(i32, i64, ptr, ptr)
define void @_Z3foov() uwtable personality ptr @__gxx_personality_v0 {
+; CHECK-LABEL: define void @_Z3foov(
+; CHECK-SAME: ) #[[ATTR2:[0-9]+]] personality ptr @__gxx_personality_v0 {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: invoke void @_Z4barv()
+; CHECK-NEXT: to label [[RETURN:%.*]] unwind label [[LPAD:%.*]]
+; CHECK: lpad:
+; CHECK-NEXT: [[TMP0:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: catch ptr @_ZTIi
+; CHECK-NEXT: catch ptr @_ZTIb
+; CHECK-NEXT: catch ptr @_ZTIi
+; CHECK-NEXT: catch ptr @_ZTIb
+; CHECK-NEXT: [[EXC_PTR2_I:%.*]] = extractvalue { ptr, i32 } [[TMP0]], 0
+; CHECK-NEXT: [[FILTER3_I:%.*]] = extractvalue { ptr, i32 } [[TMP0]], 1
+; CHECK-NEXT: [[TYPEID_I:%.*]] = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[FILTER3_I]], [[TYPEID_I]]
+; CHECK-NEXT: br i1 [[TMP1]], label [[PPAD:%.*]], label [[NEXT:%.*]]
+; CHECK: next:
+; CHECK-NEXT: [[TYPEID1_I:%.*]] = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIb)
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[FILTER3_I]], [[TYPEID1_I]]
+; CHECK-NEXT: br i1 [[TMP2]], label [[PPAD2:%.*]], label [[NEXT2:%.*]]
+; CHECK: ppad:
+; CHECK-NEXT: [[TMP3:%.*]] = tail call ptr @__cxa_begin_catch(ptr [[EXC_PTR2_I]]) #[[ATTR1:[0-9]+]]
+; CHECK-NEXT: tail call void @__cxa_end_catch() #[[ATTR1]]
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: ppad2:
+; CHECK-NEXT: [[D_2073_5_I:%.*]] = tail call ptr @__cxa_begin_catch(ptr [[EXC_PTR2_I]]) #[[ATTR1]]
+; CHECK-NEXT: tail call void @__cxa_end_catch() #[[ATTR1]]
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: next2:
+; CHECK-NEXT: call void @_Z7cleanupv()
+; CHECK-NEXT: br i1 [[TMP1]], label [[PPAD3:%.*]], label [[NEXT3:%.*]]
+; CHECK: next3:
+; CHECK-NEXT: br i1 [[TMP2]], label [[PPAD4:%.*]], label [[UNWIND:%.*]]
+; CHECK: unwind:
+; CHECK-NEXT: resume { ptr, i32 } [[TMP0]]
+; CHECK: ppad3:
+; CHECK-NEXT: [[TMP4:%.*]] = tail call ptr @__cxa_begin_catch(ptr [[EXC_PTR2_I]]) #[[ATTR1]]
+; CHECK-NEXT: tail call void @__cxa_end_catch() #[[ATTR1]]
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: ppad4:
+; CHECK-NEXT: [[D_2080_5:%.*]] = tail call ptr @__cxa_begin_catch(ptr [[EXC_PTR2_I]]) #[[ATTR1]]
+; CHECK-NEXT: tail call void @__cxa_end_catch() #[[ATTR1]]
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: ret void
+;
entry:
invoke void @_Z4barv()
- to label %return unwind label %lpad
+ to label %return unwind label %lpad
lpad: ; preds = %entry
%0 = landingpad { ptr, i32 }
- catch ptr @_ZTIi
- catch ptr @_ZTIb
- catch ptr @_ZTIi
- catch ptr @_ZTIb
+ catch ptr @_ZTIi
+ catch ptr @_ZTIb
+ catch ptr @_ZTIi
+ catch ptr @_ZTIb
%exc_ptr2.i = extractvalue { ptr, i32 } %0, 0
%filter3.i = extractvalue { ptr, i32 } %0, 1
%typeid.i = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
-; CHECK: call i32 @llvm.eh.typeid.for
%1 = icmp eq i32 %filter3.i, %typeid.i
br i1 %1, label %ppad, label %next
next: ; preds = %lpad
%typeid1.i = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIb)
-; CHECK: call i32 @llvm.eh.typeid.for
%2 = icmp eq i32 %filter3.i, %typeid1.i
br i1 %2, label %ppad2, label %next2
@@ -54,7 +99,6 @@ ppad2: ; preds = %next
next2: ; preds = %next
call void @_Z7cleanupv()
%typeid = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
-; CHECK-NOT: call i32 @llvm.eh.typeid.for
%4 = icmp eq i32 %filter3.i, %typeid
br i1 %4, label %ppad3, label %next3
diff --git a/llvm/test/Transforms/NewGVN/2012-05-22-PreCrash.ll b/llvm/test/Transforms/NewGVN/2012-05-22-PreCrash.ll
index 787a3ba..1357f2b 100644
--- a/llvm/test/Transforms/NewGVN/2012-05-22-PreCrash.ll
+++ b/llvm/test/Transforms/NewGVN/2012-05-22-PreCrash.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn
; PR12858
diff --git a/llvm/test/Transforms/NewGVN/2016-08-30-MaskedScatterGather-xfail-inseltpoison.ll b/llvm/test/Transforms/NewGVN/2016-08-30-MaskedScatterGather-xfail-inseltpoison.ll
index 2fb275d..7b3f33b 100644
--- a/llvm/test/Transforms/NewGVN/2016-08-30-MaskedScatterGather-xfail-inseltpoison.ll
+++ b/llvm/test/Transforms/NewGVN/2016-08-30-MaskedScatterGather-xfail-inseltpoison.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; XFAIL: *
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
@@ -14,6 +15,25 @@ declare <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x
; CHECK: llvm.masked.scatter
; CHECK: llvm.masked.gather
define spir_kernel void @test(<2 x ptr> %in1, <2 x ptr> %in2, ptr %out) {
+; CHECK-LABEL: define spir_kernel void @test(
+; CHECK-SAME: <2 x ptr> [[IN1:%.*]], <2 x ptr> [[IN2:%.*]], ptr [[OUT:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP_0:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP_1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP_I:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP_0]], i32 0
+; CHECK-NEXT: [[TMP:%.*]] = insertelement <2 x ptr> [[TMP_I]], ptr [[TMP_1]], i32 1
+; CHECK-NEXT: [[IN1_V:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> [[IN1]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i32> undef)
+; CHECK-NEXT: [[IN2_V:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> [[IN2]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i32> undef)
+; CHECK-NEXT: call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> [[IN1_V]], <2 x ptr> [[TMP]], i32 1, <2 x i1> <i1 true, i1 true>)
+; CHECK-NEXT: call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> [[IN2_V]], <2 x ptr> [[TMP]], i32 1, <2 x i1> <i1 true, i1 true>)
+; CHECK-NEXT: [[TMP_V_1:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> [[TMP]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i32> undef)
+; CHECK-NEXT: [[TMP_V_1_0:%.*]] = extractelement <2 x i32> [[TMP_V_1]], i32 0
+; CHECK-NEXT: [[TMP_V_1_1:%.*]] = extractelement <2 x i32> [[TMP_V_1]], i32 1
+; CHECK-NEXT: store i32 [[TMP_V_1_0]], ptr [[OUT]], align 4
+; CHECK-NEXT: [[OUT_1:%.*]] = getelementptr i32, ptr [[OUT]], i32 1
+; CHECK-NEXT: store i32 [[TMP_V_1_1]], ptr [[OUT_1]], align 4
+; CHECK-NEXT: ret void
+;
entry:
; Just some temporary storage
%tmp.0 = alloca i32
diff --git a/llvm/test/Transforms/NewGVN/MemdepMiscompile.ll b/llvm/test/Transforms/NewGVN/MemdepMiscompile.ll
index f2d1827..a3f1f4d 100644
--- a/llvm/test/Transforms/NewGVN/MemdepMiscompile.ll
+++ b/llvm/test/Transforms/NewGVN/MemdepMiscompile.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-macosx10.7.0"
@@ -7,14 +8,38 @@ target triple = "x86_64-apple-macosx10.7.0"
; Make sure we do not replace load %shouldExit in while.cond.backedge
; with a phi node where the value from while.body is 0.
define i32 @test() nounwind ssp {
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SHOULDEXIT:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TASKSIDLE:%.*]] = alloca i32, align 4
+; CHECK-NEXT: store i32 0, ptr [[SHOULDEXIT]], align 4
+; CHECK-NEXT: store i32 0, ptr [[TASKSIDLE]], align 4
+; CHECK-NEXT: call void @CTestInitialize(ptr [[TASKSIDLE]]) #[[ATTR1:[0-9]+]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[SHOULDEXIT]], align 4
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[TMP0]], 0
+; CHECK-NEXT: br i1 [[CMP1]], label [[WHILE_BODY_LR_PH:%.*]], label [[WHILE_END:%.*]]
+; CHECK: while.body.lr.ph:
+; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
+; CHECK: while.body:
+; CHECK-NEXT: call void @RunInMode(i32 100) #[[ATTR1]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[TASKSIDLE]], align 4
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TOBOOL]], label [[WHILE_COND_BACKEDGE:%.*]], label [[IF_THEN:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: store i32 0, ptr [[TASKSIDLE]], align 4
+; CHECK-NEXT: call void @TimerCreate(ptr [[SHOULDEXIT]]) #[[ATTR1]]
+; CHECK-NEXT: br label [[WHILE_COND_BACKEDGE]]
+; CHECK: while.cond.backedge:
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[SHOULDEXIT]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP2]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[WHILE_BODY]], label [[WHILE_COND_WHILE_END_CRIT_EDGE:%.*]]
+; CHECK: while.cond.while.end_crit_edge:
+; CHECK-NEXT: br label [[WHILE_END]]
+; CHECK: while.end:
+; CHECK-NEXT: ret i32 0
+;
entry:
-; CHECK: test()
-; CHECK: while.body:
-; CHECK: call void @RunInMode
-; CHECK: br i1 %tobool, label %while.cond.backedge, label %if.then
-; CHECK: while.cond.backedge:
-; CHECK: load i32, ptr %shouldExit
-; CHECK: br i1 %cmp, label %while.body
%shouldExit = alloca i32, align 4
%tasksIdle = alloca i32, align 4
store i32 0, ptr %shouldExit, align 4
diff --git a/llvm/test/Transforms/NewGVN/addrspacecast.ll b/llvm/test/Transforms/NewGVN/addrspacecast.ll
index fea8a2f..394db28 100644
--- a/llvm/test/Transforms/NewGVN/addrspacecast.ll
+++ b/llvm/test/Transforms/NewGVN/addrspacecast.ll
@@ -7,7 +7,7 @@ define ptr addrspace(1) @addrspacecast(ptr %ptr) {
; CHECK-NEXT: [[Z1:%.*]] = addrspacecast ptr [[PTR:%.*]] to ptr addrspace(1)
; CHECK-NEXT: br label [[BLOCK2:%.*]]
; CHECK: block2:
-; CHECK-NEXT: store ptr addrspace(1) [[Z1]], ptr undef
+; CHECK-NEXT: store ptr addrspace(1) [[Z1]], ptr undef, align 8
; CHECK-NEXT: ret ptr addrspace(1) [[Z1]]
;
block1:
@@ -29,7 +29,7 @@ define ptr addrspace(1) @addrspacecast_different_result_types(ptr %ptr) {
; CHECK-NEXT: br label [[BLOCK2:%.*]]
; CHECK: block2:
; CHECK-NEXT: [[Z2:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(1)
-; CHECK-NEXT: store ptr addrspace(2) [[Z1]], ptr undef
+; CHECK-NEXT: store ptr addrspace(2) [[Z1]], ptr undef, align 8
; CHECK-NEXT: ret ptr addrspace(1) [[Z2]]
;
block1:
@@ -48,7 +48,7 @@ define ptr addrspace(1) @addrspacecast_simplify(ptr addrspace(1) %ptr) {
; CHECK-NEXT: [[CAST0:%.*]] = addrspacecast ptr addrspace(1) [[PTR:%.*]] to ptr
; CHECK-NEXT: br label [[BLOCK2:%.*]]
; CHECK: block2:
-; CHECK-NEXT: store ptr addrspace(1) [[PTR]], ptr undef
+; CHECK-NEXT: store ptr addrspace(1) [[PTR]], ptr undef, align 8
; CHECK-NEXT: ret ptr addrspace(1) [[PTR]]
;
block1:
@@ -70,7 +70,7 @@ define ptr addrspace(1) @addrspacecast_constant() {
; CHECK-NEXT: store ptr undef, ptr @h, align 4
; CHECK-NEXT: br label [[BLOCK2:%.*]]
; CHECK: block2:
-; CHECK-NEXT: store ptr addrspace(1) undef, ptr undef
+; CHECK-NEXT: store ptr addrspace(1) undef, ptr undef, align 8
; CHECK-NEXT: ret ptr addrspace(1) undef
;
block1:
@@ -88,11 +88,11 @@ block2:
define ptr addrspace(1) @addrspacecast_leader(ptr %arg.ptr) {
; CHECK-LABEL: @addrspacecast_leader(
; CHECK-NEXT: block1:
-; CHECK-NEXT: [[LOAD0:%.*]] = load ptr, ptr [[ARG_PTR:%.*]]
+; CHECK-NEXT: [[LOAD0:%.*]] = load ptr, ptr [[ARG_PTR:%.*]], align 8
; CHECK-NEXT: [[Z1:%.*]] = addrspacecast ptr [[LOAD0]] to ptr addrspace(1)
; CHECK-NEXT: br label [[BLOCK2:%.*]]
; CHECK: block2:
-; CHECK-NEXT: store ptr addrspace(1) [[Z1]], ptr undef
+; CHECK-NEXT: store ptr addrspace(1) [[Z1]], ptr undef, align 8
; CHECK-NEXT: ret ptr addrspace(1) [[Z1]]
;
block1:
diff --git a/llvm/test/Transforms/NewGVN/basic-cyclic-opt.ll b/llvm/test/Transforms/NewGVN/basic-cyclic-opt.ll
index baef8b5..5319046 100644
--- a/llvm/test/Transforms/NewGVN/basic-cyclic-opt.ll
+++ b/llvm/test/Transforms/NewGVN/basic-cyclic-opt.ll
@@ -245,7 +245,7 @@ bb23: ; preds = %bb4
define i8 @irreducible_memoryphi(ptr noalias %arg, ptr noalias %arg2) {
; CHECK-LABEL: @irreducible_memoryphi(
; CHECK-NEXT: bb:
-; CHECK-NEXT: store i8 0, ptr [[ARG:%.*]]
+; CHECK-NEXT: store i8 0, ptr [[ARG:%.*]], align 1
; CHECK-NEXT: br i1 undef, label [[BB2:%.*]], label [[BB1:%.*]]
; CHECK: bb1:
; CHECK-NEXT: br label [[BB2]]
diff --git a/llvm/test/Transforms/NewGVN/basic-undef-test.ll b/llvm/test/Transforms/NewGVN/basic-undef-test.ll
index 5b731fc..148c022 100644
--- a/llvm/test/Transforms/NewGVN/basic-undef-test.ll
+++ b/llvm/test/Transforms/NewGVN/basic-undef-test.ll
@@ -1,15 +1,21 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S < %s | FileCheck %s
; ModuleID = 'test3.ll'
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define i32 @main(ptr %foo) {
+; CHECK-LABEL: define i32 @main(
+; CHECK-SAME: ptr [[FOO:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[FOO]], align 4
+; CHECK-NEXT: store i32 5, ptr undef, align 4
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], [[TMP0]]
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
entry:
-; CHECK: load i32, ptr %foo, align 4
%0 = load i32, ptr %foo, align 4
store i32 5, ptr undef, align 4
-; CHECK-NOT: load i32, ptr %foo, align 4
%1 = load i32, ptr %foo, align 4
-; CHECK: add i32 %0, %0
%2 = add i32 %0, %1
ret i32 %2
}
diff --git a/llvm/test/Transforms/NewGVN/br-identical.ll b/llvm/test/Transforms/NewGVN/br-identical.ll
index c998385..23f43b0 100644
--- a/llvm/test/Transforms/NewGVN/br-identical.ll
+++ b/llvm/test/Transforms/NewGVN/br-identical.ll
@@ -1,8 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S -o - %s | FileCheck %s
; If a branch has two identical successors, we cannot declare either dead.
define void @widget(i1 %p) {
+; CHECK-LABEL: define void @widget(
+; CHECK-SAME: i1 [[P:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[BB2:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: [[T1:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[T2:%.*]], [[BB7:%.*]] ]
+; CHECK-NEXT: [[T2]] = add i64 [[T1]], 1
+; CHECK-NEXT: [[T3:%.*]] = icmp ult i64 0, [[T2]]
+; CHECK-NEXT: br i1 [[T3]], label [[BB3:%.*]], label [[BB4:%.*]]
+; CHECK: bb3:
+; CHECK-NEXT: [[T4:%.*]] = call i64 @f()
+; CHECK-NEXT: br label [[BB4]]
+; CHECK: bb4:
+; CHECK-NEXT: br i1 [[P]], label [[BB5:%.*]], label [[BB6:%.*]]
+; CHECK: bb5:
+; CHECK-NEXT: br i1 true, label [[BB7]], label [[BB7]]
+; CHECK: bb6:
+; CHECK-NEXT: br i1 true, label [[BB7]], label [[BB7]]
+; CHECK: bb7:
+; CHECK-NEXT: br i1 [[P]], label [[BB2]], label [[BB8:%.*]]
+; CHECK: bb8:
+; CHECK-NEXT: ret void
+;
entry:
br label %bb2
@@ -17,7 +41,6 @@ bb3:
br label %bb4
bb4:
- ; CHECK-NOT: phi {{.*}} undef
%foo = phi i64 [ %t4, %bb3 ], [ 0, %bb2 ]
br i1 %p, label %bb5, label %bb6
diff --git a/llvm/test/Transforms/NewGVN/calloc-load-removal.ll b/llvm/test/Transforms/NewGVN/calloc-load-removal.ll
index a8a1e66..608f739 100644
--- a/llvm/test/Transforms/NewGVN/calloc-load-removal.ll
+++ b/llvm/test/Transforms/NewGVN/calloc-load-removal.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -S -passes=newgvn < %s | FileCheck %s
; Check that loads from calloc are recognized as being zero.
@@ -5,14 +6,15 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; Function Attrs: nounwind uwtable
define i32 @test1() {
+; CHECK-LABEL: define i32 @test1() {
+; CHECK-NEXT: [[TMP1:%.*]] = tail call noalias ptr @calloc(i64 1, i64 4)
+; CHECK-NEXT: ret i32 0
+;
%1 = tail call noalias ptr @calloc(i64 1, i64 4)
; This load is trivially constant zero
%2 = load i32, ptr %1, align 4
ret i32 %2
-; CHECK-LABEL: @test1(
-; CHECK-NOT: %2 = load i32, ptr %1, align 4
-; CHECK: ret i32 0
}
declare noalias ptr @calloc(i64, i64) mustprogress nofree nounwind willreturn allockind("alloc,zeroed") allocsize(0,1) "alloc-family"="malloc"
diff --git a/llvm/test/Transforms/NewGVN/calls-readonly.ll b/llvm/test/Transforms/NewGVN/calls-readonly.ll
index 68d74c1..49f5d3a 100644
--- a/llvm/test/Transforms/NewGVN/calls-readonly.ll
+++ b/llvm/test/Transforms/NewGVN/calls-readonly.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
; Should delete the second call to strlen even though the intervening strchr call exists.
@@ -5,6 +6,22 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
target triple = "i386-apple-darwin7"
define ptr @test(ptr %P, ptr %Q, i32 %x, i32 %y) nounwind readonly {
+; CHECK-LABEL: define ptr @test(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @strlen(ptr [[P]])
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 0
+; CHECK-NEXT: br i1 [[TMP1]], label [[BB:%.*]], label [[BB1:%.*]]
+; CHECK: bb:
+; CHECK-NEXT: [[TMP2:%.*]] = sdiv i32 [[X]], [[Y]]
+; CHECK-NEXT: br label [[BB1]]
+; CHECK: bb1:
+; CHECK-NEXT: [[X_ADDR_0:%.*]] = phi i32 [ [[TMP2]], [[BB]] ], [ [[X]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = tail call ptr @strchr(ptr [[Q]], i32 97)
+; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[X_ADDR_0]], [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP3]], i32 [[X_ADDR_0]]
+; CHECK-NEXT: ret ptr [[TMP5]]
+;
entry:
%0 = tail call i32 @strlen(ptr %P) ; <i32> [#uses=2]
%1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
@@ -24,21 +41,6 @@ bb1: ; preds = %bb, %entry
ret ptr %6
}
-; CHECK: define ptr @test(ptr %P, ptr %Q, i32 %x, i32 %y) #0 {
-; CHECK: entry:
-; CHECK-NEXT: %0 = tail call i32 @strlen(ptr %P)
-; CHECK-NEXT: %1 = icmp eq i32 %0, 0
-; CHECK-NEXT: br i1 %1, label %bb, label %bb1
-; CHECK: bb:
-; CHECK-NEXT: %2 = sdiv i32 %x, %y
-; CHECK-NEXT: br label %bb1
-; CHECK: bb1:
-; CHECK-NEXT: %x_addr.0 = phi i32 [ %2, %bb ], [ %x, %entry ]
-; CHECK-NEXT: %3 = tail call ptr @strchr(ptr %Q, i32 97)
-; CHECK-NEXT: %4 = add i32 %x_addr.0, %0
-; CHECK-NEXT: %5 = getelementptr i8, ptr %3, i32 %x_addr.0
-; CHECK-NEXT: ret ptr %5
-; CHECK: }
declare i32 @strlen(ptr) nounwind readonly
diff --git a/llvm/test/Transforms/NewGVN/completeness.ll b/llvm/test/Transforms/NewGVN/completeness.ll
index d968c78..4841e2e 100644
--- a/llvm/test/Transforms/NewGVN/completeness.ll
+++ b/llvm/test/Transforms/NewGVN/completeness.ll
@@ -6,9 +6,12 @@ define i32 @test1(i32, ptr) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP0:%.*]], 0
; CHECK-NEXT: br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP5:%.*]]
-; CHECK: br label [[TMP6:%.*]]
-; CHECK: br label [[TMP6]]
-; CHECK: [[PHIOFOPS:%.*]] = phi i32 [ 105, [[TMP5]] ], [ 75, [[TMP4]] ]
+; CHECK: 4:
+; CHECK-NEXT: br label [[TMP6:%.*]]
+; CHECK: 5:
+; CHECK-NEXT: br label [[TMP6]]
+; CHECK: 6:
+; CHECK-NEXT: [[PHIOFOPS:%.*]] = phi i32 [ 105, [[TMP5]] ], [ 75, [[TMP4]] ]
; CHECK-NEXT: [[DOT0:%.*]] = phi i32 [ 5, [[TMP4]] ], [ 7, [[TMP5]] ]
; CHECK-NEXT: ret i32 [[PHIOFOPS]]
;
@@ -31,9 +34,12 @@ define i32 @test1b(i32, ptr) {
; CHECK-LABEL: @test1b(
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP0:%.*]], 0
; CHECK-NEXT: br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP5:%.*]]
-; CHECK: br label [[TMP6:%.*]]
-; CHECK: br label [[TMP6]]
-; CHECK: [[PHIOFOPS1:%.*]] = phi i32 [ 105, [[TMP5]] ], [ 75, [[TMP4]] ]
+; CHECK: 4:
+; CHECK-NEXT: br label [[TMP6:%.*]]
+; CHECK: 5:
+; CHECK-NEXT: br label [[TMP6]]
+; CHECK: 6:
+; CHECK-NEXT: [[PHIOFOPS1:%.*]] = phi i32 [ 105, [[TMP5]] ], [ 75, [[TMP4]] ]
; CHECK-NEXT: [[PHIOFOPS:%.*]] = phi i32 [ 1575, [[TMP5]] ], [ 1125, [[TMP4]] ]
; CHECK-NEXT: [[DOT0:%.*]] = phi i32 [ 5, [[TMP4]] ], [ 7, [[TMP5]] ]
; CHECK-NEXT: ret i32 [[PHIOFOPS]]
@@ -58,9 +64,12 @@ define i32 @test2(i32) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP0:%.*]], 0
; CHECK-NEXT: br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]]
-; CHECK: br label [[TMP5:%.*]]
-; CHECK: br label [[TMP5]]
-; CHECK: [[DOT01:%.*]] = phi i32 [ 3, [[TMP3]] ], [ 2, [[TMP4]] ]
+; CHECK: 3:
+; CHECK-NEXT: br label [[TMP5:%.*]]
+; CHECK: 4:
+; CHECK-NEXT: br label [[TMP5]]
+; CHECK: 5:
+; CHECK-NEXT: [[DOT01:%.*]] = phi i32 [ 3, [[TMP3]] ], [ 2, [[TMP4]] ]
; CHECK-NEXT: [[DOT0:%.*]] = phi i32 [ 2, [[TMP3]] ], [ 3, [[TMP4]] ]
; CHECK-NEXT: ret i32 5
;
@@ -158,9 +167,12 @@ define i32 @test4(i32, ptr, ptr noalias, ptr noalias) {
; CHECK-NEXT: store i32 7, ptr [[TMP3:%.*]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP0:%.*]], 0
; CHECK-NEXT: br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP7:%.*]]
-; CHECK: br label [[TMP8:%.*]]
-; CHECK: br label [[TMP8]]
-; CHECK: [[DOT01:%.*]] = phi i32 [ 5, [[TMP6]] ], [ 7, [[TMP7]] ]
+; CHECK: 6:
+; CHECK-NEXT: br label [[TMP8:%.*]]
+; CHECK: 7:
+; CHECK-NEXT: br label [[TMP8]]
+; CHECK: 8:
+; CHECK-NEXT: [[DOT01:%.*]] = phi i32 [ 5, [[TMP6]] ], [ 7, [[TMP7]] ]
; CHECK-NEXT: [[DOT0:%.*]] = phi ptr [ [[TMP2]], [[TMP6]] ], [ [[TMP3]], [[TMP7]] ]
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOT0]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = mul nsw i32 [[TMP9]], 15
@@ -287,19 +299,19 @@ bb28: ; preds = %bb27, %bb
define i8 @test6(ptr %addr) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: entry-block:
-; CHECK-NEXT: br label %main-loop
+; CHECK-NEXT: br label [[MAIN_LOOP:%.*]]
; CHECK: main-loop:
-; CHECK-NEXT: [[PHIOFOPS1:%.*]] = phi i1 [ true, %entry-block ], [ false, [[CORE:%.*]] ]
-; CHECK-NEXT: [[PHIOFOPS:%.*]] = phi i1 [ false, %entry-block ], [ true, [[CORE]] ]
-; CHECK-NEXT: [[PHI:%.*]] = phi i8 [ 0, %entry-block ], [ 1, [[CORE]] ]
-; CHECK-NEXT: store volatile i8 0, ptr [[ADDR:%.*]]
-; CHECK-NEXT: br i1 [[PHIOFOPS1]], label %busy-wait-phi-0, label [[EXIT:%.*]]
+; CHECK-NEXT: [[PHIOFOPS1:%.*]] = phi i1 [ true, [[ENTRY_BLOCK:%.*]] ], [ false, [[CORE:%.*]] ]
+; CHECK-NEXT: [[PHIOFOPS:%.*]] = phi i1 [ false, [[ENTRY_BLOCK]] ], [ true, [[CORE]] ]
+; CHECK-NEXT: [[PHI:%.*]] = phi i8 [ 0, [[ENTRY_BLOCK]] ], [ 1, [[CORE]] ]
+; CHECK-NEXT: store volatile i8 0, ptr [[ADDR:%.*]], align 1
+; CHECK-NEXT: br i1 [[PHIOFOPS1]], label [[BUSY_WAIT_PHI_0:%.*]], label [[EXIT:%.*]]
; CHECK: busy-wait-phi-0:
-; CHECK-NEXT: [[LOAD:%.*]] = load volatile i8, ptr [[ADDR]]
+; CHECK-NEXT: [[LOAD:%.*]] = load volatile i8, ptr [[ADDR]], align 1
; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i8 [[LOAD]], 0
-; CHECK-NEXT: br i1 [[ICMP]], label %busy-wait-phi-0, label [[CORE]]
+; CHECK-NEXT: br i1 [[ICMP]], label [[BUSY_WAIT_PHI_0]], label [[CORE]]
; CHECK: core:
-; CHECK-NEXT: br i1 [[PHIOFOPS]], label [[TRAP:%.*]], label %main-loop
+; CHECK-NEXT: br i1 [[PHIOFOPS]], label [[TRAP:%.*]], label [[MAIN_LOOP]]
; CHECK: trap:
; CHECK-NEXT: ret i8 1
; CHECK: exit:
@@ -507,13 +519,13 @@ declare ptr @wombat()
define void @test12(ptr %p) {
; CHECK-LABEL: @test12(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr %p
+; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP]], 0
; CHECK-NEXT: br i1 [[TMP1]], label [[BB2:%.*]], label [[BB8:%.*]]
; CHECK: bb2:
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb3:
-; CHECK-NEXT: br i1 true, label [[BB6:%.*]], label [[BB7]]
+; CHECK-NEXT: br i1 true, label [[BB6:%.*]], label [[BB7:%.*]]
; CHECK: bb6:
; CHECK-NEXT: br label [[BB7]]
; CHECK: bb7:
@@ -551,7 +563,7 @@ define void @test13() {
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB1:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[TMP:%.*]] = load i8, ptr null
+; CHECK-NEXT: [[TMP:%.*]] = load i8, ptr null, align 1
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb3:
; CHECK-NEXT: [[PHIOFOPS:%.*]] = phi i8 [ [[TMP]], [[BB1]] ], [ [[TMP10:%.*]], [[BB3]] ]
@@ -560,7 +572,7 @@ define void @test13() {
; CHECK-NEXT: [[TMP6]] = getelementptr i8, ptr [[TMP4]], i64 1
; CHECK-NEXT: [[TMP8:%.*]] = sext i8 [[PHIOFOPS]] to i32
; CHECK-NEXT: [[TMP9]] = mul i32 [[TMP5]], [[TMP8]]
-; CHECK-NEXT: [[TMP10]] = load i8, ptr [[TMP6]]
+; CHECK-NEXT: [[TMP10]] = load i8, ptr [[TMP6]], align 1
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i8 [[TMP10]], 0
; CHECK-NEXT: br i1 [[TMP11]], label [[BB12:%.*]], label [[BB3]]
; CHECK: bb12:
diff --git a/llvm/test/Transforms/NewGVN/cond_br.ll b/llvm/test/Transforms/NewGVN/cond_br.ll
index 3dbeb39..930e5b3 100644
--- a/llvm/test/Transforms/NewGVN/cond_br.ll
+++ b/llvm/test/Transforms/NewGVN/cond_br.ll
@@ -1,12 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S < %s | FileCheck %s
@y = external global i32
@z = external global i32
; Function Attrs: nounwind ssp uwtable
define void @foo(i32 %x) {
-; CHECK: @foo(i32 %x)
-; CHECK: %.pre = load i32, ptr @y
-; CHECK: call void @bar(i32 %.pre)
+; CHECK-LABEL: define void @foo(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr @y, align 4
+; CHECK-NEXT: br i1 false, label [[IF_THEN:%.*]], label [[ENTRY_IF_END_CRIT_EDGE:%.*]]
+; CHECK: entry.if.end_crit_edge:
+; CHECK-NEXT: br label [[IF_END:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: store i8 poison, ptr null, align 1
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: tail call void @bar(i32 [[DOTPRE]])
+; CHECK-NEXT: ret void
+;
%t = sub i32 %x, %x
%.pre = load i32, ptr @y, align 4
@@ -28,9 +39,21 @@ if.end: ; preds = %entry.if.end_crit_e
}
define void @foo2(i32 %x) {
-; CHECK: @foo2(i32 %x)
-; CHECK: %.pre = load i32, ptr @y
-; CHECK: tail call void @bar(i32 %.pre)
+; CHECK-LABEL: define void @foo2(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr @y, align 4
+; CHECK-NEXT: br i1 false, label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: store i8 poison, ptr null, align 1
+; CHECK-NEXT: br label [[IF_END:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: store i32 1, ptr @z, align 4
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: tail call void @bar(i32 [[DOTPRE]])
+; CHECK-NEXT: ret void
+;
entry:
%t = sub i32 %x, %x
%.pre = load i32, ptr @y, align 4
diff --git a/llvm/test/Transforms/NewGVN/condprop.ll b/llvm/test/Transforms/NewGVN/condprop.ll
index e685dfe..d97fd38 100644
--- a/llvm/test/Transforms/NewGVN/condprop.ll
+++ b/llvm/test/Transforms/NewGVN/condprop.ll
@@ -134,11 +134,11 @@ define void @test4(i1 %b, i32 %x) {
; CHECK-NEXT: br i1 [[B:%.*]], label [[SW:%.*]], label [[CASE3:%.*]]
; CHECK: sw:
; CHECK-NEXT: switch i32 [[X:%.*]], label [[DEFAULT:%.*]] [
-; CHECK-NEXT: i32 0, label [[CASE0:%.*]]
-; CHECK-NEXT: i32 1, label [[CASE1:%.*]]
-; CHECK-NEXT: i32 2, label [[CASE0]]
-; CHECK-NEXT: i32 3, label [[CASE3]]
-; CHECK-NEXT: i32 4, label [[DEFAULT]]
+; CHECK-NEXT: i32 0, label [[CASE0:%.*]]
+; CHECK-NEXT: i32 1, label [[CASE1:%.*]]
+; CHECK-NEXT: i32 2, label [[CASE0]]
+; CHECK-NEXT: i32 3, label [[CASE3]]
+; CHECK-NEXT: i32 4, label [[DEFAULT]]
; CHECK-NEXT: ]
; CHECK: default:
; CHECK-NEXT: call void @bar(i32 [[X]])
diff --git a/llvm/test/Transforms/NewGVN/crash-no-aa.ll b/llvm/test/Transforms/NewGVN/crash-no-aa.ll
index 55e2bcb..30f2e37 100644
--- a/llvm/test/Transforms/NewGVN/crash-no-aa.ll
+++ b/llvm/test/Transforms/NewGVN/crash-no-aa.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -disable-basic-aa -passes=newgvn -S < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/llvm/test/Transforms/NewGVN/crash-usecounts.ll b/llvm/test/Transforms/NewGVN/crash-usecounts.ll
index 5527bea..5cae740 100644
--- a/llvm/test/Transforms/NewGVN/crash-usecounts.ll
+++ b/llvm/test/Transforms/NewGVN/crash-usecounts.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -disable-output < %s
define void @test(i1 %arg, i1 %arg1) {
diff --git a/llvm/test/Transforms/NewGVN/crash.ll b/llvm/test/Transforms/NewGVN/crash.ll
index c886bd3..26eaa76 100644
--- a/llvm/test/Transforms/NewGVN/crash.ll
+++ b/llvm/test/Transforms/NewGVN/crash.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -disable-output < %s
; PR5631
@@ -106,7 +107,7 @@ if.then21.i:
ret ptr undef
do.body36.i:
- %ivar38.i = load i64, ptr @g
+ %ivar38.i = load i64, ptr @g
%add.ptr39.sum.i = add i64 %ivar38.i, 8
%tmp40.i = getelementptr inbounds i8, ptr %tmp18.i, i64 %add.ptr39.sum.i
%tmp41.i = load i64, ptr %tmp40.i
@@ -132,14 +133,14 @@ declare i32 @foo2()
define i32 @test4() {
entry:
ret i32 0
-
+
dead:
%P2 = getelementptr i32, ptr %P2, i32 52
%Q2 = getelementptr i32, ptr %Q2, i32 52
store i32 4, ptr %P2
%A = load i32, ptr %Q2
br i1 true, label %dead, label %dead2
-
+
dead2:
ret i32 %A
}
diff --git a/llvm/test/Transforms/NewGVN/cyclic-phi-handling.ll b/llvm/test/Transforms/NewGVN/cyclic-phi-handling.ll
index 4a2f0b9..dc15079 100644
--- a/llvm/test/Transforms/NewGVN/cyclic-phi-handling.ll
+++ b/llvm/test/Transforms/NewGVN/cyclic-phi-handling.ll
@@ -5,15 +5,15 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define void @foo(i32 %arg, i32 %arg1, ptr %arg2) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: bb:
-; CHECK-NEXT: br label %bb3
+; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb3:
-; CHECK-NEXT: [[TMP:%.*]] = phi i32 [ %arg1, %bb ], [ [[TMP:%.*]]4, %bb7 ]
-; CHECK-NEXT: [[TMP4:%.*]] = phi i32 [ %arg, %bb ], [ [[TMP]], %bb7 ]
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 %arg2(i32 [[TMP4]], i32 [[TMP]])
+; CHECK-NEXT: [[TMP:%.*]] = phi i32 [ [[ARG1:%.*]], [[BB:%.*]] ], [ [[TMP4:%.*]], [[BB7:%.*]] ]
+; CHECK-NEXT: [[TMP4]] = phi i32 [ [[ARG:%.*]], [[BB]] ], [ [[TMP]], [[BB7]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 [[ARG2:%.*]](i32 [[TMP4]], i32 [[TMP]])
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
-; CHECK-NEXT: br i1 [[TMP6]], label %bb7, label %bb8
+; CHECK-NEXT: br i1 [[TMP6]], label [[BB7]], label [[BB8:%.*]]
; CHECK: bb7:
-; CHECK-NEXT: br label %bb3
+; CHECK-NEXT: br label [[BB3]]
; CHECK: bb8:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/NewGVN/dbg-redundant-load.ll b/llvm/test/Transforms/NewGVN/dbg-redundant-load.ll
index 01d95ae..cd2eca0 100644
--- a/llvm/test/Transforms/NewGVN/dbg-redundant-load.ll
+++ b/llvm/test/Transforms/NewGVN/dbg-redundant-load.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S < %s | FileCheck %s
; Check that the redundant load from %if.then is removed.
@@ -6,15 +7,22 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-; CHECK: @test_redundant_load(
-; CHECK-LABEL: entry:
-; CHECK-NEXT: load i32, ptr %Y, align 4, !dbg ![[LOC:[0-9]+]]
-; CHECK-LABEL: if.then:
-; CHECK-NOT: load
-; CHECK-LABEL: if.end:
-; CHECK: ![[LOC]] = !DILocation(line: 3, scope: !{{.*}})
define i32 @test_redundant_load(i32 %X, ptr %Y) !dbg !6 {
+; CHECK-LABEL: define i32 @test_redundant_load(
+; CHECK-SAME: i32 [[X:%.*]], ptr [[Y:%.*]]) !dbg [[DBG6:![0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y]], align 4, !dbg [[DBG8:![0-9]+]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], -1, !dbg [[DBG9:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]], !dbg [[DBG9]]
+; CHECK: if.then:
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP0]], !dbg [[DBG10:![0-9]+]]
+; CHECK-NEXT: call void @foo(), !dbg [[DBG11:![0-9]+]]
+; CHECK-NEXT: br label [[IF_END]], !dbg [[DBG12:![0-9]+]]
+; CHECK: if.end:
+; CHECK-NEXT: [[RESULT_0:%.*]] = phi i32 [ [[ADD]], [[IF_THEN]] ], [ [[TMP0]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret i32 [[RESULT_0]], !dbg [[DBG13:![0-9]+]]
+;
entry:
%0 = load i32, ptr %Y, align 4, !dbg !8
%cmp = icmp sgt i32 %X, -1, !dbg !9
@@ -50,3 +58,16 @@ declare void @foo()
!11 = !DILocation(line: 7, scope: !6)
!12 = !DILocation(line: 8, scope: !6)
!13 = !DILocation(line: 10, scope: !6)
+;.
+; CHECK: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: [[META1:![0-9]+]], isOptimized: false, runtimeVersion: 0, emissionKind: LineTablesOnly, enums: [[META2:![0-9]+]])
+; CHECK: [[META1]] = !DIFile(filename: "test.cpp", directory: "")
+; CHECK: [[META2]] = !{}
+; CHECK: [[DBG6]] = distinct !DISubprogram(name: "test_redundant_load", scope: [[META1]], file: [[META1]], line: 2, type: [[META7:![0-9]+]], scopeLine: 2, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: [[META0]], retainedNodes: [[META2]])
+; CHECK: [[META7]] = !DISubroutineType(types: [[META2]])
+; CHECK: [[DBG8]] = !DILocation(line: 3, scope: [[DBG6]])
+; CHECK: [[DBG9]] = !DILocation(line: 5, scope: [[DBG6]])
+; CHECK: [[DBG10]] = !DILocation(line: 6, scope: [[DBG6]])
+; CHECK: [[DBG11]] = !DILocation(line: 7, scope: [[DBG6]])
+; CHECK: [[DBG12]] = !DILocation(line: 8, scope: [[DBG6]])
+; CHECK: [[DBG13]] = !DILocation(line: 10, scope: [[DBG6]])
+;.
diff --git a/llvm/test/Transforms/NewGVN/edge.ll b/llvm/test/Transforms/NewGVN/edge.ll
index 8699c85..143e52c 100644
--- a/llvm/test/Transforms/NewGVN/edge.ll
+++ b/llvm/test/Transforms/NewGVN/edge.ll
@@ -1,7 +1,17 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S < %s | FileCheck %s
define i32 @f1(i32 %x) {
- ; CHECK-LABEL: define i32 @f1(
+; CHECK-LABEL: define i32 @f1(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: bb0:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[BB2:%.*]], label [[BB1:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[BB2]]
+; CHECK: bb2:
+; CHECK-NEXT: ret i32 [[X]]
+;
bb0:
%cmp = icmp eq i32 %x, 0
br i1 %cmp, label %bb2, label %bb1
@@ -11,12 +21,19 @@ bb2:
%cond = phi i32 [ %x, %bb0 ], [ 0, %bb1 ]
%foo = add i32 %cond, %x
ret i32 %foo
- ; CHECK: bb2:
- ; CHECK: ret i32 %x
}
define i32 @f2(i32 %x) {
- ; CHECK-LABEL: define i32 @f2(
+; CHECK-LABEL: define i32 @f2(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: bb0:
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[X]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[BB2]]
+; CHECK: bb2:
+; CHECK-NEXT: ret i32 [[X]]
+;
bb0:
%cmp = icmp ne i32 %x, 0
br i1 %cmp, label %bb1, label %bb2
@@ -26,12 +43,20 @@ bb2:
%cond = phi i32 [ %x, %bb0 ], [ 0, %bb1 ]
%foo = add i32 %cond, %x
ret i32 %foo
- ; CHECK: bb2:
- ; CHECK: ret i32 %x
}
define i32 @f3(i32 %x) {
- ; CHECK-LABEL: define i32 @f3(
+; CHECK-LABEL: define i32 @f3(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: bb0:
+; CHECK-NEXT: switch i32 [[X]], label [[BB1:%.*]] [
+; CHECK-NEXT: i32 0, label [[BB2:%.*]]
+; CHECK-NEXT: ]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[BB2]]
+; CHECK: bb2:
+; CHECK-NEXT: ret i32 [[X]]
+;
bb0:
switch i32 %x, label %bb1 [ i32 0, label %bb2]
bb1:
@@ -40,13 +65,21 @@ bb2:
%cond = phi i32 [ %x, %bb0 ], [ 0, %bb1 ]
%foo = add i32 %cond, %x
ret i32 %foo
- ; CHECK: bb2:
- ; CHECK: ret i32 %x
}
declare void @g(i1)
define void @f4(ptr %x) {
; CHECK-LABEL: define void @f4(
+; CHECK-SAME: ptr [[X:%.*]]) {
+; CHECK-NEXT: bb0:
+; CHECK-NEXT: [[Y:%.*]] = icmp eq ptr null, [[X]]
+; CHECK-NEXT: br i1 [[Y]], label [[BB2:%.*]], label [[BB1:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[BB2]]
+; CHECK: bb2:
+; CHECK-NEXT: call void @g(i1 [[Y]])
+; CHECK-NEXT: ret void
+;
bb0:
%y = icmp eq ptr null, %x
br i1 %y, label %bb2, label %bb1
@@ -55,11 +88,22 @@ bb1:
bb2:
%zed = icmp eq ptr null, %x
call void @g(i1 %zed)
-; CHECK: call void @g(i1 %y)
ret void
}
define double @fcmp_oeq_not_zero(double %x, double %y) {
+; CHECK-LABEL: define double @fcmp_oeq_not_zero(
+; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq double [[Y]], 2.000000e+00
+; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[RETURN:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[DIV:%.*]] = fdiv double [[X]], 2.000000e+00
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL:%.*]] = phi double [ [[DIV]], [[IF]] ], [ [[X]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret double [[RETVAL]]
+;
entry:
%cmp = fcmp oeq double %y, 2.0
br i1 %cmp, label %if, label %return
@@ -72,11 +116,21 @@ return:
%retval = phi double [ %div, %if ], [ %x, %entry ]
ret double %retval
-; CHECK-LABEL: define double @fcmp_oeq_not_zero(
-; CHECK: %div = fdiv double %x, 2.0
}
define double @fcmp_une_not_zero(double %x, double %y) {
+; CHECK-LABEL: define double @fcmp_une_not_zero(
+; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = fcmp une double [[Y]], 2.000000e+00
+; CHECK-NEXT: br i1 [[CMP]], label [[RETURN:%.*]], label [[ELSE:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[DIV:%.*]] = fdiv double [[X]], 2.000000e+00
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL:%.*]] = phi double [ [[DIV]], [[ELSE]] ], [ [[X]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret double [[RETVAL]]
+;
entry:
%cmp = fcmp une double %y, 2.0
br i1 %cmp, label %return, label %else
@@ -89,14 +143,24 @@ return:
%retval = phi double [ %div, %else ], [ %x, %entry ]
ret double %retval
-; CHECK-LABEL: define double @fcmp_une_not_zero(
-; CHECK: %div = fdiv double %x, 2.0
}
-; PR22376 - We can't propagate zero constants because -0.0
+; PR22376 - We can't propagate zero constants because -0.0
; compares equal to 0.0. If %y is -0.0 in this test case,
; we would produce the wrong sign on the infinity return value.
define double @fcmp_oeq_zero(double %x, double %y) {
+; CHECK-LABEL: define double @fcmp_oeq_zero(
+; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq double [[Y]], 0.000000e+00
+; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[RETURN:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[DIV:%.*]] = fdiv double [[X]], [[Y]]
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL:%.*]] = phi double [ [[DIV]], [[IF]] ], [ [[X]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret double [[RETVAL]]
+;
entry:
%cmp = fcmp oeq double %y, 0.0
br i1 %cmp, label %if, label %return
@@ -109,11 +173,21 @@ return:
%retval = phi double [ %div, %if ], [ %x, %entry ]
ret double %retval
-; CHECK-LABEL: define double @fcmp_oeq_zero(
-; CHECK: %div = fdiv double %x, %y
}
define double @fcmp_une_zero(double %x, double %y) {
+; CHECK-LABEL: define double @fcmp_une_zero(
+; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = fcmp une double [[Y]], -0.000000e+00
+; CHECK-NEXT: br i1 [[CMP]], label [[RETURN:%.*]], label [[ELSE:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[DIV:%.*]] = fdiv double [[X]], [[Y]]
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL:%.*]] = phi double [ [[DIV]], [[ELSE]] ], [ [[X]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret double [[RETVAL]]
+;
entry:
%cmp = fcmp une double %y, -0.0
br i1 %cmp, label %return, label %else
@@ -126,45 +200,65 @@ return:
%retval = phi double [ %div, %else ], [ %x, %entry ]
ret double %retval
-; CHECK-LABEL: define double @fcmp_une_zero(
-; CHECK: %div = fdiv double %x, %y
}
; We also cannot propagate a value if it's not a constant.
; This is because the value could be 0.0 or -0.0.
define double @fcmp_oeq_maybe_zero(double %x, double %y, double %z1, double %z2) {
+; CHECK-LABEL: define double @fcmp_oeq_maybe_zero(
+; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]], double [[Z1:%.*]], double [[Z2:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[Z:%.*]] = fadd double [[Z1]], [[Z2]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq double [[Y]], [[Z]]
+; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[RETURN:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[DIV:%.*]] = fdiv double [[X]], [[Z]]
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL:%.*]] = phi double [ [[DIV]], [[IF]] ], [ [[X]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret double [[RETVAL]]
+;
entry:
- %z = fadd double %z1, %z2
- %cmp = fcmp oeq double %y, %z
- br i1 %cmp, label %if, label %return
+ %z = fadd double %z1, %z2
+ %cmp = fcmp oeq double %y, %z
+ br i1 %cmp, label %if, label %return
if:
- %div = fdiv double %x, %z
- br label %return
+ %div = fdiv double %x, %z
+ br label %return
return:
- %retval = phi double [ %div, %if ], [ %x, %entry ]
- ret double %retval
+ %retval = phi double [ %div, %if ], [ %x, %entry ]
+ ret double %retval
-; CHECK-LABEL: define double @fcmp_oeq_maybe_zero(
-; CHECK: %div = fdiv double %x, %z
}
define double @fcmp_une_maybe_zero(double %x, double %y, double %z1, double %z2) {
+; CHECK-LABEL: define double @fcmp_une_maybe_zero(
+; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]], double [[Z1:%.*]], double [[Z2:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[Z:%.*]] = fadd double [[Z1]], [[Z2]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp une double [[Y]], [[Z]]
+; CHECK-NEXT: br i1 [[CMP]], label [[RETURN:%.*]], label [[ELSE:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[DIV:%.*]] = fdiv double [[X]], [[Z]]
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL:%.*]] = phi double [ [[DIV]], [[ELSE]] ], [ [[X]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret double [[RETVAL]]
+;
entry:
- %z = fadd double %z1, %z2
- %cmp = fcmp une double %y, %z
- br i1 %cmp, label %return, label %else
+ %z = fadd double %z1, %z2
+ %cmp = fcmp une double %y, %z
+ br i1 %cmp, label %return, label %else
else:
- %div = fdiv double %x, %z
- br label %return
+ %div = fdiv double %x, %z
+ br label %return
return:
- %retval = phi double [ %div, %else ], [ %x, %entry ]
- ret double %retval
+ %retval = phi double [ %div, %else ], [ %x, %entry ]
+ ret double %retval
-; CHECK-LABEL: define double @fcmp_une_maybe_zero(
-; CHECK: %div = fdiv double %x, %z
}
diff --git a/llvm/test/Transforms/NewGVN/eliminate-callsite-inline.ll b/llvm/test/Transforms/NewGVN/eliminate-callsite-inline.ll
index 748485c..6cf5438 100644
--- a/llvm/test/Transforms/NewGVN/eliminate-callsite-inline.ll
+++ b/llvm/test/Transforms/NewGVN/eliminate-callsite-inline.ll
@@ -1,15 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=inline,newgvn -S < %s | FileCheck %s
-; CHECK-LABEL: @f2()
-; CHECK-NEXT: ret void
define void @f2() {
+; CHECK-LABEL: define void @f2() {
+; CHECK-NEXT: ret void
+;
call void @f1()
call void @f1()
ret void
}
define internal void @f1() #1 {
+; CHECK-LABEL: define internal void @f1(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret void
+;
entry:
ret void
}
diff --git a/llvm/test/Transforms/NewGVN/equivalent-phi.ll b/llvm/test/Transforms/NewGVN/equivalent-phi.ll
index 925795d..ba4fc14 100644
--- a/llvm/test/Transforms/NewGVN/equivalent-phi.ll
+++ b/llvm/test/Transforms/NewGVN/equivalent-phi.ll
@@ -11,22 +11,22 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define i32 @bar(i32 %arg, i32 %arg1, i32 %arg2) #0 {
; CHECK-LABEL: @bar(
; CHECK-NEXT: bb:
-; CHECK-NEXT: br label %bb3
+; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb3:
-; CHECK-NEXT: [[TMP:%.*]] = phi i32 [ %arg, %bb ], [ [[TMP:%.*]]15, %bb17 ]
-; CHECK-NEXT: [[TMP4:%.*]] = phi i32 [ %arg2, %bb ], [ [[TMP18:%.*]], %bb17 ]
-; CHECK-NEXT: [[TMP6:%.*]] = phi i32 [ 0, %bb ], [ [[TMP14:%.*]], %bb17 ]
+; CHECK-NEXT: [[TMP:%.*]] = phi i32 [ [[ARG:%.*]], [[BB:%.*]] ], [ [[TMP15:%.*]], [[BB17:%.*]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = phi i32 [ [[ARG2:%.*]], [[BB]] ], [ [[TMP18:%.*]], [[BB17]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = phi i32 [ 0, [[BB]] ], [ [[TMP14:%.*]], [[BB17]] ]
; CHECK-NEXT: [[TMP7:%.*]] = sext i32 [[TMP]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1024 x i32], ptr @global, i64 0, i64 [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = add nsw i32 [[TMP6]], [[TMP9]]
; CHECK-NEXT: [[TMP14]] = add nsw i32 [[TMP10]], [[TMP9]]
-; CHECK-NEXT: [[TMP15:%.*]] = add nsw i32 [[TMP]], %arg1
-; CHECK-NEXT: br label %bb17
+; CHECK-NEXT: [[TMP15]] = add nsw i32 [[TMP]], [[ARG1:%.*]]
+; CHECK-NEXT: br label [[BB17]]
; CHECK: bb17:
; CHECK-NEXT: [[TMP18]] = add i32 [[TMP4]], -1
; CHECK-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP4]], 0
-; CHECK-NEXT: br i1 [[TMP19]], label %bb3, label %bb20
+; CHECK-NEXT: br i1 [[TMP19]], label [[BB3]], label [[BB20:%.*]]
; CHECK: bb20:
; CHECK-NEXT: ret i32 [[TMP14]]
;
diff --git a/llvm/test/Transforms/NewGVN/fold-const-expr.ll b/llvm/test/Transforms/NewGVN/fold-const-expr.ll
index 2821791..54020b88d 100644
--- a/llvm/test/Transforms/NewGVN/fold-const-expr.ll
+++ b/llvm/test/Transforms/NewGVN/fold-const-expr.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; GVN failed to do constant expression folding and expanded
; them unfolded in many places, producing exponentially large const
; expressions. As a result, the compilation never finished.
@@ -6,6 +7,16 @@
; RUN: opt -passes=newgvn -S < %s | FileCheck %s
%2 = type { i32, i32, i32, i32, i32 }
define i32 @_Z16vector3util_mainv(i32 %x, i32 %y) {
+; CHECK-LABEL: define i32 @_Z16vector3util_mainv(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[TMP114:%.*]] = getelementptr inbounds [[TMP0]], ptr [[TMP1]], i64 0, i32 1
+; CHECK-NEXT: store <4 x i32> <i32 234567891, i32 345678912, i32 456789123, i32 0>, ptr [[TMP114]], align 4
+; CHECK-NEXT: store i32 310393545, ptr [[TMP114]], align 4
+; CHECK-NEXT: store i32 -383584258, ptr [[TMP114]], align 4
+; CHECK-NEXT: store i32 -57163022, ptr [[TMP114]], align 4
+; CHECK-NEXT: ret i32 0
+;
%tmp1 = alloca %2, align 4
%tmp114 = getelementptr inbounds %2, ptr %tmp1, i64 0, i32 1
store <4 x i32> <i32 234567891, i32 345678912, i32 456789123, i32 0>, ptr %tmp114, align 4
@@ -36,7 +47,6 @@ define i32 @_Z16vector3util_mainv(i32 %x, i32 %y) {
%tmp1739 = shl i32 %tmp1738, 22
%tmp1740 = xor i32 %tmp1739, %tmp1738
store i32 %tmp1740, ptr %tmp1683, align 4
-; CHECK: store i32 310393545, ptr %tmp114, align 4
%tmp1756 = getelementptr inbounds %2, ptr %tmp1, i64 0, i32 1
%tmp1761 = load i32, ptr %tmp1756, align 4
%tmp1766 = shl i32 %tmp1761, 5
@@ -64,7 +74,6 @@ define i32 @_Z16vector3util_mainv(i32 %x, i32 %y) {
%tmp1812 = shl i32 %tmp1811, 22
%tmp1813 = xor i32 %tmp1812, %tmp1811
store i32 %tmp1813, ptr %tmp1756, align 4
-; CHECK: store i32 -383584258, ptr %tmp114, align 4
%tmp2645 = getelementptr inbounds %2, ptr %tmp1, i64 0, i32 1
%tmp2650 = load i32, ptr %tmp2645, align 4
%tmp2655 = shl i32 %tmp2650, 5
@@ -92,6 +101,5 @@ define i32 @_Z16vector3util_mainv(i32 %x, i32 %y) {
%tmp2701 = shl i32 %tmp2700, 22
%tmp2702 = xor i32 %tmp2701, %tmp2700
store i32 %tmp2702, ptr %tmp2645, align 4
-; CHECK: store i32 -57163022, ptr %tmp114, align 4
ret i32 0
}
diff --git a/llvm/test/Transforms/NewGVN/fpmath.ll b/llvm/test/Transforms/NewGVN/fpmath.ll
index e8cec8a..d936c01 100644
--- a/llvm/test/Transforms/NewGVN/fpmath.ll
+++ b/llvm/test/Transforms/NewGVN/fpmath.ll
@@ -1,10 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S < %s | FileCheck %s
define double @test1(double %x, double %y) {
-; CHECK: @test1(double %x, double %y)
-; CHECK: %add1 = fadd double %x, %y
-; CHECK-NOT: fpmath
-; CHECK: %foo = fadd double %add1, %add1
+; CHECK-LABEL: define double @test1(
+; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]]) {
+; CHECK-NEXT: [[ADD1:%.*]] = fadd double [[X]], [[Y]]
+; CHECK-NEXT: [[FOO:%.*]] = fadd double [[ADD1]], [[ADD1]]
+; CHECK-NEXT: ret double [[FOO]]
+;
%add1 = fadd double %x, %y, !fpmath !0
%add2 = fadd double %x, %y
%foo = fadd double %add1, %add2
@@ -12,9 +15,12 @@ define double @test1(double %x, double %y) {
}
define double @test2(double %x, double %y) {
-; CHECK: @test2(double %x, double %y)
-; CHECK: %add1 = fadd double %x, %y, !fpmath ![[MD0:[0-9]+]]
-; CHECK: %foo = fadd double %add1, %add1
+; CHECK-LABEL: define double @test2(
+; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]]) {
+; CHECK-NEXT: [[ADD1:%.*]] = fadd double [[X]], [[Y]], !fpmath [[META0:![0-9]+]]
+; CHECK-NEXT: [[FOO:%.*]] = fadd double [[ADD1]], [[ADD1]]
+; CHECK-NEXT: ret double [[FOO]]
+;
%add1 = fadd double %x, %y, !fpmath !0
%add2 = fadd double %x, %y, !fpmath !0
%foo = fadd double %add1, %add2
@@ -22,9 +28,12 @@ define double @test2(double %x, double %y) {
}
define double @test3(double %x, double %y) {
-; CHECK: @test3(double %x, double %y)
-; CHECK: %add1 = fadd double %x, %y, !fpmath ![[MD1:[0-9]+]]
-; CHECK: %foo = fadd double %add1, %add1
+; CHECK-LABEL: define double @test3(
+; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]]) {
+; CHECK-NEXT: [[ADD1:%.*]] = fadd double [[X]], [[Y]], !fpmath [[META1:![0-9]+]]
+; CHECK-NEXT: [[FOO:%.*]] = fadd double [[ADD1]], [[ADD1]]
+; CHECK-NEXT: ret double [[FOO]]
+;
%add1 = fadd double %x, %y, !fpmath !1
%add2 = fadd double %x, %y, !fpmath !0
%foo = fadd double %add1, %add2
@@ -32,9 +41,12 @@ define double @test3(double %x, double %y) {
}
define double @test4(double %x, double %y) {
-; CHECK: @test4(double %x, double %y)
-; CHECK: %add1 = fadd double %x, %y, !fpmath ![[MD1]]
-; CHECK: %foo = fadd double %add1, %add1
+; CHECK-LABEL: define double @test4(
+; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]]) {
+; CHECK-NEXT: [[ADD1:%.*]] = fadd double [[X]], [[Y]], !fpmath [[META1]]
+; CHECK-NEXT: [[FOO:%.*]] = fadd double [[ADD1]], [[ADD1]]
+; CHECK-NEXT: ret double [[FOO]]
+;
%add1 = fadd double %x, %y, !fpmath !0
%add2 = fadd double %x, %y, !fpmath !1
%foo = fadd double %add1, %add2
@@ -42,17 +54,22 @@ define double @test4(double %x, double %y) {
}
define double @test5(double %x) {
-; CHECK: @test5(double %x)
-; CHECK: %neg1 = fneg double %x, !fpmath ![[MD1]]
-; CHECK: %foo = fadd double %neg1, %neg1
+; CHECK-LABEL: define double @test5(
+; CHECK-SAME: double [[X:%.*]]) {
+; CHECK-NEXT: [[NEG1:%.*]] = fneg double [[X]], !fpmath [[META1]]
+; CHECK-NEXT: [[FOO:%.*]] = fadd double [[NEG1]], [[NEG1]]
+; CHECK-NEXT: ret double [[FOO]]
+;
%neg1 = fneg double %x, !fpmath !0
%neg2 = fneg double %x, !fpmath !1
%foo = fadd double %neg1, %neg2
ret double %foo
}
-; CHECK: ![[MD0]] = !{float 5.000000e+00}
-; CHECK: ![[MD1]] = !{float 2.500000e+00}
!0 = !{ float 5.0 }
!1 = !{ float 2.5 }
+;.
+; CHECK: [[META0]] = !{float 5.000000e+00}
+; CHECK: [[META1]] = !{float 2.500000e+00}
+;.
diff --git a/llvm/test/Transforms/NewGVN/funclet.ll b/llvm/test/Transforms/NewGVN/funclet.ll
index 3df3f94..8c1cbd6 100644
--- a/llvm/test/Transforms/NewGVN/funclet.ll
+++ b/llvm/test/Transforms/NewGVN/funclet.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S < %s | FileCheck %s
target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
target triple = "i686-pc-windows-msvc"
@@ -8,13 +9,35 @@ target triple = "i686-pc-windows-msvc"
@"_TI1?AUA@@" = external constant %eh.ThrowInfo
define i8 @f() personality ptr @__CxxFrameHandler3 {
+; CHECK-LABEL: define i8 @f() personality ptr @__CxxFrameHandler3 {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[B:%.*]] = alloca i8, align 1
+; CHECK-NEXT: [[C:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 42, ptr [[B]], align 1
+; CHECK-NEXT: store i8 13, ptr [[C]], align 1
+; CHECK-NEXT: invoke void @_CxxThrowException(ptr [[B]], ptr nonnull @"_TI1?AUA@@")
+; CHECK-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
+; CHECK: catch.dispatch:
+; CHECK-NEXT: [[CS1:%.*]] = catchswitch within none [label %catch] unwind to caller
+; CHECK: catch:
+; CHECK-NEXT: [[CATCHPAD:%.*]] = catchpad within [[CS1]] [ptr null, i32 64, ptr null]
+; CHECK-NEXT: store i8 5, ptr [[B]], align 1
+; CHECK-NEXT: catchret from [[CATCHPAD]] to label [[TRY_CONT:%.*]]
+; CHECK: try.cont:
+; CHECK-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[B]], align 1
+; CHECK-NEXT: [[LOAD_C:%.*]] = load i8, ptr [[C]], align 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[LOAD_B]], [[LOAD_C]]
+; CHECK-NEXT: ret i8 [[ADD]]
+; CHECK: unreachable:
+; CHECK-NEXT: unreachable
+;
entry:
%b = alloca i8
%c = alloca i8
store i8 42, ptr %b
store i8 13, ptr %c
invoke void @_CxxThrowException(ptr %b, ptr nonnull @"_TI1?AUA@@")
- to label %unreachable unwind label %catch.dispatch
+ to label %unreachable unwind label %catch.dispatch
catch.dispatch: ; preds = %entry
%cs1 = catchswitch within none [label %catch] unwind to caller
@@ -33,11 +56,6 @@ try.cont: ; preds = %catch
unreachable: ; preds = %entry
unreachable
}
-; CHECK-LABEL: define i8 @f(
-; CHECK: %[[load_b:.*]] = load i8, ptr %b
-; CHECK-NEXT: %[[load_c:.*]] = load i8, ptr %c
-; CHECK-NEXT: %[[add:.*]] = add i8 %[[load_b]], %[[load_c]]
-; CHECK-NEXT: ret i8 %[[add]]
declare i32 @__CxxFrameHandler3(...)
diff --git a/llvm/test/Transforms/NewGVN/int_sideeffect.ll b/llvm/test/Transforms/NewGVN/int_sideeffect.ll
index f715d02..a2c54bd 100644
--- a/llvm/test/Transforms/NewGVN/int_sideeffect.ll
+++ b/llvm/test/Transforms/NewGVN/int_sideeffect.ll
@@ -1,27 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -S < %s -passes=newgvn | FileCheck %s
declare void @llvm.sideeffect()
; Store-to-load forwarding across a @llvm.sideeffect.
-; CHECK-LABEL: s2l
-; CHECK-NOT: load
define float @s2l(ptr %p) {
- store float 0.0, ptr %p
- call void @llvm.sideeffect()
- %t = load float, ptr %p
- ret float %t
+; CHECK-LABEL: define float @s2l(
+; CHECK-SAME: ptr [[P:%.*]]) {
+; CHECK-NEXT: store float 0.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT: call void @llvm.sideeffect()
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ store float 0.0, ptr %p
+ call void @llvm.sideeffect()
+ %t = load float, ptr %p
+ ret float %t
}
; Redundant load elimination across a @llvm.sideeffect.
-; CHECK-LABEL: rle
-; CHECK: load
-; CHECK-NOT: load
define float @rle(ptr %p) {
- %r = load float, ptr %p
- call void @llvm.sideeffect()
- %s = load float, ptr %p
- %t = fadd float %r, %s
- ret float %t
+; CHECK-LABEL: define float @rle(
+; CHECK-SAME: ptr [[P:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = load float, ptr [[P]], align 4
+; CHECK-NEXT: call void @llvm.sideeffect()
+; CHECK-NEXT: [[T:%.*]] = fadd float [[R]], [[R]]
+; CHECK-NEXT: ret float [[T]]
+;
+ %r = load float, ptr %p
+ call void @llvm.sideeffect()
+ %s = load float, ptr %p
+ %t = fadd float %r, %s
+ ret float %t
}
diff --git a/llvm/test/Transforms/NewGVN/invariant.group.ll b/llvm/test/Transforms/NewGVN/invariant.group.ll
index 81e733f..7c14059 100644
--- a/llvm/test/Transforms/NewGVN/invariant.group.ll
+++ b/llvm/test/Transforms/NewGVN/invariant.group.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
%struct.A = type { ptr }
@@ -6,86 +7,131 @@
@unknownPtr = external global i8
-; CHECK-LABEL: define i8 @simple() {
define i8 @simple() {
+; CHECK-LABEL: define i8 @simple() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0:![0-9]+]]
+; CHECK-NEXT: call void @foo(ptr [[PTR]])
+; CHECK-NEXT: ret i8 42
+;
entry:
- %ptr = alloca i8
- store i8 42, ptr %ptr, !invariant.group !0
- call void @foo(ptr %ptr)
-
- %a = load i8, ptr %ptr, !invariant.group !0
- %b = load i8, ptr %ptr, !invariant.group !0
- %c = load i8, ptr %ptr, !invariant.group !0
-; CHECK: ret i8 42
- ret i8 %a
+ %ptr = alloca i8
+ store i8 42, ptr %ptr, !invariant.group !0
+ call void @foo(ptr %ptr)
+
+ %a = load i8, ptr %ptr, !invariant.group !0
+ %b = load i8, ptr %ptr, !invariant.group !0
+ %c = load i8, ptr %ptr, !invariant.group !0
+ ret i8 %a
}
-; CHECK-LABEL: define i8 @optimizable1() {
define i8 @optimizable1() {
+; CHECK-LABEL: define i8 @optimizable1() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: [[PTR2:%.*]] = call ptr @llvm.launder.invariant.group.p0(ptr [[PTR]])
+; CHECK-NEXT: call void @foo(ptr [[PTR2]])
+; CHECK-NEXT: ret i8 42
+;
entry:
- %ptr = alloca i8
- store i8 42, ptr %ptr, !invariant.group !0
- %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr)
- %a = load i8, ptr %ptr, !invariant.group !0
-
- call void @foo(ptr %ptr2); call to use %ptr2
-; CHECK: ret i8 42
- ret i8 %a
+ %ptr = alloca i8
+ store i8 42, ptr %ptr, !invariant.group !0
+ %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr)
+ %a = load i8, ptr %ptr, !invariant.group !0
+
+ call void @foo(ptr %ptr2); call to use %ptr2
+ ret i8 %a
}
-; CHECK-LABEL: define i8 @optimizable2() {
define i8 @optimizable2() {
+; CHECK-LABEL: define i8 @optimizable2() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @foo(ptr [[PTR]])
+; CHECK-NEXT: store i8 13, ptr [[PTR]], align 1
+; CHECK-NEXT: call void @bar(i8 13)
+; CHECK-NEXT: call void @foo(ptr [[PTR]])
+; CHECK-NEXT: ret i8 42
+;
entry:
- %ptr = alloca i8
- store i8 42, ptr %ptr, !invariant.group !0
- call void @foo(ptr %ptr)
-
- store i8 13, ptr %ptr ; can't use this store with invariant.group
- %a = load i8, ptr %ptr
- call void @bar(i8 %a) ; call to use %a
-
- call void @foo(ptr %ptr)
- %b = load i8, ptr %ptr, !invariant.group !0
-
-; CHECK: ret i8 42
- ret i8 %b
+ %ptr = alloca i8
+ store i8 42, ptr %ptr, !invariant.group !0
+ call void @foo(ptr %ptr)
+
+ store i8 13, ptr %ptr ; can't use this store with invariant.group
+ %a = load i8, ptr %ptr
+ call void @bar(i8 %a) ; call to use %a
+
+ call void @foo(ptr %ptr)
+ %b = load i8, ptr %ptr, !invariant.group !0
+
+ ret i8 %b
}
-; CHECK-LABEL: define i1 @proveEqualityForStrip(
define i1 @proveEqualityForStrip(ptr %a) {
+; CHECK-LABEL: define i1 @proveEqualityForStrip(
+; CHECK-SAME: ptr [[A:%.*]]) {
+; CHECK-NEXT: ret i1 true
+;
%b1 = call ptr @llvm.strip.invariant.group.p0(ptr %a)
-; CHECK-NOT: llvm.strip.invariant.group
%b2 = call ptr @llvm.strip.invariant.group.p0(ptr %a)
%r = icmp eq ptr %b1, %b2
-; CHECK: ret i1 true
ret i1 %r
}
-; CHECK-LABEL: define i8 @unoptimizable1() {
define i8 @unoptimizable1() {
+; CHECK-LABEL: define i8 @unoptimizable1() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1
+; CHECK-NEXT: call void @foo(ptr [[PTR]])
+; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[PTR]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: ret i8 [[A]]
+;
entry:
- %ptr = alloca i8
- store i8 42, ptr %ptr
- call void @foo(ptr %ptr)
- %a = load i8, ptr %ptr, !invariant.group !0
-; CHECK: ret i8 %a
- ret i8 %a
+ %ptr = alloca i8
+ store i8 42, ptr %ptr
+ call void @foo(ptr %ptr)
+ %a = load i8, ptr %ptr, !invariant.group !0
+ ret i8 %a
}
; NewGVN doesn't support assumes.
-; CHECK-LABEL: define void @indirectLoads() {
define void @indirectLoads() {
+; CHECK-LABEL: define void @indirectLoads() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: [[CALL:%.*]] = call ptr @getPointer(ptr null)
+; CHECK-NEXT: call void @_ZN1AC1Ev(ptr [[CALL]])
+; CHECK-NEXT: [[VTABLE:%.*]] = load ptr, ptr [[CALL]], align 8, !invariant.group [[META0]]
+; CHECK-NEXT: [[CMP_VTABLES:%.*]] = icmp eq ptr [[VTABLE]], getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2)
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_VTABLES]])
+; CHECK-NEXT: store ptr [[CALL]], ptr [[A]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[VTABLE]], align 8
+; CHECK-NEXT: call void [[TMP0]](ptr [[CALL]])
+; CHECK-NEXT: [[VTABLE2:%.*]] = load ptr, ptr [[CALL]], align 8, !invariant.group [[META0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VTABLE2]], align 8
+; CHECK-NEXT: call void [[TMP1]](ptr [[CALL]])
+; CHECK-NEXT: [[VTABLE4:%.*]] = load ptr, ptr [[CALL]], align 8, !invariant.group [[META0]]
+; CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VTABLE4]], align 8
+; CHECK-NEXT: call void [[TMP2]](ptr [[CALL]])
+; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[VTABLE]], align 8
+; CHECK-NEXT: call void [[TMP3]](ptr [[CALL]])
+; CHECK-NEXT: ret void
+;
entry:
%a = alloca ptr, align 8
-
- %call = call ptr @getPointer(ptr null)
+
+ %call = call ptr @getPointer(ptr null)
call void @_ZN1AC1Ev(ptr %call)
-
-; CHECK: %vtable = load {{.*}} !invariant.group
+
%vtable = load ptr, ptr %call, align 8, !invariant.group !0
%cmp.vtables = icmp eq ptr %vtable, getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2)
call void @llvm.assume(i1 %cmp.vtables)
-
+
store ptr %call, ptr %a, align 8
%0 = load ptr, ptr %a, align 8
@@ -98,36 +144,45 @@ entry:
; FIXME: call void @_ZN1A3fooEv(
%vtable2 = load ptr, ptr %2, align 8, !invariant.group !0
%3 = load ptr, ptr %vtable2, align 8
-
+
call void %3(ptr %2)
%4 = load ptr, ptr %a, align 8
-
+
%vtable4 = load ptr, ptr %4, align 8, !invariant.group !0
%5 = load ptr, ptr %vtable4, align 8
; FIXME: call void @_ZN1A3fooEv(
call void %5(ptr %4)
-
+
%vtable5 = load ptr, ptr %call, align 8, !invariant.group !0
%6 = load ptr, ptr %vtable5, align 8
; FIXME: call void @_ZN1A3fooEv(
call void %6(ptr %4)
-
+
ret void
}
; NewGVN won't CSE loads with different pointee types.
-; CHECK-LABEL: define void @combiningBitCastWithLoad() {
define void @combiningBitCastWithLoad() {
+; CHECK-LABEL: define void @combiningBitCastWithLoad() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: [[CALL:%.*]] = call ptr @getPointer(ptr null)
+; CHECK-NEXT: call void @_ZN1AC1Ev(ptr [[CALL]])
+; CHECK-NEXT: [[VTABLE:%.*]] = load ptr, ptr [[CALL]], align 8, !invariant.group [[META0]]
+; CHECK-NEXT: store ptr [[CALL]], ptr [[A]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[VTABLE]], align 8
+; CHECK-NEXT: call void [[TMP0]](ptr [[CALL]])
+; CHECK-NEXT: ret void
+;
entry:
%a = alloca ptr, align 8
-
- %call = call ptr @getPointer(ptr null)
+
+ %call = call ptr @getPointer(ptr null)
call void @_ZN1AC1Ev(ptr %call)
-
-; CHECK: %vtable = load {{.*}} !invariant.group
+
%vtable = load ptr, ptr %call, align 8, !invariant.group !0
%cmp.vtables = icmp eq ptr %vtable, getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2)
-
+
store ptr %call, ptr %a, align 8
; FIXME-NOT: !invariant.group
%0 = load ptr, ptr %a, align 8
@@ -139,173 +194,245 @@ entry:
ret void
}
-; CHECK-LABEL:define void @loadCombine() {
define void @loadCombine() {
+; CHECK-LABEL: define void @loadCombine() {
+; CHECK-NEXT: enter:
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1
+; CHECK-NEXT: call void @foo(ptr [[PTR]])
+; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[PTR]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @bar(i8 [[A]])
+; CHECK-NEXT: call void @bar(i8 [[A]])
+; CHECK-NEXT: ret void
+;
enter:
%ptr = alloca i8
store i8 42, ptr %ptr
call void @foo(ptr %ptr)
-; CHECK: %[[A:.*]] = load i8, ptr %ptr, align 1, !invariant.group
%a = load i8, ptr %ptr, !invariant.group !0
-; CHECK-NOT: load
%b = load i8, ptr %ptr, !invariant.group !0
-; CHECK: call void @bar(i8 %[[A]])
call void @bar(i8 %a)
-; CHECK: call void @bar(i8 %[[A]])
call void @bar(i8 %b)
ret void
}
-; CHECK-LABEL: define void @loadCombine1() {
define void @loadCombine1() {
+; CHECK-LABEL: define void @loadCombine1() {
+; CHECK-NEXT: enter:
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1
+; CHECK-NEXT: call void @foo(ptr [[PTR]])
+; CHECK-NEXT: [[C:%.*]] = load i8, ptr [[PTR]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @bar(i8 [[C]])
+; CHECK-NEXT: call void @bar(i8 [[C]])
+; CHECK-NEXT: ret void
+;
enter:
%ptr = alloca i8
store i8 42, ptr %ptr
call void @foo(ptr %ptr)
-; CHECK: %[[D:.*]] = load i8, ptr %ptr, align 1, !invariant.group
%c = load i8, ptr %ptr
-; CHECK-NOT: load
%d = load i8, ptr %ptr, !invariant.group !0
-; CHECK: call void @bar(i8 %[[D]])
call void @bar(i8 %c)
-; CHECK: call void @bar(i8 %[[D]])
call void @bar(i8 %d)
ret void
}
-; CHECK-LABEL: define void @loadCombine2() {
define void @loadCombine2() {
+; CHECK-LABEL: define void @loadCombine2() {
+; CHECK-NEXT: enter:
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1
+; CHECK-NEXT: call void @foo(ptr [[PTR]])
+; CHECK-NEXT: [[E:%.*]] = load i8, ptr [[PTR]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @bar(i8 [[E]])
+; CHECK-NEXT: call void @bar(i8 [[E]])
+; CHECK-NEXT: ret void
+;
enter:
%ptr = alloca i8
store i8 42, ptr %ptr
call void @foo(ptr %ptr)
-; CHECK: %[[E:.*]] = load i8, ptr %ptr, align 1, !invariant.group
%e = load i8, ptr %ptr, !invariant.group !0
-; CHECK-NOT: load
%f = load i8, ptr %ptr
-; CHECK: call void @bar(i8 %[[E]])
call void @bar(i8 %e)
-; CHECK: call void @bar(i8 %[[E]])
call void @bar(i8 %f)
ret void
}
-; CHECK-LABEL: define void @loadCombine3() {
define void @loadCombine3() {
+; CHECK-LABEL: define void @loadCombine3() {
+; CHECK-NEXT: enter:
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1
+; CHECK-NEXT: call void @foo(ptr [[PTR]])
+; CHECK-NEXT: [[E:%.*]] = load i8, ptr [[PTR]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @bar(i8 [[E]])
+; CHECK-NEXT: call void @bar(i8 [[E]])
+; CHECK-NEXT: ret void
+;
enter:
%ptr = alloca i8
store i8 42, ptr %ptr
call void @foo(ptr %ptr)
-; CHECK: %[[E:.*]] = load i8, ptr %ptr, align 1, !invariant.group
%e = load i8, ptr %ptr, !invariant.group !0
-; CHECK-NOT: load
%f = load i8, ptr %ptr, !invariant.group !0
-; CHECK: call void @bar(i8 %[[E]])
call void @bar(i8 %e)
-; CHECK: call void @bar(i8 %[[E]])
call void @bar(i8 %f)
ret void
}
-; CHECK-LABEL: define i8 @unoptimizable2() {
define i8 @unoptimizable2() {
+; CHECK-LABEL: define i8 @unoptimizable2() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1
+; CHECK-NEXT: call void @foo(ptr [[PTR]])
+; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[PTR]], align 1
+; CHECK-NEXT: call void @foo(ptr [[PTR]])
+; CHECK-NEXT: ret i8 [[A]]
+;
entry:
- %ptr = alloca i8
- store i8 42, ptr %ptr
- call void @foo(ptr %ptr)
- %a = load i8, ptr %ptr
- call void @foo(ptr %ptr)
- %b = load i8, ptr %ptr, !invariant.group !0
-
-; CHECK: ret i8 %a
- ret i8 %a
+ %ptr = alloca i8
+ store i8 42, ptr %ptr
+ call void @foo(ptr %ptr)
+ %a = load i8, ptr %ptr
+ call void @foo(ptr %ptr)
+ %b = load i8, ptr %ptr, !invariant.group !0
+
+ ret i8 %a
}
-; CHECK-LABEL: define i8 @unoptimizable3() {
define i8 @unoptimizable3() {
+; CHECK-LABEL: define i8 @unoptimizable3() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: [[PTR2:%.*]] = call ptr @getPointer(ptr [[PTR]])
+; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[PTR2]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: ret i8 [[A]]
+;
entry:
- %ptr = alloca i8
- store i8 42, ptr %ptr, !invariant.group !0
- %ptr2 = call ptr @getPointer(ptr %ptr)
- %a = load i8, ptr %ptr2, !invariant.group !0
-
-; CHECK: ret i8 %a
- ret i8 %a
+ %ptr = alloca i8
+ store i8 42, ptr %ptr, !invariant.group !0
+ %ptr2 = call ptr @getPointer(ptr %ptr)
+ %a = load i8, ptr %ptr2, !invariant.group !0
+
+ ret i8 %a
}
; NewGVN cares about the launder for some reason.
-; CHECK-LABEL: define i8 @optimizable4() {
define i8 @optimizable4() {
+; CHECK-LABEL: define i8 @optimizable4() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1
+; CHECK-NEXT: [[PTR2:%.*]] = call ptr @llvm.launder.invariant.group.p0(ptr [[PTR]])
+; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[PTR2]], align 1
+; CHECK-NEXT: ret i8 [[A]]
+;
entry:
- %ptr = alloca i8
- store i8 42, ptr %ptr
- %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr)
+ %ptr = alloca i8
+ store i8 42, ptr %ptr
+ %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr)
; FIXME-NOT: load
- %a = load i8, ptr %ptr2
-
+ %a = load i8, ptr %ptr2
+
; FIXME: ret i8 42
- ret i8 %a
+ ret i8 %a
}
-; CHECK-LABEL: define i8 @volatile1() {
define i8 @volatile1() {
+; CHECK-LABEL: define i8 @volatile1() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @foo(ptr [[PTR]])
+; CHECK-NEXT: [[B:%.*]] = load volatile i8, ptr [[PTR]], align 1
+; CHECK-NEXT: call void @bar(i8 [[B]])
+; CHECK-NEXT: [[C:%.*]] = load volatile i8, ptr [[PTR]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @bar(i8 [[C]])
+; CHECK-NEXT: ret i8 42
+;
entry:
- %ptr = alloca i8
- store i8 42, ptr %ptr, !invariant.group !0
- call void @foo(ptr %ptr)
- %a = load i8, ptr %ptr, !invariant.group !0
- %b = load volatile i8, ptr %ptr
-; CHECK: call void @bar(i8 %b)
- call void @bar(i8 %b)
-
- %c = load volatile i8, ptr %ptr, !invariant.group !0
+ %ptr = alloca i8
+ store i8 42, ptr %ptr, !invariant.group !0
+ call void @foo(ptr %ptr)
+ %a = load i8, ptr %ptr, !invariant.group !0
+ %b = load volatile i8, ptr %ptr
+ call void @bar(i8 %b)
+
+ %c = load volatile i8, ptr %ptr, !invariant.group !0
; We might be able to optimize this, but nobody cares
-; CHECK: call void @bar(i8 %c)
- call void @bar(i8 %c)
-; CHECK: ret i8 42
- ret i8 %a
+ call void @bar(i8 %c)
+ ret i8 %a
}
-; CHECK-LABEL: define i8 @volatile2() {
define i8 @volatile2() {
+; CHECK-LABEL: define i8 @volatile2() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @foo(ptr [[PTR]])
+; CHECK-NEXT: [[B:%.*]] = load volatile i8, ptr [[PTR]], align 1
+; CHECK-NEXT: call void @bar(i8 [[B]])
+; CHECK-NEXT: [[C:%.*]] = load volatile i8, ptr [[PTR]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @bar(i8 [[C]])
+; CHECK-NEXT: ret i8 42
+;
entry:
- %ptr = alloca i8
- store i8 42, ptr %ptr, !invariant.group !0
- call void @foo(ptr %ptr)
- %a = load i8, ptr %ptr, !invariant.group !0
- %b = load volatile i8, ptr %ptr
-; CHECK: call void @bar(i8 %b)
- call void @bar(i8 %b)
-
- %c = load volatile i8, ptr %ptr, !invariant.group !0
+ %ptr = alloca i8
+ store i8 42, ptr %ptr, !invariant.group !0
+ call void @foo(ptr %ptr)
+ %a = load i8, ptr %ptr, !invariant.group !0
+ %b = load volatile i8, ptr %ptr
+ call void @bar(i8 %b)
+
+ %c = load volatile i8, ptr %ptr, !invariant.group !0
; We might be able to optimize this, but nobody cares
-; CHECK: call void @bar(i8 %c)
- call void @bar(i8 %c)
-; CHECK: ret i8 42
- ret i8 %a
+ call void @bar(i8 %c)
+ ret i8 %a
}
-; CHECK-LABEL: define void @fun() {
define void @fun() {
+; CHECK-LABEL: define void @fun() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @foo(ptr [[PTR]])
+; CHECK-NEXT: call void @bar(i8 42)
+; CHECK-NEXT: ret void
+;
entry:
- %ptr = alloca i8
- store i8 42, ptr %ptr, !invariant.group !0
- call void @foo(ptr %ptr)
+ %ptr = alloca i8
+ store i8 42, ptr %ptr, !invariant.group !0
+ call void @foo(ptr %ptr)
- %a = load i8, ptr %ptr, !invariant.group !0 ; Can assume that value under %ptr didn't change
-; CHECK: call void @bar(i8 42)
- call void @bar(i8 %a)
+ %a = load i8, ptr %ptr, !invariant.group !0 ; Can assume that value under %ptr didn't change
+ call void @bar(i8 %a)
- ret void
+ ret void
}
; FIXME: NewGVN doesn't run instsimplify on a load from a vtable definition?
; This test checks if invariant.group understands gep with zeros
-; CHECK-LABEL: define void @testGEP0() {
define void @testGEP0() {
+; CHECK-LABEL: define void @testGEP0() {
+; CHECK-NEXT: [[A:%.*]] = alloca [[STRUCT_A:%.*]], align 8
+; CHECK-NEXT: store ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), ptr [[A]], align 8, !invariant.group [[META0]]
+; CHECK-NEXT: call void @_ZN1A3fooEv(ptr nonnull dereferenceable(8) [[A]])
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @unknownPtr, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TMP2]], label [[_Z1GR1A_EXIT:%.*]], label [[TMP3:%.*]]
+; CHECK: 3:
+; CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), align 8
+; CHECK-NEXT: call void [[TMP4]](ptr nonnull [[A]])
+; CHECK-NEXT: br label [[_Z1GR1A_EXIT]]
+; CHECK: _Z1gR1A.exit:
+; CHECK-NEXT: ret void
+;
%a = alloca %struct.A, align 8
store ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), ptr %a, align 8, !invariant.group !0
-; CHECK: call void @_ZN1A3fooEv(ptr nonnull dereferenceable(8) %a)
call void @_ZN1A3fooEv(ptr nonnull dereferenceable(8) %a) ; This call may change vptr
%1 = load i8, ptr @unknownPtr, align 4
%2 = icmp eq i8 %1, 0
@@ -325,54 +452,93 @@ _Z1gR1A.exit: ; preds = %0, %3
; Check if no optimizations are performed with global pointers.
; FIXME: we could do the optimizations if we would check if dependency comes
; from the same function.
-; CHECK-LABEL: define void @testGlobal() {
define void @testGlobal() {
-; CHECK: %a = load i8, ptr @unknownPtr, align 1, !invariant.group !0
- %a = load i8, ptr @unknownPtr, !invariant.group !0
- call void @foo2(ptr @unknownPtr, i8 %a)
-; CHECK: %1 = load i8, ptr @unknownPtr, align 1, !invariant.group !0
- %1 = load i8, ptr @unknownPtr, !invariant.group !0
- call void @bar(i8 %1)
-
- call void @fooBit(ptr @unknownPtr, i1 1)
+; CHECK-LABEL: define void @testGlobal() {
+; CHECK-NEXT: [[A:%.*]] = load i8, ptr @unknownPtr, align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @foo2(ptr @unknownPtr, i8 [[A]])
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @unknownPtr, align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @bar(i8 [[TMP1]])
+; CHECK-NEXT: call void @fooBit(ptr @unknownPtr, i1 true)
+; CHECK-NEXT: [[TMP2:%.*]] = load i1, ptr @unknownPtr, align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @fooBit(ptr @unknownPtr, i1 [[TMP2]])
+; CHECK-NEXT: [[TMP3:%.*]] = load i1, ptr @unknownPtr, align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @fooBit(ptr @unknownPtr, i1 [[TMP3]])
+; CHECK-NEXT: ret void
+;
+ %a = load i8, ptr @unknownPtr, !invariant.group !0
+ call void @foo2(ptr @unknownPtr, i8 %a)
+ %1 = load i8, ptr @unknownPtr, !invariant.group !0
+ call void @bar(i8 %1)
+
+ call void @fooBit(ptr @unknownPtr, i1 1)
; Adding regex because of canonicalization of bitcasts
-; CHECK: %2 = load i1, ptr {{.*}}, !invariant.group !0
- %2 = load i1, ptr @unknownPtr, !invariant.group !0
- call void @fooBit(ptr @unknownPtr, i1 %2)
-; CHECK: %3 = load i1, ptr {{.*}}, !invariant.group !0
- %3 = load i1, ptr @unknownPtr, !invariant.group !0
- call void @fooBit(ptr @unknownPtr, i1 %3)
- ret void
+ %2 = load i1, ptr @unknownPtr, !invariant.group !0
+ call void @fooBit(ptr @unknownPtr, i1 %2)
+ %3 = load i1, ptr @unknownPtr, !invariant.group !0
+ call void @fooBit(ptr @unknownPtr, i1 %3)
+ ret void
}
; Might be similar to above where NewGVN doesn't handle loads of different types from the same location.
; Not super important anyway.
-; CHECK-LABEL: define void @testTrunc() {
define void @testTrunc() {
- %a = alloca i8
- call void @foo(ptr %a)
-; CHECK: %b = load i8, ptr %a, align 1, !invariant.group !0
- %b = load i8, ptr %a, !invariant.group !0
- call void @foo2(ptr %a, i8 %b)
-
- %1 = load i8, ptr %a, !invariant.group !0
-; CHECK: call void @bar(i8 %b)
- call void @bar(i8 %1)
-
- call void @fooBit(ptr %a, i1 1)
+; CHECK-LABEL: define void @testTrunc() {
+; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
+; CHECK-NEXT: call void @foo(ptr [[A]])
+; CHECK-NEXT: [[B:%.*]] = load i8, ptr [[A]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @foo2(ptr [[A]], i8 [[B]])
+; CHECK-NEXT: call void @bar(i8 [[B]])
+; CHECK-NEXT: call void @fooBit(ptr [[A]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = load i1, ptr [[A]], align 1, !invariant.group [[META0]]
+; CHECK-NEXT: call void @fooBit(ptr [[A]], i1 [[TMP1]])
+; CHECK-NEXT: call void @fooBit(ptr [[A]], i1 [[TMP1]])
+; CHECK-NEXT: ret void
+;
+ %a = alloca i8
+ call void @foo(ptr %a)
+ %b = load i8, ptr %a, !invariant.group !0
+ call void @foo2(ptr %a, i8 %b)
+
+ %1 = load i8, ptr %a, !invariant.group !0
+ call void @bar(i8 %1)
+
+ call void @fooBit(ptr %a, i1 1)
; FIXME: %1 = trunc i8 %b to i1
- %2 = load i1, ptr %a, !invariant.group !0
+ %2 = load i1, ptr %a, !invariant.group !0
; FIXME-NEXT: call void @fooBit(ptr %a, i1 %1)
- call void @fooBit(ptr %a, i1 %2)
- %3 = load i1, ptr %a, !invariant.group !0
+ call void @fooBit(ptr %a, i1 %2)
+ %3 = load i1, ptr %a, !invariant.group !0
; FIXME-NEXT: call void @fooBit(ptr %a, i1 %1)
- call void @fooBit(ptr %a, i1 %3)
- ret void
+ call void @fooBit(ptr %a, i1 %3)
+ ret void
}
; See comment in @testGEP0 on what NewGVN is lacking.
-; CHECK-LABEL: define void @handling_loops()
define void @handling_loops() {
+; CHECK-LABEL: define void @handling_loops() {
+; CHECK-NEXT: [[A:%.*]] = alloca [[STRUCT_A:%.*]], align 8
+; CHECK-NEXT: store ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), ptr [[A]], align 8, !invariant.group [[META0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @unknownPtr, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i8 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TMP2]], label [[DOTLR_PH_I:%.*]], label [[_Z2G2R1A_EXIT:%.*]]
+; CHECK: .lr.ph.i:
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[TMP1]], 1
+; CHECK-NEXT: br i1 [[TMP3]], label [[DOT_CRIT_EDGE_PREHEADER:%.*]], label [[_Z2G2R1A_EXIT]]
+; CHECK: ._crit_edge.preheader:
+; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
+; CHECK: ._crit_edge:
+; CHECK-NEXT: [[TMP4:%.*]] = phi i8 [ [[TMP6:%.*]], [[DOT_CRIT_EDGE]] ], [ 1, [[DOT_CRIT_EDGE_PREHEADER]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), align 8
+; CHECK-NEXT: call void [[TMP5]](ptr nonnull [[A]])
+; CHECK-NEXT: [[TMP6]] = add nuw nsw i8 [[TMP4]], 1
+; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr @unknownPtr, align 4
+; CHECK-NEXT: [[TMP8:%.*]] = icmp slt i8 [[TMP6]], [[TMP7]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[DOT_CRIT_EDGE]], label [[_Z2G2R1A_EXIT_LOOPEXIT:%.*]]
+; CHECK: _Z2g2R1A.exit.loopexit:
+; CHECK-NEXT: br label [[_Z2G2R1A_EXIT]]
+; CHECK: _Z2g2R1A.exit:
+; CHECK-NEXT: ret void
+;
%a = alloca %struct.A, align 8
store ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), ptr %a, align 8, !invariant.group !0
%1 = load i8, ptr @unknownPtr, align 4
@@ -424,3 +590,6 @@ declare void @llvm.assume(i1 %cmp.vtables) #0
attributes #0 = { nounwind }
!0 = !{}
+;.
+; CHECK: [[META0]] = !{}
+;.
diff --git a/llvm/test/Transforms/NewGVN/invariant.start.ll b/llvm/test/Transforms/NewGVN/invariant.start.ll
index 100b79f..9bf1c55 100644
--- a/llvm/test/Transforms/NewGVN/invariant.start.ll
+++ b/llvm/test/Transforms/NewGVN/invariant.start.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; Test to make sure llvm.invariant.start calls are not treated as clobbers.
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
@@ -7,10 +8,12 @@ declare void @llvm.invariant.end.p0(ptr, i64, ptr nocapture) nounwind
; We forward store to the load across the invariant.start intrinsic
define i8 @forward_store() {
-; CHECK-LABEL: @forward_store
-; CHECK: call ptr @llvm.invariant.start.p0(i64 1, ptr %a)
-; CHECK-NOT: load
-; CHECK: ret i8 0
+; CHECK-LABEL: define i8 @forward_store() {
+; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 0, ptr [[A]], align 1
+; CHECK-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: ret i8 0
+;
%a = alloca i8
store i8 0, ptr %a
%i = call ptr @llvm.invariant.start.p0(i64 1, ptr %a)
@@ -23,10 +26,18 @@ declare i8 @dummy(ptr nocapture) nounwind readonly
; We forward store to the load in the non-local analysis case,
; i.e. invariant.start is in another basic block.
define i8 @forward_store_nonlocal(i1 %cond) {
-; CHECK-LABEL: forward_store_nonlocal
-; CHECK: call ptr @llvm.invariant.start.p0(i64 1, ptr %a)
-; CHECK: ret i8 0
-; CHECK: ret i8 %val
+; CHECK-LABEL: define i8 @forward_store_nonlocal(
+; CHECK-SAME: i1 [[COND:%.*]]) {
+; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 0, ptr [[A]], align 1
+; CHECK-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: br i1 [[COND]], label [[LOADBLOCK:%.*]], label [[EXIT:%.*]]
+; CHECK: loadblock:
+; CHECK-NEXT: ret i8 0
+; CHECK: exit:
+; CHECK-NEXT: [[VAL:%.*]] = call i8 @dummy(ptr [[A]])
+; CHECK-NEXT: ret i8 [[VAL]]
+;
%a = alloca i8
store i8 0, ptr %a
%i = call ptr @llvm.invariant.start.p0(i64 1, ptr %a)
@@ -43,12 +54,14 @@ exit:
; We should not value forward %foo to the invariant.end corresponding to %bar.
define i8 @forward_store1() {
-; CHECK-LABEL: forward_store1
-; CHECK: %foo = call ptr @llvm.invariant.start.p0
-; CHECK-NOT: load
-; CHECK: %bar = call ptr @llvm.invariant.start.p0
-; CHECK: call void @llvm.invariant.end.p0(ptr %bar, i64 1, ptr %a)
-; CHECK: ret i8 0
+; CHECK-LABEL: define i8 @forward_store1() {
+; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 0, ptr [[A]], align 1
+; CHECK-NEXT: [[FOO:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: [[BAR:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.invariant.end.p0(ptr [[BAR]], i64 1, ptr [[A]])
+; CHECK-NEXT: ret i8 0
+;
%a = alloca i8
store i8 0, ptr %a
%foo = call ptr @llvm.invariant.start.p0(i64 1, ptr %a)
diff --git a/llvm/test/Transforms/NewGVN/lifetime-simple.ll b/llvm/test/Transforms/NewGVN/lifetime-simple.ll
index 5d31101..55e4611 100644
--- a/llvm/test/Transforms/NewGVN/lifetime-simple.ll
+++ b/llvm/test/Transforms/NewGVN/lifetime-simple.ll
@@ -1,12 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin7"
define i8 @test(ptr %P) nounwind {
-; CHECK: lifetime.start
-; CHECK-NOT: load
-; CHECK: lifetime.end
+; CHECK-LABEL: define i8 @test(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[P]])
+; CHECK-NEXT: store i8 1, ptr [[P]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[P]])
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[P]], align 1
+; CHECK-NEXT: ret i8 [[TMP0]]
+;
entry:
call void @llvm.lifetime.start.p0(i64 32, ptr %P)
%0 = load i8, ptr %P
diff --git a/llvm/test/Transforms/NewGVN/load-constant-mem.ll b/llvm/test/Transforms/NewGVN/load-constant-mem.ll
index 06439c59..ae91147 100644
--- a/llvm/test/Transforms/NewGVN/load-constant-mem.ll
+++ b/llvm/test/Transforms/NewGVN/load-constant-mem.ll
@@ -7,14 +7,14 @@ define i32 @test(ptr %p, i32 %i) nounwind {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P:%.*]] = getelementptr [4 x i32], ptr @G, i32 0, i32 [[I:%.*]]
-; CHECK-NEXT: store i8 4, ptr [[P:%.*]]
+; CHECK-NEXT: store i8 4, ptr [[P1:%.*]], align 1
; CHECK-NEXT: ret i32 0
;
entry:
- %P = getelementptr [4 x i32], ptr @G, i32 0, i32 %i
- %A = load i32, ptr %P
+ %p.i = getelementptr [4 x i32], ptr @G, i32 0, i32 %i
+ %A = load i32, ptr %p.i
store i8 4, ptr %p
- %B = load i32, ptr %P
+ %B = load i32, ptr %p.i
%C = sub i32 %A, %B
ret i32 %C
}
diff --git a/llvm/test/Transforms/NewGVN/load-from-unreachable-predecessor.ll b/llvm/test/Transforms/NewGVN/load-from-unreachable-predecessor.ll
index 74cb700..3ca9b9e 100644
--- a/llvm/test/Transforms/NewGVN/load-from-unreachable-predecessor.ll
+++ b/llvm/test/Transforms/NewGVN/load-from-unreachable-predecessor.ll
@@ -1,12 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S < %s | FileCheck %s
; Check that an unreachable predecessor to a PHI node doesn't cause a crash.
; PR21625.
define i32 @f(ptr %f) {
-; CHECK: bb0:
+; CHECK-LABEL: define i32 @f(
+; CHECK-SAME: ptr [[F:%.*]]) {
+; CHECK-NEXT: bb0:
+; CHECK-NEXT: br label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: store i8 poison, ptr null, align 1
+; CHECK-NEXT: br i1 false, label [[BB1:%.*]], label [[BB2]]
+; CHECK: bb2:
+; CHECK-NEXT: [[STOREMERGE:%.*]] = load i32, ptr null, align 4
+; CHECK-NEXT: ret i32 [[STOREMERGE]]
+;
; Load should be removed, since it's ignored.
-; CHECK-NEXT: br label
bb0:
%bar = load ptr, ptr %f
br label %bb2
diff --git a/llvm/test/Transforms/NewGVN/loadforward.ll b/llvm/test/Transforms/NewGVN/loadforward.ll
index d8a9022..85ceafd 100644
--- a/llvm/test/Transforms/NewGVN/loadforward.ll
+++ b/llvm/test/Transforms/NewGVN/loadforward.ll
@@ -9,8 +9,8 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
;; Test that we forward the first store to the second load
define i16 @bazinga() {
; CHECK-LABEL: @bazinga(
-; CHECK-NEXT: [[_TMP10:%.*]] = load i16, ptr getelementptr inbounds (%rec11, ptr @str, i64 0, i32 1)
-; CHECK-NEXT: store i16 [[_TMP10]], ptr @str
+; CHECK-NEXT: [[_TMP10:%.*]] = load i16, ptr getelementptr inbounds ([[REC11:%.*]], ptr @str, i64 0, i32 1), align 2
+; CHECK-NEXT: store i16 [[_TMP10]], ptr @str, align 2
; CHECK-NEXT: [[_TMP15:%.*]] = icmp eq i16 [[_TMP10]], 3
; CHECK-NEXT: [[_TMP16:%.*]] = select i1 [[_TMP15]], i16 1, i16 0
; CHECK-NEXT: br label [[BB1:%.*]]
diff --git a/llvm/test/Transforms/NewGVN/malloc-load-removal.ll b/llvm/test/Transforms/NewGVN/malloc-load-removal.ll
index b487acf..3e11d92 100644
--- a/llvm/test/Transforms/NewGVN/malloc-load-removal.ll
+++ b/llvm/test/Transforms/NewGVN/malloc-load-removal.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -S -passes=newgvn < %s | FileCheck %s
; PR13694
@@ -7,6 +8,17 @@ target triple = "x86_64-apple-macosx10.8.0"
declare ptr @malloc(i64) nounwind allockind("alloc,uninitialized") allocsize(0) "alloc-family"="malloc"
define noalias ptr @test1() nounwind uwtable ssp {
+; CHECK-LABEL: define noalias ptr @test1(
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @malloc(i64 100) #[[ATTR2:[0-9]+]]
+; CHECK-NEXT: br i1 undef, label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: store i8 0, ptr [[CALL]], align 1
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: ret ptr [[CALL]]
+;
entry:
%call = tail call ptr @malloc(i64 100) nounwind
%0 = load i8, ptr %call, align 1
@@ -20,14 +32,22 @@ if.then: ; preds = %entry
if.end: ; preds = %if.then, %entry
ret ptr %call
-; CHECK-LABEL: @test1(
-; CHECK-NOT: load
-; CHECK-NOT: icmp
}
declare ptr @_Znwm(i64) nounwind
define noalias ptr @test2() nounwind uwtable ssp {
+; CHECK-LABEL: define noalias ptr @test2(
+; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @_Znwm(i64 100) #[[ATTR2]]
+; CHECK-NEXT: br i1 undef, label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: store i8 0, ptr [[CALL]], align 1
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: ret ptr [[CALL]]
+;
entry:
%call = tail call ptr @_Znwm(i64 100) nounwind
%0 = load i8, ptr %call, align 1
@@ -41,14 +61,22 @@ if.then: ; preds = %entry
if.end: ; preds = %if.then, %entry
ret ptr %call
-; CHECK-LABEL: @test2(
-; CHECK-NOT: load
-; CHECK-NOT: icmp
}
declare ptr @aligned_alloc(i64 allocalign, i64) nounwind allockind("alloc,uninitialized,aligned") allocsize(1) "alloc-family"="malloc"
define noalias ptr @test3() nounwind uwtable ssp {
+; CHECK-LABEL: define noalias ptr @test3(
+; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @aligned_alloc(i64 256, i64 32) #[[ATTR2]]
+; CHECK-NEXT: br i1 undef, label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: store i8 0, ptr [[CALL]], align 1
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: ret ptr [[CALL]]
+;
entry:
%call = tail call ptr @aligned_alloc(i64 256, i64 32) nounwind
%0 = load i8, ptr %call, align 32
@@ -62,7 +90,4 @@ if.then: ; preds = %entry
if.end: ; preds = %if.then, %entry
ret ptr %call
-; CHECK-LABEL: @test3(
-; CHECK-NOT: load
-; CHECK-NOT: icmp
}
diff --git a/llvm/test/Transforms/NewGVN/memory-handling.ll b/llvm/test/Transforms/NewGVN/memory-handling.ll
index b1e1c4f..c72ff74 100644
--- a/llvm/test/Transforms/NewGVN/memory-handling.ll
+++ b/llvm/test/Transforms/NewGVN/memory-handling.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
;; This test is really dependent on propagating a lot of memory info around, but in the end, not
;; screwing up a single add.
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
@@ -20,6 +21,121 @@ declare ptr @__ctype_b_loc() local_unnamed_addr #1
; Function Attrs: nounwind uwtable
define void @BuildMask(ptr nocapture readonly) local_unnamed_addr #0 {
+; CHECK-LABEL: define void @BuildMask(
+; CHECK-SAME: ptr nocapture readonly [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: tail call void @llvm.memset.p0.i64(ptr align 16 @alPhrase, i8 0, i64 416, i1 false)
+; CHECK-NEXT: tail call void @llvm.memset.p0.i64(ptr align 16 @aqMainMask, i8 0, i64 16, i1 false)
+; CHECK-NEXT: tail call void @llvm.memset.p0.i64(ptr align 16 @aqMainSign, i8 0, i64 16, i1 false)
+; CHECK-NEXT: br label [[DOTSINK_SPLIT:%.*]]
+; CHECK: .sink.split:
+; CHECK-NEXT: [[DOT0:%.*]] = phi ptr [ [[TMP0]], [[TMP1:%.*]] ], [ [[TMP3:%.*]], [[TMP14:%.*]] ]
+; CHECK-NEXT: [[DOTSINK:%.*]] = phi i32 [ 0, [[TMP1]] ], [ [[TMP22:%.*]], [[TMP14]] ]
+; CHECK-NEXT: store i32 [[DOTSINK]], ptr @cchPhraseLength, align 4, !tbaa [[TBAA1:![0-9]+]]
+; CHECK-NEXT: br label [[TMP2:%.*]]
+; CHECK: 2:
+; CHECK-NEXT: [[DOT1:%.*]] = phi ptr [ [[DOT0]], [[DOTSINK_SPLIT]] ], [ [[TMP3]], [[TMP6:%.*]] ]
+; CHECK-NEXT: [[TMP3]] = getelementptr inbounds i8, ptr [[DOT1]], i64 1
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOT1]], align 1
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: br i1 [[TMP5]], label [[DOTPREHEADER_PREHEADER:%.*]], label [[TMP6]]
+; CHECK: .preheader.preheader:
+; CHECK-NEXT: br label [[DOTPREHEADER:%.*]]
+; CHECK: 6:
+; CHECK-NEXT: [[TMP7:%.*]] = tail call ptr @__ctype_b_loc() #[[ATTR4:[0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 8, !tbaa [[TBAA5:![0-9]+]]
+; CHECK-NEXT: [[TMP9:%.*]] = sext i8 [[TMP4]] to i64
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i64 [[TMP9]]
+; CHECK-NEXT: [[TMP11:%.*]] = load i16, ptr [[TMP10]], align 2, !tbaa [[TBAA7:![0-9]+]]
+; CHECK-NEXT: [[TMP12:%.*]] = and i16 [[TMP11]], 1024
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i16 [[TMP12]], 0
+; CHECK-NEXT: br i1 [[TMP13]], label [[TMP2]], label [[TMP14]]
+; CHECK: 14:
+; CHECK-NEXT: [[TMP15:%.*]] = sext i8 [[TMP4]] to i32
+; CHECK-NEXT: [[TMP16:%.*]] = tail call i32 @tolower(i32 [[TMP15]]) #[[ATTR5:[0-9]+]]
+; CHECK-NEXT: [[TMP17:%.*]] = add nsw i32 [[TMP16]], -97
+; CHECK-NEXT: [[TMP18:%.*]] = sext i32 [[TMP17]] to i64
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds [26 x %struct.Letter], ptr @alPhrase, i64 0, i64 [[TMP18]], i32 0
+; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 16, !tbaa [[TBAA9:![0-9]+]]
+; CHECK-NEXT: [[TMP21:%.*]] = add i32 [[TMP20]], 1
+; CHECK-NEXT: store i32 [[TMP21]], ptr [[TMP19]], align 16, !tbaa [[TBAA9]]
+; CHECK-NEXT: [[TMP22]] = add nsw i32 [[DOTSINK]], 1
+; CHECK-NEXT: br label [[DOTSINK_SPLIT]]
+; CHECK: .preheader:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[DOTPREHEADER_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[TMP57:%.*]] ]
+; CHECK-NEXT: [[DOT04961:%.*]] = phi i32 [ [[DOT2:%.*]], [[TMP57]] ], [ 0, [[DOTPREHEADER_PREHEADER]] ]
+; CHECK-NEXT: [[DOT05160:%.*]] = phi i32 [ [[DOT253:%.*]], [[TMP57]] ], [ 0, [[DOTPREHEADER_PREHEADER]] ]
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds [26 x %struct.Letter], ptr @alPhrase, i64 0, i64 [[INDVARS_IV]], i32 0
+; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 16, !tbaa [[TBAA9]]
+; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i32 [[TMP24]], 0
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds [26 x i32], ptr @auGlobalFrequency, i64 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT: br i1 [[TMP25]], label [[TMP27:%.*]], label [[TMP28:%.*]]
+; CHECK: 27:
+; CHECK-NEXT: store i32 -1, ptr [[TMP26]], align 4, !tbaa [[TBAA1]]
+; CHECK-NEXT: br label [[TMP57]]
+; CHECK: 28:
+; CHECK-NEXT: store i32 0, ptr [[TMP26]], align 4, !tbaa [[TBAA1]]
+; CHECK-NEXT: [[TMP29:%.*]] = zext i32 [[TMP24]] to i64
+; CHECK-NEXT: br i1 false, label [[DOT_CRIT_EDGE:%.*]], label [[DOTLR_PH_PREHEADER:%.*]]
+; CHECK: .lr.ph.preheader:
+; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
+; CHECK: .lr.ph:
+; CHECK-NEXT: [[DOT04658:%.*]] = phi i64 [ [[TMP31:%.*]], [[DOTLR_PH]] ], [ 1, [[DOTLR_PH_PREHEADER]] ]
+; CHECK-NEXT: [[DOT04857:%.*]] = phi i32 [ [[TMP30:%.*]], [[DOTLR_PH]] ], [ 1, [[DOTLR_PH_PREHEADER]] ]
+; CHECK-NEXT: [[TMP30]] = add nuw nsw i32 [[DOT04857]], 1
+; CHECK-NEXT: [[TMP31]] = shl i64 [[DOT04658]], 1
+; CHECK-NEXT: [[TMP32:%.*]] = icmp ult i64 [[TMP29]], [[TMP31]]
+; CHECK-NEXT: br i1 [[TMP32]], label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]], label [[DOTLR_PH]]
+; CHECK: ._crit_edge.loopexit:
+; CHECK-NEXT: br label [[DOT_CRIT_EDGE]]
+; CHECK: ._crit_edge:
+; CHECK-NEXT: [[DOT048_LCSSA:%.*]] = phi i32 [ poison, [[TMP28]] ], [ [[TMP30]], [[DOT_CRIT_EDGE_LOOPEXIT]] ]
+; CHECK-NEXT: [[DOT046_LCSSA:%.*]] = phi i64 [ poison, [[TMP28]] ], [ [[TMP31]], [[DOT_CRIT_EDGE_LOOPEXIT]] ]
+; CHECK-NEXT: [[TMP33:%.*]] = add nsw i32 [[DOT048_LCSSA]], [[DOT04961]]
+; CHECK-NEXT: [[TMP34:%.*]] = icmp ugt i32 [[TMP33]], 64
+; CHECK-NEXT: br i1 [[TMP34]], label [[TMP35:%.*]], label [[TMP39:%.*]]
+; CHECK: 35:
+; CHECK-NEXT: [[TMP36:%.*]] = add i32 [[DOT05160]], 1
+; CHECK-NEXT: [[TMP37:%.*]] = icmp ugt i32 [[TMP36]], 1
+; CHECK-NEXT: br i1 [[TMP37]], label [[TMP38:%.*]], label [[TMP39]]
+; CHECK: 38:
+; CHECK-NEXT: tail call void @Fatal(ptr @.str.7, i32 0)
+; CHECK-NEXT: br label [[TMP39]]
+; CHECK: 39:
+; CHECK-NEXT: [[DOT152:%.*]] = phi i32 [ [[DOT05160]], [[DOT_CRIT_EDGE]] ], [ [[TMP36]], [[TMP38]] ], [ [[TMP36]], [[TMP35]] ]
+; CHECK-NEXT: [[DOT150:%.*]] = phi i32 [ [[DOT04961]], [[DOT_CRIT_EDGE]] ], [ 0, [[TMP38]] ], [ 0, [[TMP35]] ]
+; CHECK-NEXT: [[TMP40:%.*]] = add i64 [[DOT046_LCSSA]], 4294967295
+; CHECK-NEXT: [[TMP41:%.*]] = trunc i64 [[TMP40]] to i32
+; CHECK-NEXT: [[TMP42:%.*]] = getelementptr inbounds [26 x %struct.Letter], ptr @alPhrase, i64 0, i64 [[INDVARS_IV]], i32 2
+; CHECK-NEXT: store i32 [[TMP41]], ptr [[TMP42]], align 8, !tbaa [[TBAA11:![0-9]+]]
+; CHECK-NEXT: [[TMP43:%.*]] = zext i32 [[DOT150]] to i64
+; CHECK-NEXT: [[DOT046_:%.*]] = shl i64 [[DOT046_LCSSA]], [[TMP43]]
+; CHECK-NEXT: [[TMP44:%.*]] = zext i32 [[DOT152]] to i64
+; CHECK-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x i64], ptr @aqMainSign, i64 0, i64 [[TMP44]]
+; CHECK-NEXT: [[TMP46:%.*]] = load i64, ptr [[TMP45]], align 8, !tbaa [[TBAA12:![0-9]+]]
+; CHECK-NEXT: [[TMP47:%.*]] = or i64 [[TMP46]], [[DOT046_]]
+; CHECK-NEXT: store i64 [[TMP47]], ptr [[TMP45]], align 8, !tbaa [[TBAA12]]
+; CHECK-NEXT: [[TMP48:%.*]] = load i32, ptr [[TMP23]], align 16, !tbaa [[TBAA9]]
+; CHECK-NEXT: [[TMP49:%.*]] = zext i32 [[TMP48]] to i64
+; CHECK-NEXT: [[TMP50:%.*]] = shl i64 [[TMP49]], [[TMP43]]
+; CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds [2 x i64], ptr @aqMainMask, i64 0, i64 [[TMP44]]
+; CHECK-NEXT: [[TMP52:%.*]] = load i64, ptr [[TMP51]], align 8, !tbaa [[TBAA12]]
+; CHECK-NEXT: [[TMP53:%.*]] = or i64 [[TMP50]], [[TMP52]]
+; CHECK-NEXT: store i64 [[TMP53]], ptr [[TMP51]], align 8, !tbaa [[TBAA12]]
+; CHECK-NEXT: [[TMP54:%.*]] = getelementptr inbounds [26 x %struct.Letter], ptr @alPhrase, i64 0, i64 [[INDVARS_IV]], i32 1
+; CHECK-NEXT: store i32 [[DOT150]], ptr [[TMP54]], align 4, !tbaa [[TBAA14:![0-9]+]]
+; CHECK-NEXT: [[TMP55:%.*]] = getelementptr inbounds [26 x %struct.Letter], ptr @alPhrase, i64 0, i64 [[INDVARS_IV]], i32 3
+; CHECK-NEXT: store i32 [[DOT152]], ptr [[TMP55]], align 4, !tbaa [[TBAA15:![0-9]+]]
+; CHECK-NEXT: [[TMP56:%.*]] = add nsw i32 [[DOT150]], [[DOT048_LCSSA]]
+; CHECK-NEXT: br label [[TMP57]]
+; CHECK: 57:
+; CHECK-NEXT: [[DOT253]] = phi i32 [ [[DOT05160]], [[TMP27]] ], [ [[DOT152]], [[TMP39]] ]
+; CHECK-NEXT: [[DOT2]] = phi i32 [ [[DOT04961]], [[TMP27]] ], [ [[TMP56]], [[TMP39]] ]
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], 26
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[DOTPREHEADER]], label [[TMP58:%.*]]
+; CHECK: 58:
+; CHECK-NEXT: ret void
+;
tail call void @llvm.memset.p0.i64(ptr align 16 @alPhrase, i8 0, i64 416, i1 false)
tail call void @llvm.memset.p0.i64(ptr align 16 @aqMainMask, i8 0, i64 16, i1 false)
tail call void @llvm.memset.p0.i64(ptr align 16 @aqMainSign, i8 0, i64 16, i1 false)
@@ -113,7 +229,6 @@ define void @BuildMask(ptr nocapture readonly) local_unnamed_addr #0 {
; If we screw up the revisitation of the users of store of %sink above
; we will end up propagating and simplifying this to 1 in the final output
; because we keep an optimistic assumption we should not.
-; CHECK: add i32 %.05160, 1
%37 = add i32 %.05160, 1
%38 = icmp ugt i32 %37, 1
br i1 %38, label %39, label %40
@@ -193,3 +308,20 @@ attributes #5 = { nounwind readonly }
!14 = !{!"long", !3, i64 0}
!15 = !{!11, !2, i64 4}
!16 = !{!11, !2, i64 12}
+;.
+; CHECK: [[TBAA1]] = !{[[META2:![0-9]+]], [[META2]], i64 0}
+; CHECK: [[META2]] = !{!"int", [[META3:![0-9]+]], i64 0}
+; CHECK: [[META3]] = !{!"omnipotent char", [[META4:![0-9]+]], i64 0}
+; CHECK: [[META4]] = !{!"Simple C/C++ TBAA"}
+; CHECK: [[TBAA5]] = !{[[META6:![0-9]+]], [[META6]], i64 0}
+; CHECK: [[META6]] = !{!"any pointer", [[META3]], i64 0}
+; CHECK: [[TBAA7]] = !{[[META8:![0-9]+]], [[META8]], i64 0}
+; CHECK: [[META8]] = !{!"short", [[META3]], i64 0}
+; CHECK: [[TBAA9]] = !{[[META10:![0-9]+]], [[META2]], i64 0}
+; CHECK: [[META10]] = !{!"", [[META2]], i64 0, [[META2]], i64 4, [[META2]], i64 8, [[META2]], i64 12}
+; CHECK: [[TBAA11]] = !{[[META10]], [[META2]], i64 8}
+; CHECK: [[TBAA12]] = !{[[META13:![0-9]+]], [[META13]], i64 0}
+; CHECK: [[META13]] = !{!"long", [[META3]], i64 0}
+; CHECK: [[TBAA14]] = !{[[META10]], [[META2]], i64 4}
+; CHECK: [[TBAA15]] = !{[[META10]], [[META2]], i64 12}
+;.
diff --git a/llvm/test/Transforms/NewGVN/metadata-nonnull.ll b/llvm/test/Transforms/NewGVN/metadata-nonnull.ll
index cd0922f..5de4c58 100644
--- a/llvm/test/Transforms/NewGVN/metadata-nonnull.ll
+++ b/llvm/test/Transforms/NewGVN/metadata-nonnull.ll
@@ -150,7 +150,7 @@ define ptr @test7(ptr %v0) {
; CHECK-LABEL: define ptr @test7
; CHECK-SAME: (ptr [[V0:%.*]]) {
; CHECK-NEXT: top:
-; CHECK-NEXT: [[V1:%.*]] = load ptr, ptr [[V0]], align 8, !nonnull !0
+; CHECK-NEXT: [[V1:%.*]] = load ptr, ptr [[V0]], align 8, !nonnull [[META0:![0-9]+]]
; CHECK-NEXT: call void @use2(ptr [[V1]])
; CHECK-NEXT: br i1 undef, label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
diff --git a/llvm/test/Transforms/NewGVN/metadata-simplify.ll b/llvm/test/Transforms/NewGVN/metadata-simplify.ll
index a84c581..e981e37 100644
--- a/llvm/test/Transforms/NewGVN/metadata-simplify.ll
+++ b/llvm/test/Transforms/NewGVN/metadata-simplify.ll
@@ -9,11 +9,11 @@ define i1 @test1(ptr %arg, i1 %arg2) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: br i1 [[ARG2:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[LOAD1:%.*]] = load ptr, ptr [[ARG:%.*]], !nonnull !0
+; CHECK-NEXT: [[LOAD1:%.*]] = load ptr, ptr [[ARG:%.*]], align 8, !nonnull [[META0:![0-9]+]]
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[LOAD1]], null
; CHECK-NEXT: ret i1 [[CMP1]]
; CHECK: bb2:
-; CHECK-NEXT: [[LOAD2:%.*]] = load ptr, ptr [[ARG]]
+; CHECK-NEXT: [[LOAD2:%.*]] = load ptr, ptr [[ARG]], align 8
; CHECK-NEXT: [[CMP2:%.*]] = icmp eq ptr [[LOAD2]], null
; CHECK-NEXT: ret i1 [[CMP2]]
;
@@ -34,11 +34,11 @@ define i1 @test2(ptr %arg, i1 %arg2) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: br i1 [[ARG2:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[LOAD1:%.*]] = load ptr, ptr [[ARG:%.*]]
+; CHECK-NEXT: [[LOAD1:%.*]] = load ptr, ptr [[ARG:%.*]], align 8
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[LOAD1]], null
; CHECK-NEXT: ret i1 [[CMP1]]
; CHECK: bb2:
-; CHECK-NEXT: [[LOAD2:%.*]] = load ptr, ptr [[ARG]], !nonnull !0
+; CHECK-NEXT: [[LOAD2:%.*]] = load ptr, ptr [[ARG]], align 8, !nonnull [[META0]]
; CHECK-NEXT: [[CMP2:%.*]] = icmp eq ptr [[LOAD2]], null
; CHECK-NEXT: ret i1 [[CMP2]]
;
@@ -60,11 +60,11 @@ define i1 @test3(ptr %ptr, i1 %arg2) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: br i1 [[ARG2:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[LOAD1:%.*]] = load i32, ptr [[PTR:%.*]], !range !1
+; CHECK-NEXT: [[LOAD1:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !range [[RNG1:![0-9]+]]
; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[LOAD1]], 999
; CHECK-NEXT: ret i1 [[CMP1]]
; CHECK: bb2:
-; CHECK-NEXT: [[LOAD2:%.*]] = load i32, ptr [[PTR]]
+; CHECK-NEXT: [[LOAD2:%.*]] = load i32, ptr [[PTR]], align 4
; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[LOAD2]], 999
; CHECK-NEXT: ret i1 [[CMP2]]
;
@@ -85,11 +85,11 @@ define i1 @test4(ptr %ptr, i1 %arg2) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: br i1 [[ARG2:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[LOAD1:%.*]] = load i32, ptr [[PTR:%.*]]
+; CHECK-NEXT: [[LOAD1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[LOAD1]], 999
; CHECK-NEXT: ret i1 [[CMP1]]
; CHECK: bb2:
-; CHECK-NEXT: [[LOAD2:%.*]] = load i32, ptr [[PTR]], !range !1
+; CHECK-NEXT: [[LOAD2:%.*]] = load i32, ptr [[PTR]], align 4, !range [[RNG1]]
; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[LOAD2]], 999
; CHECK-NEXT: ret i1 [[CMP2]]
;
@@ -110,11 +110,11 @@ define i1 @test5(ptr %ptr, i1 %arg2) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: br i1 [[ARG2:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[LOAD1:%.*]] = load i32, ptr [[PTR:%.*]], !range !1
+; CHECK-NEXT: [[LOAD1:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !range [[RNG1]]
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[LOAD1]], 999
; CHECK-NEXT: ret i1 [[CMP1]]
; CHECK: bb2:
-; CHECK-NEXT: [[LOAD2:%.*]] = load i32, ptr [[PTR]]
+; CHECK-NEXT: [[LOAD2:%.*]] = load i32, ptr [[PTR]], align 4
; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[LOAD2]], 999
; CHECK-NEXT: ret i1 [[CMP2]]
;
@@ -135,11 +135,11 @@ define i1 @test6(ptr %ptr, i1 %arg2) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: br i1 [[ARG2:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[LOAD1:%.*]] = load i32, ptr [[PTR:%.*]]
+; CHECK-NEXT: [[LOAD1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[LOAD1]], 999
; CHECK-NEXT: ret i1 [[CMP1]]
; CHECK: bb2:
-; CHECK-NEXT: [[LOAD2:%.*]] = load i32, ptr [[PTR]], !range !1
+; CHECK-NEXT: [[LOAD2:%.*]] = load i32, ptr [[PTR]], align 4, !range [[RNG1]]
; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[LOAD2]], 999
; CHECK-NEXT: ret i1 [[CMP2]]
;
diff --git a/llvm/test/Transforms/NewGVN/noalias.ll b/llvm/test/Transforms/NewGVN/noalias.ll
index 5f0c5dd..2cb5c19 100644
--- a/llvm/test/Transforms/NewGVN/noalias.ll
+++ b/llvm/test/Transforms/NewGVN/noalias.ll
@@ -1,10 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S < %s | FileCheck %s
define i32 @test1(ptr %p, ptr %q) {
-; CHECK-LABEL: @test1(ptr %p, ptr %q)
-; CHECK: load i32, ptr %p
-; CHECK-NOT: noalias
-; CHECK: %c = add i32 %a, %a
+; CHECK-LABEL: define i32 @test1(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) {
+; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: [[C:%.*]] = add i32 [[A]], [[A]]
+; CHECK-NEXT: ret i32 [[C]]
+;
%a = load i32, ptr %p, !noalias !3
%b = load i32, ptr %p
%c = add i32 %a, %b
@@ -12,9 +15,12 @@ define i32 @test1(ptr %p, ptr %q) {
}
define i32 @test2(ptr %p, ptr %q) {
-; CHECK-LABEL: @test2(ptr %p, ptr %q)
-; CHECK: load i32, ptr %p, align 4, !alias.scope ![[SCOPE1:[0-9]+]]
-; CHECK: %c = add i32 %a, %a
+; CHECK-LABEL: define i32 @test2(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) {
+; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[P]], align 4, !alias.scope [[META0:![0-9]+]]
+; CHECK-NEXT: [[C:%.*]] = add i32 [[A]], [[A]]
+; CHECK-NEXT: ret i32 [[C]]
+;
%a = load i32, ptr %p, !alias.scope !3
%b = load i32, ptr %p, !alias.scope !3
%c = add i32 %a, %b
@@ -22,17 +28,18 @@ define i32 @test2(ptr %p, ptr %q) {
}
define i32 @test3(ptr %p, ptr %q) {
-; CHECK-LABEL: @test3(ptr %p, ptr %q)
-; CHECK: load i32, ptr %p, align 4, !alias.scope ![[SCOPE2:[0-9]+]]
-; CHECK: %c = add i32 %a, %a
+; CHECK-LABEL: define i32 @test3(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) {
+; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[P]], align 4, !alias.scope [[META3:![0-9]+]]
+; CHECK-NEXT: [[C:%.*]] = add i32 [[A]], [[A]]
+; CHECK-NEXT: ret i32 [[C]]
+;
%a = load i32, ptr %p, !alias.scope !4
%b = load i32, ptr %p, !alias.scope !5
%c = add i32 %a, %b
ret i32 %c
}
-; CHECK: ![[SCOPE1]] = !{!{{[0-9]+}}}
-; CHECK: ![[SCOPE2]] = !{!{{[0-9]+}}, !{{[0-9]+}}}
declare i32 @foo(ptr) readonly
!0 = distinct !{!0, !2, !"callee0: %a"}
@@ -42,3 +49,10 @@ declare i32 @foo(ptr) readonly
!3 = !{!0}
!4 = !{!1}
!5 = !{!0, !1}
+;.
+; CHECK: [[META0]] = !{[[META1:![0-9]+]]}
+; CHECK: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]], !"callee0: %a"}
+; CHECK: [[META2]] = distinct !{[[META2]], !"callee0"}
+; CHECK: [[META3]] = !{[[META4:![0-9]+]], [[META1]]}
+; CHECK: [[META4]] = distinct !{[[META4]], [[META2]], !"callee0: %b"}
+;.
diff --git a/llvm/test/Transforms/NewGVN/nomemlocation.ll b/llvm/test/Transforms/NewGVN/nomemlocation.ll
index 0f716b3..332e6c6 100644
--- a/llvm/test/Transforms/NewGVN/nomemlocation.ll
+++ b/llvm/test/Transforms/NewGVN/nomemlocation.ll
@@ -1,8 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -S -p='newgvn' | FileCheck %s
; MemorySSA should be able to handle a clobber query with an empty MemoryLocation.
-; CHECK: @userread
define ptr @userread(ptr %p) {
+; CHECK-LABEL: define ptr @userread(
+; CHECK-SAME: ptr [[P:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[POS:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[DIFF:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[POS]]
+; CHECK-NEXT: [[LD:%.*]] = load ptr, ptr [[GEP]], align 8
+; CHECK-NEXT: [[READVAL:%.*]] = call i64 @fread(ptr noundef nonnull [[GEP]], i64 noundef 1, i64 noundef [[POS]], ptr noundef [[LD]])
+; CHECK-NEXT: [[READVALISPOS:%.*]] = icmp eq i64 [[READVAL]], [[POS]]
+; CHECK-NEXT: call void @llvm.assume(i1 [[READVALISPOS]])
+; CHECK-NEXT: [[DIFF]] = sub i64 0, [[POS]]
+; CHECK-NEXT: br label [[LOOP]]
+;
entry:
br label %loop
diff --git a/llvm/test/Transforms/NewGVN/non-integral-pointers.ll b/llvm/test/Transforms/NewGVN/non-integral-pointers.ll
index 6119577..1e3da6e 100644
--- a/llvm/test/Transforms/NewGVN/non-integral-pointers.ll
+++ b/llvm/test/Transforms/NewGVN/non-integral-pointers.ll
@@ -1,37 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:4"
target triple = "x86_64-unknown-linux-gnu"
define void @f0(i1 %alwaysFalse, i64 %val, ptr %loc) {
-; CHECK-LABEL: @f0(
-; CHECK-NOT: inttoptr
-; CHECK-NOT: ptrtoint
- entry:
+; CHECK-LABEL: define void @f0(
+; CHECK-SAME: i1 [[ALWAYSFALSE:%.*]], i64 [[VAL:%.*]], ptr [[LOC:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: store i64 [[VAL]], ptr [[LOC]], align 8
+; CHECK-NEXT: br i1 [[ALWAYSFALSE]], label [[NEVERTAKEN:%.*]], label [[ALWAYSTAKEN:%.*]]
+; CHECK: neverTaken:
+; CHECK-NEXT: [[PTR:%.*]] = load ptr addrspace(4), ptr [[LOC]], align 8
+; CHECK-NEXT: store i8 5, ptr addrspace(4) [[PTR]], align 1
+; CHECK-NEXT: ret void
+; CHECK: alwaysTaken:
+; CHECK-NEXT: ret void
+;
+ entry:
store i64 %val, ptr %loc
br i1 %alwaysFalse, label %neverTaken, label %alwaysTaken
- neverTaken:
+ neverTaken:
%ptr = load ptr addrspace(4), ptr %loc
store i8 5, ptr addrspace(4) %ptr
ret void
- alwaysTaken:
+ alwaysTaken:
ret void
}
define i64 @f1(i1 %alwaysFalse, ptr addrspace(4) %val, ptr %loc) {
-; CHECK-LABEL: @f1(
-; CHECK-NOT: inttoptr
-; CHECK-NOT: ptrtoint
- entry:
+; CHECK-LABEL: define i64 @f1(
+; CHECK-SAME: i1 [[ALWAYSFALSE:%.*]], ptr addrspace(4) [[VAL:%.*]], ptr [[LOC:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: store ptr addrspace(4) [[VAL]], ptr [[LOC]], align 8
+; CHECK-NEXT: br i1 [[ALWAYSFALSE]], label [[NEVERTAKEN:%.*]], label [[ALWAYSTAKEN:%.*]]
+; CHECK: neverTaken:
+; CHECK-NEXT: [[INT:%.*]] = load i64, ptr [[LOC]], align 8
+; CHECK-NEXT: ret i64 [[INT]]
+; CHECK: alwaysTaken:
+; CHECK-NEXT: ret i64 42
+;
+ entry:
store ptr addrspace(4) %val, ptr %loc
br i1 %alwaysFalse, label %neverTaken, label %alwaysTaken
- neverTaken:
+ neverTaken:
%int = load i64, ptr %loc
ret i64 %int
- alwaysTaken:
+ alwaysTaken:
ret i64 42
}
diff --git a/llvm/test/Transforms/NewGVN/null-aliases-nothing.ll b/llvm/test/Transforms/NewGVN/null-aliases-nothing.ll
index 6666211..25295fa 100644
--- a/llvm/test/Transforms/NewGVN/null-aliases-nothing.ll
+++ b/llvm/test/Transforms/NewGVN/null-aliases-nothing.ll
@@ -1,19 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
%t = type { i32 }
declare void @test1f(ptr)
define void @test1(ptr noalias %stuff ) {
- %before = load i32, ptr %stuff
+; CHECK-LABEL: define void @test1(
+; CHECK-SAME: ptr noalias [[STUFF:%.*]]) {
+; CHECK-NEXT: [[BEFORE:%.*]] = load i32, ptr [[STUFF]], align 4
+; CHECK-NEXT: call void @test1f(ptr null)
+; CHECK-NEXT: [[SUM:%.*]] = add i32 [[BEFORE]], [[BEFORE]]
+; CHECK-NEXT: store i32 [[SUM]], ptr [[STUFF]], align 4
+; CHECK-NEXT: ret void
+;
+ %before = load i32, ptr %stuff
- call void @test1f(ptr null)
+ call void @test1f(ptr null)
- %after = load i32, ptr %stuff ; <--- This should be a dead load
- %sum = add i32 %before, %after
+ %after = load i32, ptr %stuff ; <--- This should be a dead load
+ %sum = add i32 %before, %after
- store i32 %sum, ptr %stuff
- ret void
-; CHECK: load
-; CHECK-NOT: load
-; CHECK: ret void
+ store i32 %sum, ptr %stuff
+ ret void
}
diff --git a/llvm/test/Transforms/NewGVN/phi-edge-handling.ll b/llvm/test/Transforms/NewGVN/phi-edge-handling.ll
index 8dfac79..0e871b6 100644
--- a/llvm/test/Transforms/NewGVN/phi-edge-handling.ll
+++ b/llvm/test/Transforms/NewGVN/phi-edge-handling.ll
@@ -9,8 +9,8 @@ define i16 @hoge() {
; CHECK-LABEL: @hoge(
; CHECK-NEXT: bb:
; CHECK-NEXT: switch i8 undef, label [[BB7:%.*]] [
-; CHECK-NEXT: i8 0, label [[BB1:%.*]]
-; CHECK-NEXT: i8 12, label [[BB2:%.*]]
+; CHECK-NEXT: i8 0, label [[BB1:%.*]]
+; CHECK-NEXT: i8 12, label [[BB2:%.*]]
; CHECK-NEXT: ]
; CHECK: bb1:
; CHECK-NEXT: br label [[BB6:%.*]]
diff --git a/llvm/test/Transforms/NewGVN/phi-of-ops-simplified-to-existing-value-then-changes-again.ll b/llvm/test/Transforms/NewGVN/phi-of-ops-simplified-to-existing-value-then-changes-again.ll
index ac07148..573a4c0 100644
--- a/llvm/test/Transforms/NewGVN/phi-of-ops-simplified-to-existing-value-then-changes-again.ll
+++ b/llvm/test/Transforms/NewGVN/phi-of-ops-simplified-to-existing-value-then-changes-again.ll
@@ -88,8 +88,8 @@ define void @pr42422(i1 %c.1, i1 %c.2) {
; CHECK: bb16:
; CHECK-NEXT: [[TMP17:%.*]] = phi i32 [ poison, [[BB15]] ], [ 1, [[BB14]] ], [ 9, [[BB7]] ]
; CHECK-NEXT: switch i32 [[TMP17]], label [[BB19]] [
-; CHECK-NEXT: i32 0, label [[BB6]]
-; CHECK-NEXT: i32 9, label [[BB18:%.*]]
+; CHECK-NEXT: i32 0, label [[BB6]]
+; CHECK-NEXT: i32 9, label [[BB18:%.*]]
; CHECK-NEXT: ]
; CHECK: bb18:
; CHECK-NEXT: br label [[BB19]]
diff --git a/llvm/test/Transforms/NewGVN/phi-translate-partial-alias.ll b/llvm/test/Transforms/NewGVN/phi-translate-partial-alias.ll
index cb24b05..7dd4190 100644
--- a/llvm/test/Transforms/NewGVN/phi-translate-partial-alias.ll
+++ b/llvm/test/Transforms/NewGVN/phi-translate-partial-alias.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-f128:128:128-n8:16:32:64"
@@ -6,12 +7,19 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; not actually redundant around the loop backedge, despite appearances
; if phi-translation is ignored.
-; CHECK: define void @test0(ptr %begin)
-; CHECK: loop:
-; CHECK: %l0 = load i8, ptr %phi
-; CHECK: call void @bar(i8 %l0)
-; CHECK: %l1 = load i8, ptr %phi
define void @test0(ptr %begin) {
+; CHECK-LABEL: define void @test0(
+; CHECK-SAME: ptr [[BEGIN:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[PHI:%.*]] = phi ptr [ [[BEGIN]], [[ENTRY:%.*]] ], [ [[NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[L0:%.*]] = load i8, ptr [[PHI]], align 1
+; CHECK-NEXT: call void @bar(i8 [[L0]])
+; CHECK-NEXT: [[L1:%.*]] = load i8, ptr [[PHI]], align 1
+; CHECK-NEXT: [[NEXT]] = getelementptr inbounds i8, ptr [[PHI]], i8 [[L1]]
+; CHECK-NEXT: br label [[LOOP]]
+;
entry:
br label %loop
diff --git a/llvm/test/Transforms/NewGVN/pr17732.ll b/llvm/test/Transforms/NewGVN/pr17732.ll
index 427543d..e66547c 100644
--- a/llvm/test/Transforms/NewGVN/pr17732.ll
+++ b/llvm/test/Transforms/NewGVN/pr17732.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S -o - < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@@ -12,6 +13,12 @@ target triple = "x86_64-unknown-linux-gnu"
@vector_with_zeroinit = common global %struct.with_vector zeroinitializer, align 4
define i32 @main() {
+; CHECK-LABEL: define i32 @main() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @array_with_zeroinit, ptr align 4 @main.obj_with_array, i64 12, i1 false)
+; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @vector_with_zeroinit, ptr align 4 @main.obj_with_vector, i64 12, i1 false)
+; CHECK-NEXT: ret i32 1
+;
entry:
tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @array_with_zeroinit, ptr align 4 @main.obj_with_array, i64 12, i1 false)
%0 = load i8, ptr getelementptr inbounds (%struct.with_array, ptr @array_with_zeroinit, i64 0, i32 2), align 4
@@ -22,8 +29,6 @@ entry:
%conv1 = sext i8 %1 to i32
%and = and i32 %conv0, %conv1
ret i32 %and
-; CHECK-LABEL: define i32 @main(
-; CHECK: ret i32 1
}
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
diff --git a/llvm/test/Transforms/NewGVN/pr17852.ll b/llvm/test/Transforms/NewGVN/pr17852.ll
index 5858982..bffde12 100644
--- a/llvm/test/Transforms/NewGVN/pr17852.ll
+++ b/llvm/test/Transforms/NewGVN/pr17852.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=newgvn
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
%struct.S0 = type { [2 x i8], [2 x i8], [4 x i8], [2 x i8], i32, i32, i32, i32 }
diff --git a/llvm/test/Transforms/NewGVN/pr24397.ll b/llvm/test/Transforms/NewGVN/pr24397.ll
index f3f112a..d998144 100644
--- a/llvm/test/Transforms/NewGVN/pr24397.ll
+++ b/llvm/test/Transforms/NewGVN/pr24397.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -disable-output < %s
target triple = "x86_64-unknown-linux-gnu"
diff --git a/llvm/test/Transforms/NewGVN/pr24426.ll b/llvm/test/Transforms/NewGVN/pr24426.ll
index e8a88f5..8f0974a 100644
--- a/llvm/test/Transforms/NewGVN/pr24426.ll
+++ b/llvm/test/Transforms/NewGVN/pr24426.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=memcpyopt,mldst-motion,newgvn -S | FileCheck %s
declare void @check(i8)
@@ -5,11 +6,17 @@ declare void @check(i8)
declare void @write(ptr %res)
define void @test1() {
+; CHECK-LABEL: define void @test1() {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca [10 x i8], align 1
+; CHECK-NEXT: call void @write(ptr [[TMP1]])
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1
+; CHECK-NEXT: call void @check(i8 [[TMP2]])
+; CHECK-NEXT: ret void
+;
%1 = alloca [10 x i8]
call void @write(ptr %1)
%2 = load i8, ptr %1
-; CHECK-NOT: undef
call void @check(i8 %2)
ret void
diff --git a/llvm/test/Transforms/NewGVN/pr25440.ll b/llvm/test/Transforms/NewGVN/pr25440.ll
index b3ebf44..9d9c4cd 100644
--- a/llvm/test/Transforms/NewGVN/pr25440.ll
+++ b/llvm/test/Transforms/NewGVN/pr25440.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
;RUN: opt -passes=newgvn -S < %s | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n8:16:32-S64"
@@ -10,19 +11,51 @@ target triple = "thumbv7--linux-gnueabi"
; Function Attrs: nounwind
define fastcc void @foo(ptr nocapture readonly %x) {
-;CHECK-LABEL: foo
+; CHECK-LABEL: define fastcc void @foo(
+; CHECK-SAME: ptr nocapture readonly [[X:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[BB0:%.*]]
+; CHECK: bb0:
+; CHECK-NEXT: [[X_TR:%.*]] = phi ptr [ [[X]], [[ENTRY:%.*]] ], [ null, [[LAND_LHS_TRUE:%.*]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[X_TR]], align 4
+; CHECK-NEXT: [[CONV:%.*]] = zext i16 [[TMP0]] to i32
+; CHECK-NEXT: switch i32 [[CONV]], label [[IF_END_50:%.*]] [
+; CHECK-NEXT: i32 43, label [[CLEANUP:%.*]]
+; CHECK-NEXT: i32 52, label [[IF_THEN_5:%.*]]
+; CHECK-NEXT: ]
+; CHECK: if.then.5:
+; CHECK-NEXT: br i1 undef, label [[LAND_LHS_TRUE]], label [[IF_THEN_26:%.*]]
+; CHECK: land.lhs.true:
+; CHECK-NEXT: br i1 undef, label [[CLEANUP]], label [[BB0]]
+; CHECK: if.then.26:
+; CHECK-NEXT: br i1 undef, label [[COND_END:%.*]], label [[COND_FALSE:%.*]]
+; CHECK: cond.false:
+; CHECK-NEXT: [[MODE:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], ptr [[X_TR]], i32 0, i32 1
+; CHECK-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[MODE]], align 2
+; CHECK-NEXT: br label [[COND_END]]
+; CHECK: cond.end:
+; CHECK-NEXT: br i1 undef, label [[IF_THEN_44:%.*]], label [[CLEANUP]]
+; CHECK: if.then.44:
+; CHECK-NEXT: unreachable
+; CHECK: if.end.50:
+; CHECK-NEXT: [[ARRAYIDX52:%.*]] = getelementptr inbounds [0 x i32], ptr @length, i32 0, i32 [[CONV]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX52]], align 4
+; CHECK-NEXT: br i1 undef, label [[FOR_BODY_57:%.*]], label [[CLEANUP]]
+; CHECK: for.body.57:
+; CHECK-NEXT: unreachable
+; CHECK: cleanup:
+; CHECK-NEXT: ret void
+;
entry:
br label %bb0
bb0: ; preds = %land.lhs.true, %entry
-;CHECK: bb0:
%x.tr = phi ptr [ %x, %entry ], [ null, %land.lhs.true ]
%0 = load i16, ptr %x.tr, align 4
-; CHECK: load i16, ptr
%conv = zext i16 %0 to i32
switch i32 %conv, label %if.end.50 [
- i32 43, label %cleanup
- i32 52, label %if.then.5
+ i32 43, label %cleanup
+ i32 52, label %if.then.5
]
if.then.5: ; preds = %bb0
@@ -36,8 +69,6 @@ if.then.26: ; preds = %if.then.5
br i1 undef, label %cond.end, label %cond.false
cond.false: ; preds = %if.then.26
-; CHECK: cond.false:
-; CHECK: load i16
%mode = getelementptr inbounds %struct.a, ptr %x.tr.lcssa163, i32 0, i32 1
%bf.load = load i16, ptr %mode, align 2
%bf.shl = shl i16 %bf.load, 8
@@ -50,7 +81,6 @@ if.then.44: ; preds = %cond.end
unreachable
if.end.50: ; preds = %bb0
-;%CHECK: if.end.50:
%conv.lcssa = phi i32 [ %conv, %bb0 ]
%arrayidx52 = getelementptr inbounds [0 x i32], ptr @length, i32 0, i32 %conv.lcssa
%1 = load i32, ptr %arrayidx52, align 4
@@ -68,7 +98,34 @@ cleanup: ; preds = %if.end.50, %cond.en
@dfg_text = external global ptr, align 4
define void @dfg_lex() {
-;CHECK-LABEL: dfg_lex
+; CHECK-LABEL: define void @dfg_lex() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[WHILE_BODYTHREAD_PRE_SPLIT:%.*]]
+; CHECK: while.bodythread-pre-split:
+; CHECK-NEXT: br i1 undef, label [[IF_THEN_14:%.*]], label [[IF_END_15:%.*]]
+; CHECK: if.then.14:
+; CHECK-NEXT: [[V1:%.*]] = load i32, ptr @dfg_text, align 4
+; CHECK-NEXT: br label [[IF_END_15]]
+; CHECK: if.end.15:
+; CHECK-NEXT: [[V2:%.*]] = load ptr, ptr @yy_c_buf_p, align 4
+; CHECK-NEXT: br label [[WHILE_COND_16:%.*]]
+; CHECK: while.cond.16:
+; CHECK-NEXT: br i1 undef, label [[WHILE_COND_16]], label [[WHILE_END:%.*]]
+; CHECK: while.end:
+; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[V2]], i32 undef
+; CHECK-NEXT: store ptr [[ADD_PTR]], ptr @dfg_text, align 4
+; CHECK-NEXT: [[SUB_PTR_RHS_CAST25:%.*]] = ptrtoint ptr [[ADD_PTR]] to i32
+; CHECK-NEXT: switch i32 undef, label [[SW_DEFAULT:%.*]] [
+; CHECK-NEXT: i32 65, label [[WHILE_BODYTHREAD_PRE_SPLIT]]
+; CHECK-NEXT: i32 3, label [[RETURN:%.*]]
+; CHECK-NEXT: i32 57, label [[WHILE_BODYTHREAD_PRE_SPLIT]]
+; CHECK-NEXT: i32 60, label [[IF_THEN_14]]
+; CHECK-NEXT: ]
+; CHECK: sw.default:
+; CHECK-NEXT: unreachable
+; CHECK: return:
+; CHECK-NEXT: ret void
+;
entry:
br label %while.bodythread-pre-split
@@ -93,10 +150,10 @@ while.end: ; preds = %while.cond.16
%sub.ptr.rhs.cast25 = ptrtoint ptr %add.ptr to i32
%sub.ptr.sub26 = sub i32 0, %sub.ptr.rhs.cast25
switch i32 undef, label %sw.default [
- i32 65, label %while.bodythread-pre-split
- i32 3, label %return
- i32 57, label %while.bodythread-pre-split
- i32 60, label %if.then.14
+ i32 65, label %while.bodythread-pre-split
+ i32 3, label %return
+ i32 57, label %while.bodythread-pre-split
+ i32 60, label %if.then.14
]
sw.default: ; preds = %while.end
diff --git a/llvm/test/Transforms/NewGVN/pr28562.ll b/llvm/test/Transforms/NewGVN/pr28562.ll
index a62fdd3..320224c 100644
--- a/llvm/test/Transforms/NewGVN/pr28562.ll
+++ b/llvm/test/Transforms/NewGVN/pr28562.ll
@@ -1,9 +1,12 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -S -passes=newgvn < %s | FileCheck %s
define ptr @test1(ptr %a) {
+; CHECK-LABEL: define ptr @test1(
+; CHECK-SAME: ptr [[A:%.*]]) {
+; CHECK-NEXT: [[X2:%.*]] = getelementptr i32, ptr [[A]], i32 10
+; CHECK-NEXT: ret ptr [[X2]]
+;
%x1 = getelementptr inbounds i32, ptr %a, i32 10
%x2 = getelementptr i32, ptr %a, i32 10
ret ptr %x2
-; CHECK-LABEL: @test1(
-; CHECK: %[[x:.*]] = getelementptr i32, ptr %a, i32 10
-; CHECK: ret ptr %[[x]]
}
diff --git a/llvm/test/Transforms/NewGVN/pr31472.ll b/llvm/test/Transforms/NewGVN/pr31472.ll
index 8bb9a14..8eeb614 100644
--- a/llvm/test/Transforms/NewGVN/pr31472.ll
+++ b/llvm/test/Transforms/NewGVN/pr31472.ll
@@ -9,11 +9,13 @@ target triple = "x86_64-apple-macosx10.12.0"
define i32 @main() personality ptr @__gxx_personality_v0{
; CHECK-LABEL: @main(
; CHECK-NEXT: [[TMP1:%.*]] = invoke i32 @foo()
-; CHECK-NEXT: to label %good unwind label %bad
+; CHECK-NEXT: to label [[GOOD:%.*]] unwind label [[BAD:%.*]]
; CHECK: good:
; CHECK-NEXT: ret i32 5
; CHECK: bad:
-; CHECK-NEXT: [[TMP2:%.*]] = landingpad { ptr, i32
+; CHECK-NEXT: [[TMP2:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: resume { ptr, i32 } [[TMP2]]
;
%1 = invoke i32 @foo()
to label %good unwind label %bad
diff --git a/llvm/test/Transforms/NewGVN/pr31483.ll b/llvm/test/Transforms/NewGVN/pr31483.ll
index fe957de..82e9a2a 100644
--- a/llvm/test/Transforms/NewGVN/pr31483.ll
+++ b/llvm/test/Transforms/NewGVN/pr31483.ll
@@ -10,20 +10,20 @@ define signext i32 @ham(ptr %arg, ptr %arg1) #0 {
; CHECK-LABEL: @ham(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: store ptr %arg1, ptr [[TMP]], align 8
-; CHECK-NEXT: br label %bb2
+; CHECK-NEXT: store ptr [[ARG1:%.*]], ptr [[TMP]], align 8
+; CHECK-NEXT: br label [[BB2:%.*]]
; CHECK: bb2:
-; CHECK-NEXT: [[TMP3:%.*]] = phi ptr [ %arg, %bb ], [ %tmp7, %bb22 ]
+; CHECK-NEXT: [[TMP3:%.*]] = phi ptr [ [[ARG:%.*]], [[BB:%.*]] ], [ [[TMP7:%.*]], [[BB22:%.*]] ]
; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
-; CHECK-NEXT: br i1 [[TMP5]], label %bb6, label %bb23
+; CHECK-NEXT: br i1 [[TMP5]], label [[BB6:%.*]], label [[BB23:%.*]]
; CHECK: bb6:
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 1
+; CHECK-NEXT: [[TMP7]] = getelementptr inbounds i8, ptr [[TMP3]], i32 1
; CHECK-NEXT: [[TMP9:%.*]] = zext i8 [[TMP4]] to i32
-; CHECK-NEXT: switch i32 [[TMP9]], label %bb22 [
-; CHECK-NEXT: i32 115, label %bb10
-; CHECK-NEXT: i32 105, label %bb16
-; CHECK-NEXT: i32 99, label %bb16
+; CHECK-NEXT: switch i32 [[TMP9]], label [[BB22]] [
+; CHECK-NEXT: i32 115, label [[BB10:%.*]]
+; CHECK-NEXT: i32 105, label [[BB16:%.*]]
+; CHECK-NEXT: i32 99, label [[BB16]]
; CHECK-NEXT: ]
; CHECK: bb10:
; CHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP]], align 8
@@ -31,17 +31,17 @@ define signext i32 @ham(ptr %arg, ptr %arg1) #0 {
; CHECK-NEXT: store ptr [[TMP12]], ptr [[TMP]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP11]], align 8
; CHECK-NEXT: [[TMP15:%.*]] = call signext i32 (ptr, ...) @zot(ptr @global, ptr [[TMP14]])
-; CHECK-NEXT: br label %bb22
+; CHECK-NEXT: br label [[BB22]]
; CHECK: bb16:
; CHECK-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP]], align 8
; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[TMP17]], i64 8
; CHECK-NEXT: store ptr [[TMP18]], ptr [[TMP]], align 8
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP17]], i64 4
-; CHECK-NEXT: br label %bb22
+; CHECK-NEXT: br label [[BB22]]
; CHECK: bb22:
-; CHECK-NEXT: br label %bb2
+; CHECK-NEXT: br label [[BB2]]
; CHECK: bb23:
-; CHECK-NEXT: call void @llvm.va_end(ptr [[TMP]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[TMP]])
; CHECK-NEXT: ret i32 undef
;
bb:
diff --git a/llvm/test/Transforms/NewGVN/pr31491.ll b/llvm/test/Transforms/NewGVN/pr31491.ll
index 5f6b371..f27f13e 100644
--- a/llvm/test/Transforms/NewGVN/pr31491.ll
+++ b/llvm/test/Transforms/NewGVN/pr31491.ll
@@ -7,13 +7,13 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define internal i32 @pr31491() {
; CHECK-LABEL: @pr31491(
; CHECK-NEXT: bb5:
-; CHECK-NEXT: br label %bb7
+; CHECK-NEXT: br label [[BB7:%.*]]
; CHECK: bb7:
-; CHECK-NEXT: [[TMP:%.*]] = phi ptr [ [[TMP:%.*]]11, %bb10 ], [ undef, %bb5 ]
-; CHECK-NEXT: br label %bb10
+; CHECK-NEXT: [[TMP:%.*]] = phi ptr [ [[TMP11:%.*]], [[BB10:%.*]] ], [ undef, [[BB5:%.*]] ]
+; CHECK-NEXT: br label [[BB10]]
; CHECK: bb10:
-; CHECK-NEXT: [[TMP11:%.*]] = tail call ptr @patatino(ptr [[TMP]])
-; CHECK-NEXT: br label %bb7
+; CHECK-NEXT: [[TMP11]] = tail call ptr @patatino(ptr [[TMP]])
+; CHECK-NEXT: br label [[BB7]]
;
bb5:
br label %bb7
diff --git a/llvm/test/Transforms/NewGVN/pr31501.ll b/llvm/test/Transforms/NewGVN/pr31501.ll
index 55195fd..18bfcd1 100644
--- a/llvm/test/Transforms/NewGVN/pr31501.ll
+++ b/llvm/test/Transforms/NewGVN/pr31501.ll
@@ -52,30 +52,30 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define weak_odr hidden ptr @quux(ptr %arg, ptr %arg1) local_unnamed_addr #0 align 2 {
; CHECK-LABEL: @quux(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds %struct.barney, ptr %arg, i64 0, i32 3, i32 0, i32 0, i32 0
-; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !tbaa !2
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds %struct.barney, ptr %arg, i64 0, i32 3, i32 0, i32 0, i32 0, i32 0, i32 1
-; CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TMP4]], align 8, !tbaa !7
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [[STRUCT_BARNEY:%.*]], ptr [[ARG:%.*]], i64 0, i32 3, i32 0, i32 0, i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !tbaa [[TBAA2:![0-9]+]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_BARNEY]], ptr [[ARG]], i64 0, i32 3, i32 0, i32 0, i32 0, i32 0, i32 1
+; CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TMP4]], align 8, !tbaa [[TBAA7:![0-9]+]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq ptr [[TMP3]], [[TMP6]]
-; CHECK-NEXT: br i1 [[TMP7]], label %bb21, label %bb8
+; CHECK-NEXT: br i1 [[TMP7]], label [[BB21:%.*]], label [[BB8:%.*]]
; CHECK: bb8:
-; CHECK-NEXT: br label %bb11
+; CHECK-NEXT: br label [[BB11:%.*]]
; CHECK: bb9:
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq ptr [[TMP18:%.*]], [[TMP6]]
-; CHECK-NEXT: br i1 [[TMP10]], label %bb19, label %bb11
+; CHECK-NEXT: br i1 [[TMP10]], label [[BB19:%.*]], label [[BB11]]
; CHECK: bb11:
-; CHECK-NEXT: [[TMP12:%.*]] = phi ptr [ [[TMP17:%.*]], %bb9 ], [ undef, %bb8 ]
-; CHECK-NEXT: [[TMP13:%.*]] = phi ptr [ [[TMP18]], %bb9 ], [ [[TMP3]], %bb8 ]
-; CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP13]], align 8, !tbaa !8
-; CHECK-NEXT: [[TMP16:%.*]] = icmp eq ptr [[TMP15]], %arg1
+; CHECK-NEXT: [[TMP12:%.*]] = phi ptr [ [[TMP17:%.*]], [[BB9:%.*]] ], [ undef, [[BB8]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi ptr [ [[TMP18]], [[BB9]] ], [ [[TMP3]], [[BB8]] ]
+; CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP13]], align 8, !tbaa [[TBAA8:![0-9]+]]
+; CHECK-NEXT: [[TMP16:%.*]] = icmp eq ptr [[TMP15]], [[ARG1:%.*]]
; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], ptr [[TMP13]], ptr [[TMP12]]
-; CHECK-NEXT: [[TMP18]] = getelementptr inbounds %struct.foo, ptr [[TMP13]], i64 1
-; CHECK-NEXT: br i1 [[TMP16]], label %bb19, label %bb9
+; CHECK-NEXT: [[TMP18]] = getelementptr inbounds [[STRUCT_FOO:%.*]], ptr [[TMP13]], i64 1
+; CHECK-NEXT: br i1 [[TMP16]], label [[BB19]], label [[BB9]]
; CHECK: bb19:
-; CHECK-NEXT: [[TMP20:%.*]] = phi ptr [ null, %bb9 ], [ [[TMP17]], %bb11 ]
-; CHECK-NEXT: br label %bb21
+; CHECK-NEXT: [[TMP20:%.*]] = phi ptr [ null, [[BB9]] ], [ [[TMP17]], [[BB11]] ]
+; CHECK-NEXT: br label [[BB21]]
; CHECK: bb21:
-; CHECK-NEXT: [[TMP22:%.*]] = phi ptr [ null, %bb ], [ [[TMP20]], %bb19 ]
+; CHECK-NEXT: [[TMP22:%.*]] = phi ptr [ null, [[BB:%.*]] ], [ [[TMP20]], [[BB19]] ]
; CHECK-NEXT: ret ptr [[TMP22]]
;
bb:
diff --git a/llvm/test/Transforms/NewGVN/pr31573.ll b/llvm/test/Transforms/NewGVN/pr31573.ll
index 2382c487..7835e9d 100644
--- a/llvm/test/Transforms/NewGVN/pr31573.ll
+++ b/llvm/test/Transforms/NewGVN/pr31573.ll
@@ -10,7 +10,7 @@ define void @patatino(ptr %blah) {
; CHECK: while.cond:
; CHECK-NEXT: [[MEH:%.*]] = phi ptr [ [[BLAH:%.*]], [[ENTRY:%.*]] ], [ null, [[WHILE_BODY:%.*]] ]
; CHECK-NEXT: switch i32 undef, label [[WHILE_BODY]] [
-; CHECK-NEXT: i32 666, label [[WHILE_END:%.*]]
+; CHECK-NEXT: i32 666, label [[WHILE_END:%.*]]
; CHECK-NEXT: ]
; CHECK: while.body:
; CHECK-NEXT: br label [[WHILE_COND]]
diff --git a/llvm/test/Transforms/NewGVN/pr31594.ll b/llvm/test/Transforms/NewGVN/pr31594.ll
index 47294d5..d1a02d6 100644
--- a/llvm/test/Transforms/NewGVN/pr31594.ll
+++ b/llvm/test/Transforms/NewGVN/pr31594.ll
@@ -10,8 +10,8 @@ define i1 @patatino(ptr %blah, i32 %choice) {
; CHECK: while.cond:
; CHECK-NEXT: [[FOO:%.*]] = phi ptr [ [[BLAH:%.*]], [[ENTRY:%.*]] ], [ null, [[WHILE_BODY:%.*]] ]
; CHECK-NEXT: switch i32 [[CHOICE:%.*]], label [[WHILE_BODY]] [
-; CHECK-NEXT: i32 -1, label [[WHILE_END:%.*]]
-; CHECK-NEXT: i32 40, label [[LAND_END:%.*]]
+; CHECK-NEXT: i32 -1, label [[WHILE_END:%.*]]
+; CHECK-NEXT: i32 40, label [[LAND_END:%.*]]
; CHECK-NEXT: ]
; CHECK: land.end:
; CHECK-NEXT: br label [[WHILE_END]]
@@ -66,7 +66,7 @@ define void @foo(ptr %arg) {
; CHECK: bb2:
; CHECK-NEXT: br label [[BB1]]
; CHECK: bb3:
-; CHECK-NEXT: store i8 0, ptr [[TMP]], align 1, !g !0
+; CHECK-NEXT: store i8 0, ptr [[TMP]], align 1, !g [[META0:![0-9]+]]
; CHECK-NEXT: br label [[BB4:%.*]]
; CHECK: bb4:
; CHECK-NEXT: br label [[BB6:%.*]]
@@ -74,13 +74,13 @@ define void @foo(ptr %arg) {
; CHECK-NEXT: br i1 undef, label [[BB9:%.*]], label [[BB7:%.*]]
; CHECK: bb7:
; CHECK-NEXT: switch i8 0, label [[BB6]] [
-; CHECK-NEXT: i8 6, label [[BB8:%.*]]
+; CHECK-NEXT: i8 6, label [[BB8:%.*]]
; CHECK-NEXT: ]
; CHECK: bb8:
; CHECK-NEXT: store i8 poison, ptr null, align 1
; CHECK-NEXT: br label [[BB4]]
; CHECK: bb9:
-; CHECK-NEXT: store i8 0, ptr [[ARG]], align 1, !g !0
+; CHECK-NEXT: store i8 0, ptr [[ARG]], align 1, !g [[META0]]
; CHECK-NEXT: unreachable
;
bb:
diff --git a/llvm/test/Transforms/NewGVN/pr31613.ll b/llvm/test/Transforms/NewGVN/pr31613.ll
index 943cdbc..0bcf86a 100644
--- a/llvm/test/Transforms/NewGVN/pr31613.ll
+++ b/llvm/test/Transforms/NewGVN/pr31613.ll
@@ -74,7 +74,7 @@ declare void @c.d.p(i64, ptr)
define void @e(i32 %a0, i32 %a1, ptr %p2) {
; CHECK-LABEL: @e(
; CHECK-NEXT: [[F:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 [[A0:%.*]], ptr [[F]], align 4, !g !0
+; CHECK-NEXT: store i32 [[A0:%.*]], ptr [[F]], align 4, !g [[META0:![0-9]+]]
; CHECK-NEXT: br label [[H:%.*]]
; CHECK: h:
; CHECK-NEXT: call void @c.d.p(i64 8, ptr undef)
@@ -88,10 +88,10 @@ define void @e(i32 %a0, i32 %a1, ptr %p2) {
; CHECK-NEXT: br label [[R]]
; CHECK: r:
; CHECK-NEXT: switch i32 undef, label [[N:%.*]] [
-; CHECK-NEXT: i32 0, label [[S:%.*]]
+; CHECK-NEXT: i32 0, label [[S:%.*]]
; CHECK-NEXT: ]
; CHECK: s:
-; CHECK-NEXT: store i32 [[A1:%.*]], ptr [[F]], align 4, !g !0
+; CHECK-NEXT: store i32 [[A1:%.*]], ptr [[F]], align 4, !g [[META0]]
; CHECK-NEXT: br label [[H]]
; CHECK: n:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/NewGVN/pr31682.ll b/llvm/test/Transforms/NewGVN/pr31682.ll
index 00a1bf2..3d8c9e2 100644
--- a/llvm/test/Transforms/NewGVN/pr31682.ll
+++ b/llvm/test/Transforms/NewGVN/pr31682.ll
@@ -9,7 +9,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define void @bar() {
; CHECK-LABEL: @bar(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP:%.*]] = load ptr, ptr @global
+; CHECK-NEXT: [[TMP:%.*]] = load ptr, ptr @global, align 8
; CHECK-NEXT: br label [[BB2:%.*]]
; CHECK: bb2:
; CHECK-NEXT: br i1 undef, label [[BB2]], label [[BB7:%.*]]
diff --git a/llvm/test/Transforms/NewGVN/pr31758.ll b/llvm/test/Transforms/NewGVN/pr31758.ll
index 4318843..274d605 100644
--- a/llvm/test/Transforms/NewGVN/pr31758.ll
+++ b/llvm/test/Transforms/NewGVN/pr31758.ll
@@ -12,7 +12,7 @@ define void @tinkywinky() {
; CHECK: bb90:
; CHECK-NEXT: br label [[BB90]]
; CHECK: bb138:
-; CHECK-NEXT: store i8 poison, ptr null
+; CHECK-NEXT: store i8 poison, ptr null, align 1
; CHECK-NEXT: br label [[BB138:%.*]]
;
bb:
diff --git a/llvm/test/Transforms/NewGVN/pr32607.ll b/llvm/test/Transforms/NewGVN/pr32607.ll
index 4770724..7460ab5 100644
--- a/llvm/test/Transforms/NewGVN/pr32607.ll
+++ b/llvm/test/Transforms/NewGVN/pr32607.ll
@@ -7,7 +7,7 @@ define hidden void @foo() {
; CHECK: if:
; CHECK-NEXT: br i1 false, label [[L50:%.*]], label [[IF]]
; CHECK: L50:
-; CHECK-NEXT: store i8 poison, ptr null
+; CHECK-NEXT: store i8 poison, ptr null, align 1
; CHECK-NEXT: ret void
;
top:
diff --git a/llvm/test/Transforms/NewGVN/pr32836.ll b/llvm/test/Transforms/NewGVN/pr32836.ll
index 5488655..00f3fb0 100644
--- a/llvm/test/Transforms/NewGVN/pr32836.ll
+++ b/llvm/test/Transforms/NewGVN/pr32836.ll
@@ -5,19 +5,19 @@
@b = external global %struct.anon
define void @tinkywinky(i1 %patatino) {
; CHECK-LABEL: @tinkywinky(
-; CHECK-NEXT: store i32 8, ptr null
+; CHECK-NEXT: store i32 8, ptr null, align 4
; CHECK-NEXT: br i1 [[PATATINO:%.*]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: br label [[L:%.*]]
; CHECK: L:
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr null
-; CHECK-NEXT: [[BF_LOAD1:%.*]] = load i32, ptr @b
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr null, align 4
+; CHECK-NEXT: [[BF_LOAD1:%.*]] = load i32, ptr @b, align 4
; CHECK-NEXT: [[BF_VALUE:%.*]] = and i32 [[TMP1]], 536870911
; CHECK-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -536870912
; CHECK-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
-; CHECK-NEXT: store i32 [[BF_SET]], ptr @b
+; CHECK-NEXT: store i32 [[BF_SET]], ptr @b, align 4
; CHECK-NEXT: br label [[LOR_END:%.*]]
; CHECK: lor.end:
; CHECK-NEXT: br label [[L]]
diff --git a/llvm/test/Transforms/NewGVN/pr32838.ll b/llvm/test/Transforms/NewGVN/pr32838.ll
index 5ba68fa..87c93d9 100644
--- a/llvm/test/Transforms/NewGVN/pr32838.ll
+++ b/llvm/test/Transforms/NewGVN/pr32838.ll
@@ -10,7 +10,7 @@ define void @fn1(i64 noundef %arg) {
; CHECK: if.then:
; CHECK-NEXT: br i1 false, label [[FIRSTPHIBLOCK:%.*]], label [[TEMP:%.*]]
; CHECK: firstphiblock:
-; CHECK-NEXT: br i1 undef, label %for.cond17thread-pre-split, label [[SECONDPHIBLOCK:%.*]]
+; CHECK-NEXT: br i1 undef, label [[FOR_COND17THREAD_PRE_SPLIT:%.*]], label [[SECONDPHIBLOCK:%.*]]
; CHECK: secondphiblock:
; CHECK-NEXT: [[SECONDPHI:%.*]] = phi i64 [ [[THIRDPHI:%.*]], [[THIRDPHIBLOCK:%.*]] ], [ undef, [[FIRSTPHIBLOCK]] ]
; CHECK-NEXT: br i1 undef, label [[FIRSTPHIBLOCK]], label [[THIRDPHIBLOCK]]
@@ -55,7 +55,7 @@ define void @fn2(i64 noundef %arg) {
; CHECK-NEXT: br i1 false, label [[FIRSTPHIBLOCK:%.*]], label [[TEMP:%.*]]
; CHECK: firstphiblock:
; CHECK-NEXT: [[FIRSTPHI:%.*]] = phi i64 [ poison, [[IF_THEN]] ], [ [[SECONDPHI:%.*]], [[SECONDPHIBLOCK:%.*]] ]
-; CHECK-NEXT: br i1 undef, label %for.cond17thread-pre-split, label [[SECONDPHIBLOCK]]
+; CHECK-NEXT: br i1 undef, label [[FOR_COND17THREAD_PRE_SPLIT:%.*]], label [[SECONDPHIBLOCK]]
; CHECK: secondphiblock:
; CHECK-NEXT: [[SECONDPHI]] = phi i64 [ [[THIRDPHI:%.*]], [[THIRDPHIBLOCK:%.*]] ], [ [[FIRSTPHI]], [[FIRSTPHIBLOCK]] ]
; CHECK-NEXT: br i1 undef, label [[FIRSTPHIBLOCK]], label [[THIRDPHIBLOCK]]
@@ -65,7 +65,7 @@ define void @fn2(i64 noundef %arg) {
; CHECK: for.cond17thread-pre-split:
; CHECK-NEXT: br label [[COND_TRUE]]
; CHECK: cond.true:
-; CHECK-NEXT: [[FOURTHPHI:%.*]] = phi i64 [ [[ARG:%.*]], [[ENTRY:%.*]] ], [ [[FIRSTPHI]], %for.cond17thread-pre-split ]
+; CHECK-NEXT: [[FOURTHPHI:%.*]] = phi i64 [ [[ARG:%.*]], [[ENTRY:%.*]] ], [ [[FIRSTPHI]], [[FOR_COND17THREAD_PRE_SPLIT]] ]
; CHECK-NEXT: [[DIV]] = sdiv i64 [[FOURTHPHI]], 4
; CHECK-NEXT: br label [[THIRDPHIBLOCK]]
; CHECK: temp:
@@ -105,7 +105,7 @@ define void @fn3() {
; CHECK-NEXT: [[F_0:%.*]] = phi ptr [ @b, [[ENTRY:%.*]] ], [ @a, [[L1_LOOPEXIT:%.*]] ]
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond.loopexit:
-; CHECK-NEXT: store i8 poison, ptr null
+; CHECK-NEXT: store i8 poison, ptr null, align 1
; CHECK-NEXT: br label [[FOR_COND]]
; CHECK: for.cond:
; CHECK-NEXT: br i1 undef, label [[FOR_END14:%.*]], label [[FOR_COND1_PREHEADER:%.*]]
diff --git a/llvm/test/Transforms/NewGVN/pr32845.ll b/llvm/test/Transforms/NewGVN/pr32845.ll
index d1182a6..29b81b8 100644
--- a/llvm/test/Transforms/NewGVN/pr32845.ll
+++ b/llvm/test/Transforms/NewGVN/pr32845.ll
@@ -13,7 +13,7 @@ define void @tinkywinky() {
; CHECK-NEXT: [[F_0:%.*]] = phi ptr [ @b, [[ENTRY:%.*]] ], [ @a, [[L1_LOOPEXIT:%.*]] ]
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond.loopexit:
-; CHECK-NEXT: store i8 poison, ptr null
+; CHECK-NEXT: store i8 poison, ptr null, align 1
; CHECK-NEXT: br label [[FOR_COND]]
; CHECK: for.cond:
; CHECK-NEXT: br i1 undef, label [[FOR_END14:%.*]], label [[FOR_COND1_PREHEADER:%.*]]
diff --git a/llvm/test/Transforms/NewGVN/pr32852.ll b/llvm/test/Transforms/NewGVN/pr32852.ll
index 4fd5cf1..ad5badd 100644
--- a/llvm/test/Transforms/NewGVN/pr32852.ll
+++ b/llvm/test/Transforms/NewGVN/pr32852.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; Make sure GVN doesn't incorrectly think the branch terminating
; bb2 has a constant condition.
; RUN: opt -S -passes=newgvn %s | FileCheck %s
@@ -6,13 +7,26 @@
@patatino = private unnamed_addr constant [3 x i8] c"0\0A\00"
define void @tinkywinky() {
+; CHECK-LABEL: define void @tinkywinky() {
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr @a, align 4
+; CHECK-NEXT: [[TMP1:%.*]] = icmp sge i32 [[TMP]], 0
+; CHECK-NEXT: br i1 [[TMP1]], label [[BB2:%.*]], label [[BB7:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: [[TMP4:%.*]] = icmp sgt i32 [[TMP]], 0
+; CHECK-NEXT: br i1 [[TMP4]], label [[BB5:%.*]], label [[BB7]]
+; CHECK: bb5:
+; CHECK-NEXT: [[TMP6:%.*]] = call i32 (ptr, ...) @printf(ptr @patatino)
+; CHECK-NEXT: br label [[BB7]]
+; CHECK: bb7:
+; CHECK-NEXT: ret void
+;
bb:
%tmp = load i32, ptr @a
%tmp1 = icmp sge i32 %tmp, 0
br i1 %tmp1, label %bb2, label %bb7
bb2:
%tmp4 = icmp sgt i32 %tmp, 0
-; CHECK: br i1 %tmp4, label %bb5, label %bb7
br i1 %tmp4, label %bb5, label %bb7
bb5:
%tmp6 = call i32 (ptr, ...) @printf(ptr @patatino)
diff --git a/llvm/test/Transforms/NewGVN/pr32897.ll b/llvm/test/Transforms/NewGVN/pr32897.ll
index 35a3b00..881c3a8 100644
--- a/llvm/test/Transforms/NewGVN/pr32897.ll
+++ b/llvm/test/Transforms/NewGVN/pr32897.ll
@@ -6,7 +6,7 @@ define void @tinkywinky(ptr %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[BODY:%.*]]
; CHECK: body:
-; CHECK-NEXT: store i64 undef, ptr [[B:%.*]]
+; CHECK-NEXT: store i64 undef, ptr [[B:%.*]], align 4
; CHECK-NEXT: br i1 undef, label [[BODY]], label [[END:%.*]]
; CHECK: end:
; CHECK-NEXT: br label [[BODY]]
diff --git a/llvm/test/Transforms/NewGVN/pr32934.ll b/llvm/test/Transforms/NewGVN/pr32934.ll
index fa725f88..c8218c2 100644
--- a/llvm/test/Transforms/NewGVN/pr32934.ll
+++ b/llvm/test/Transforms/NewGVN/pr32934.ll
@@ -1,39 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -S -passes=newgvn %s | FileCheck %s
-; CHECK: define void @tinkywinky() {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %d = alloca i32, align 4
-; CHECK-NEXT: store i32 0, ptr null, align 4
-; CHECK-NEXT: br label %for.cond
-; CHECK: for.cond: ; preds = %if.end, %entry
-; CHECK-NEXT: %0 = load i32, ptr null, align 4
-; CHECK-NEXT: %cmp = icmp slt i32 %0, 1
-; CHECK-NEXT: br i1 %cmp, label %for.body, label %while.cond
-; CHECK: for.body: ; preds = %for.cond
-; CHECK-NEXT: %1 = load i32, ptr @a, align 4
-; CHECK-NEXT: store i32 %1, ptr %d, align 4
-; CHECK-NEXT: br label %L
-; CHECK: L: ; preds = %if.then, %for.body
-; CHECK-NEXT: %tobool = icmp ne i32 %1, 0
-; CHECK-NEXT: br i1 %tobool, label %if.then, label %if.end
-; CHECK: if.then: ; preds = %L
-; CHECK-NEXT: call void (ptr, ...) @printf(ptr @patatino)
-; CHECK-NEXT: br label %L
-; CHECK: if.end: ; preds = %L
-; CHECK-NEXT: br label %for.cond
-; CHECK: while.cond: ; preds = %while.body, %for.cond
-; CHECK-NEXT: br i1 undef, label %while.body, label %while.end
-; CHECK: while.body: ; preds = %while.cond
-; CHECK-NEXT: call void (ptr, ...) @printf(ptr @patatino)
-; CHECK-NEXT: br label %while.cond
-; CHECK: while.end:
-; CHECK-NEXT: %2 = load i32, ptr @a, align 4
-; CHECK-NEXT: store i32 %2, ptr undef, align 4
-; CHECK-NEXT: ret void
@a = external global i32, align 4
@patatino = external unnamed_addr constant [2 x i8], align 1
define void @tinkywinky() {
+; CHECK-LABEL: define void @tinkywinky() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[D:%.*]] = alloca i32, align 4
+; CHECK-NEXT: store i32 0, ptr null, align 4
+; CHECK-NEXT: br label [[FOR_COND:%.*]]
+; CHECK: for.cond:
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr null, align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 1
+; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[WHILE_COND:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4
+; CHECK-NEXT: store i32 [[TMP1]], ptr [[D]], align 4
+; CHECK-NEXT: br label [[L:%.*]]
+; CHECK: L:
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: call void (ptr, ...) @printf(ptr @patatino)
+; CHECK-NEXT: br label [[L]]
+; CHECK: if.end:
+; CHECK-NEXT: br label [[FOR_COND]]
+; CHECK: while.cond:
+; CHECK-NEXT: br i1 undef, label [[WHILE_BODY:%.*]], label [[WHILE_END:%.*]]
+; CHECK: while.body:
+; CHECK-NEXT: call void (ptr, ...) @printf(ptr @patatino)
+; CHECK-NEXT: br label [[WHILE_COND]]
+; CHECK: while.end:
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @a, align 4
+; CHECK-NEXT: store i32 [[TMP2]], ptr undef, align 4
+; CHECK-NEXT: ret void
+;
entry:
%d = alloca i32, align 4
store i32 0, ptr null, align 4
diff --git a/llvm/test/Transforms/NewGVN/pr32945.ll b/llvm/test/Transforms/NewGVN/pr32945.ll
index ebf3813..7aabe4d 100644
--- a/llvm/test/Transforms/NewGVN/pr32945.ll
+++ b/llvm/test/Transforms/NewGVN/pr32945.ll
@@ -1,9 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -S -passes=newgvn %s | FileCheck %s
-; CHECK-NOT: call i32 @llvm.ssa.copy
@d = external global i32
@e = external global i32
define void @tinkywinky() {
+; CHECK-LABEL: define void @tinkywinky() {
+; CHECK-NEXT: br i1 true, label [[LOR_LHS_FALSE:%.*]], label [[COND_TRUE:%.*]]
+; CHECK: lor.lhs.false:
+; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr @d, align 4
+; CHECK-NEXT: [[PATATINO:%.*]] = load i32, ptr null, align 4
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[TMP]], [[PATATINO]]
+; CHECK-NEXT: store i32 [[OR]], ptr @d, align 4
+; CHECK-NEXT: br label [[COND_TRUE]]
+; CHECK: cond.true:
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @e, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @d, align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE6:%.*]], label [[COND_FALSE:%.*]]
+; CHECK: cond.true6:
+; CHECK-NEXT: [[CMP7:%.*]] = icmp slt i32 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[CMP7]], label [[COND_FALSE]], label [[COND_FALSE]]
+; CHECK: cond.false:
+; CHECK-NEXT: ret void
+;
br i1 true, label %lor.lhs.false, label %cond.true
lor.lhs.false:
%tmp = load i32, ptr @d, align 4
diff --git a/llvm/test/Transforms/NewGVN/pr32952.ll b/llvm/test/Transforms/NewGVN/pr32952.ll
index 5157bb2..49e4843 100644
--- a/llvm/test/Transforms/NewGVN/pr32952.ll
+++ b/llvm/test/Transforms/NewGVN/pr32952.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; PR32952: Don't erroneously consider two phi nodes congruent when they
; have the same arguments but different incoming edges.
; RUN: opt -passes=newgvn -S %s | FileCheck %s
@@ -6,6 +7,31 @@
@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
define i32 @tinkywinky() {
+; CHECK-LABEL: define i32 @tinkywinky() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @a, align 2
+; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32
+; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[CONV]], -1
+; CHECK-NEXT: [[CONV1:%.*]] = trunc i32 [[NEG]] to i16
+; CHECK-NEXT: [[CONV3:%.*]] = zext i16 [[CONV1]] to i32
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV]], [[CONV3]]
+; CHECK-NEXT: br i1 [[CMP]], label [[TINKY:%.*]], label [[WINKY:%.*]]
+; CHECK: tinky:
+; CHECK-NEXT: store i16 2, ptr @a, align 2
+; CHECK-NEXT: br label [[PATATINO:%.*]]
+; CHECK: winky:
+; CHECK-NEXT: br label [[PATATINO]]
+; CHECK: patatino:
+; CHECK-NEXT: [[MEH:%.*]] = phi i16 [ [[TMP0]], [[WINKY]] ], [ [[CONV1]], [[TINKY]] ]
+; CHECK-NEXT: [[BANANA:%.*]] = phi i16 [ [[TMP0]], [[TINKY]] ], [ [[CONV1]], [[WINKY]] ]
+; CHECK-NEXT: br label [[END:%.*]]
+; CHECK: end:
+; CHECK-NEXT: [[PROMOTED:%.*]] = zext i16 [[BANANA]] to i32
+; CHECK-NEXT: [[OTHER:%.*]] = zext i16 [[MEH]] to i32
+; CHECK-NEXT: [[FIRST:%.*]] = tail call i32 (ptr, ...) @printf(ptr @.str, i32 [[PROMOTED]])
+; CHECK-NEXT: [[SECOND:%.*]] = tail call i32 (ptr, ...) @printf(ptr @.str, i32 [[OTHER]])
+; CHECK-NEXT: ret i32 0
+;
entry:
%0 = load i16, ptr @a, align 2
%conv = sext i16 %0 to i32
@@ -23,15 +49,11 @@ winky:
br label %patatino
patatino:
-; CHECK: %meh = phi i16 [ %0, %winky ], [ %conv1, %tinky ]
-; CHECK: %banana = phi i16 [ %0, %tinky ], [ %conv1, %winky ]
%meh = phi i16 [ %0, %winky ], [ %conv1, %tinky ]
%banana = phi i16 [ %0, %tinky ], [ %conv1, %winky ]
br label %end
end:
-; CHECK: %promoted = zext i16 %banana to i32
-; CHECK: %other = zext i16 %meh to i32
%promoted = zext i16 %banana to i32
%other = zext i16 %meh to i32
%first = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %promoted)
diff --git a/llvm/test/Transforms/NewGVN/pr33014.ll b/llvm/test/Transforms/NewGVN/pr33014.ll
index f6e9197..04f9df2 100644
--- a/llvm/test/Transforms/NewGVN/pr33014.ll
+++ b/llvm/test/Transforms/NewGVN/pr33014.ll
@@ -1,33 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; Make sure we don't end up in an infinite recursion in singleReachablePHIPath().
; RUN: opt < %s -passes=newgvn -S | FileCheck %s
@c = external global i64, align 8
-; CHECK-LABEL: define void @tinkywinky() {
-; CHECK: entry:
-; CHECK-NEXT: br i1 undef, label %l2, label %if.then
-; CHECK: if.then: ; preds = %entry
-; CHECK-NEXT: br label %for.body
-; CHECK: ph: ; preds = %back, %ontrue
-; CHECK-NEXT: br label %for.body
-; CHECK: for.body: ; preds = %ph, %if.then
-; CHECK-NEXT: br i1 undef, label %ontrue, label %onfalse
-; CHECK: onfalse: ; preds = %for.body
-; CHECK-NEXT: %patatino = load i64, ptr @c
-; CHECK-NEXT: ret void
-; CHECK: ontrue: ; preds = %for.body
-; CHECK-NEXT: %dipsy = load i64, ptr @c
-; CHECK-NEXT: br label %ph
-; CHECK: back: ; preds = %l2
-; CHECK-NEXT: store i8 poison, ptr null
-; CHECK-NEXT: br label %ph
-; CHECK: end: ; preds = %l2
-; CHECK-NEXT: ret void
-; CHECK: l2: ; preds = %entry
-; CHECK-NEXT: br i1 false, label %back, label %end
-; CHECK-NEXT: }
define void @tinkywinky() {
+; CHECK-LABEL: define void @tinkywinky() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 undef, label [[L2:%.*]], label [[IF_THEN:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: ph:
+; CHECK-NEXT: br label [[FOR_BODY]]
+; CHECK: for.body:
+; CHECK-NEXT: br i1 undef, label [[ONTRUE:%.*]], label [[ONFALSE:%.*]]
+; CHECK: onfalse:
+; CHECK-NEXT: [[PATATINO:%.*]] = load i64, ptr @c, align 4
+; CHECK-NEXT: ret void
+; CHECK: ontrue:
+; CHECK-NEXT: [[DIPSY:%.*]] = load i64, ptr @c, align 4
+; CHECK-NEXT: br label [[PH:%.*]]
+; CHECK: back:
+; CHECK-NEXT: store i8 poison, ptr null, align 1
+; CHECK-NEXT: br label [[PH]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+; CHECK: l2:
+; CHECK-NEXT: br i1 false, label [[BACK:%.*]], label [[END:%.*]]
+;
entry:
br i1 undef, label %l2, label %if.then
if.then:
diff --git a/llvm/test/Transforms/NewGVN/pr33086.ll b/llvm/test/Transforms/NewGVN/pr33086.ll
index 54802cd..ab6c00d 100644
--- a/llvm/test/Transforms/NewGVN/pr33086.ll
+++ b/llvm/test/Transforms/NewGVN/pr33086.ll
@@ -1,31 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S %s | FileCheck %s
; REQUIRES: asserts
-; CHECK-LABEL: define void @tinkywinky() {
-; CHECK: entry:
-; CHECK-NEXT: br i1 undef, label %for.cond18, label %for.cond.preheader
-; CHECK: for.cond.preheader:
-; CHECK-NEXT: br label %for.cond2thread-pre-split
-; CHECK: for.cond2thread-pre-split:
-; CHECK-NEXT: %conv24 = phi i32 [ 0, %for.cond.preheader ], [ %conv, %for.inc.split ]
-; CHECK-NEXT: br label %for.inc.split
-; CHECK: for.inc.split:
-; CHECK-NEXT: %add = shl nsw i32 %conv24, 16
-; CHECK-NEXT: %sext23 = add i32 %add, 65536
-; CHECK-NEXT: %conv = ashr exact i32 %sext23, 16
-; CHECK-NEXT: %cmp = icmp slt i32 %sext23, 3604480
-; CHECK-NEXT: br i1 %cmp, label %for.cond2thread-pre-split, label %l1.loopexit
-; CHECK: l1.loopexit:
-; CHECK-NEXT: br label %l1
-; CHECK: l1:
-; CHECK-NEXT: %0 = load i16, ptr null, align 2
-; CHECK-NEXT: %g.0.g.0..pr = load i16, ptr null, align 2
-; CHECK-NEXT: ret void
-; CHECK: for.cond18:
-; CHECK-NEXT: br label %l1
-; CHECK-NEXT: }
define void @tinkywinky() {
+; CHECK-LABEL: define void @tinkywinky() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 undef, label [[FOR_COND18:%.*]], label [[FOR_COND_PREHEADER:%.*]]
+; CHECK: for.cond.preheader:
+; CHECK-NEXT: br label [[FOR_COND2THREAD_PRE_SPLIT:%.*]]
+; CHECK: for.cond2thread-pre-split:
+; CHECK-NEXT: [[CONV24:%.*]] = phi i32 [ 0, [[FOR_COND_PREHEADER]] ], [ [[CONV:%.*]], [[FOR_INC_SPLIT:%.*]] ]
+; CHECK-NEXT: br label [[FOR_INC_SPLIT]]
+; CHECK: for.inc.split:
+; CHECK-NEXT: [[ADD:%.*]] = shl nsw i32 [[CONV24]], 16
+; CHECK-NEXT: [[SEXT23:%.*]] = add i32 [[ADD]], 65536
+; CHECK-NEXT: [[CONV]] = ashr exact i32 [[SEXT23]], 16
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[SEXT23]], 3604480
+; CHECK-NEXT: br i1 [[CMP]], label [[FOR_COND2THREAD_PRE_SPLIT]], label [[L1_LOOPEXIT:%.*]]
+; CHECK: l1.loopexit:
+; CHECK-NEXT: br label [[L1:%.*]]
+; CHECK: l1:
+; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr null, align 2
+; CHECK-NEXT: [[G_0_G_0__PR:%.*]] = load i16, ptr null, align 2
+; CHECK-NEXT: ret void
+; CHECK: for.cond18:
+; CHECK-NEXT: br label [[L1]]
+;
entry:
br i1 undef, label %for.cond18, label %for.cond.preheader
diff --git a/llvm/test/Transforms/NewGVN/pr33116.ll b/llvm/test/Transforms/NewGVN/pr33116.ll
index f5ef3ae..6609ef9 100644
--- a/llvm/test/Transforms/NewGVN/pr33116.ll
+++ b/llvm/test/Transforms/NewGVN/pr33116.ll
@@ -13,7 +13,7 @@ define void @b() {
; CHECK: c:
; CHECK-NEXT: br i1 undef, label [[IF_G:%.*]], label [[IF_E]]
; CHECK: if.g:
-; CHECK-NEXT: store i32 undef, ptr @a
+; CHECK-NEXT: store i32 undef, ptr @a, align 4
; CHECK-NEXT: br label [[WHILE_D]]
; CHECK: if.e:
; CHECK-NEXT: br label [[F]]
diff --git a/llvm/test/Transforms/NewGVN/pr33187.ll b/llvm/test/Transforms/NewGVN/pr33187.ll
index e5c3da2..37668bb 100644
--- a/llvm/test/Transforms/NewGVN/pr33187.ll
+++ b/llvm/test/Transforms/NewGVN/pr33187.ll
@@ -30,7 +30,7 @@ define void @fn1() local_unnamed_addr #0 {
; CHECK: while.body12:
; CHECK-NEXT: br i1 undef, label [[IF_END18]], label [[L]]
; CHECK: L.loopexit:
-; CHECK-NEXT: store i8 poison, ptr null
+; CHECK-NEXT: store i8 poison, ptr null, align 1
; CHECK-NEXT: br label [[L]]
; CHECK: L:
; CHECK-NEXT: [[H_125]] = phi i32 [ [[H_127]], [[WHILE_BODY12]] ], [ poison, [[L_LOOPEXIT]] ]
@@ -114,13 +114,13 @@ attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="fals
define void @a() {
; CHECK-LABEL: @a(
; CHECK-NEXT: b:
-; CHECK-NEXT: store ptr null, ptr null
+; CHECK-NEXT: store ptr null, ptr null, align 8
; CHECK-NEXT: br label [[D:%.*]]
; CHECK: d:
; CHECK-NEXT: [[I:%.*]] = phi ptr [ null, [[B:%.*]] ], [ [[E:%.*]], [[F:%.*]] ]
; CHECK-NEXT: br i1 undef, label [[F]], label [[G:%.*]]
; CHECK: g:
-; CHECK-NEXT: store ptr [[I]], ptr null
+; CHECK-NEXT: store ptr [[I]], ptr null, align 8
; CHECK-NEXT: unreachable
; CHECK: f:
; CHECK-NEXT: [[E]] = getelementptr i8, ptr [[I]], i64 1
diff --git a/llvm/test/Transforms/NewGVN/pr33196.ll b/llvm/test/Transforms/NewGVN/pr33196.ll
index c312d5e..c04b895 100644
--- a/llvm/test/Transforms/NewGVN/pr33196.ll
+++ b/llvm/test/Transforms/NewGVN/pr33196.ll
@@ -1,33 +1,6 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -S -passes=newgvn %s | FileCheck %s
-; CHECK: define i32 @main() {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %tmp = load i32, ptr @d, align 4
-; CHECK-NEXT: %tmp1 = load i32, ptr @c, align 4
-; CHECK-NEXT: %tobool = icmp eq i32 %tmp1, -1
-; CHECK-NEXT: br i1 %tobool, label %if.end, label %if.then
-; CHECK: if.then:
-; CHECK-NEXT: br label %L
-; CHECK: L:
-; CHECK-NEXT: %e.0 = phi i32 [ 0, %if.then ], [ %e.1, %if.then4 ]
-; CHECK-NEXT: br label %if.end
-; CHECK: if.end:
-; CHECK-NEXT: %e.1 = phi i32 [ %e.0, %L ], [ %tmp, %entry ]
-; CHECK-NEXT: store i32 %e.1, ptr @a, align 4
-; CHECK-NEXT: %tmp2 = load i32, ptr @b, align 4
-; CHECK-NEXT: store i32 0, ptr @b, align 4
-; CHECK-NEXT: %sext = shl i32 %tmp2, 16
-; CHECK-NEXT: %conv1 = ashr exact i32 %sext, 16
-; CHECK-NEXT: %add = add nsw i32 %conv1, %tmp1
-; CHECK-NEXT: %add2 = add nsw i32 %add, %e.1
-; CHECK-NEXT: store i32 %add2, ptr @a, align 4
-; CHECK-NEXT: %tobool3 = icmp eq i32 %add2, 0
-; CHECK-NEXT: br i1 %tobool3, label %if.end5, label %if.then4
-; CHECK: if.then4:
-; CHECK-NEXT: br label %L
-; CHECK: if.end5:
-; CHECK-NEXT: ret i32 0
-; CHECK-NEXT: }
@d = global i32 1, align 4
@c = common global i32 0, align 4
@@ -35,6 +8,34 @@
@b = common global i32 0, align 4
define i32 @main() {
+; CHECK-LABEL: define i32 @main() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr @d, align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @c, align 4
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP1]], -1
+; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: br label [[L:%.*]]
+; CHECK: L:
+; CHECK-NEXT: [[E_0:%.*]] = phi i32 [ 0, [[IF_THEN]] ], [ [[E_1:%.*]], [[IF_THEN4:%.*]] ]
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: [[E_1]] = phi i32 [ [[E_0]], [[L]] ], [ [[TMP]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: store i32 [[E_1]], ptr @a, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @b, align 4
+; CHECK-NEXT: store i32 0, ptr @b, align 4
+; CHECK-NEXT: [[SEXT:%.*]] = shl i32 [[TMP2]], 16
+; CHECK-NEXT: [[CONV1:%.*]] = ashr exact i32 [[SEXT]], 16
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], [[TMP1]]
+; CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[ADD]], [[E_1]]
+; CHECK-NEXT: store i32 [[ADD2]], ptr @a, align 4
+; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp eq i32 [[ADD2]], 0
+; CHECK-NEXT: br i1 [[TOBOOL3]], label [[IF_END5:%.*]], label [[IF_THEN4]]
+; CHECK: if.then4:
+; CHECK-NEXT: br label [[L]]
+; CHECK: if.end5:
+; CHECK-NEXT: ret i32 0
+;
entry:
%tmp = load i32, ptr @d, align 4
%tmp1 = load i32, ptr @c, align 4
diff --git a/llvm/test/Transforms/NewGVN/pr33204.ll b/llvm/test/Transforms/NewGVN/pr33204.ll
index 99c4824..482e35e 100644
--- a/llvm/test/Transforms/NewGVN/pr33204.ll
+++ b/llvm/test/Transforms/NewGVN/pr33204.ll
@@ -20,10 +20,10 @@ define void @hoge(i32 %arg) {
; CHECK-NEXT: [[TMP:%.*]] = phi i32 [ 0, [[BB1:%.*]] ], [ [[ARG:%.*]], [[BB:%.*]] ]
; CHECK-NEXT: br label [[BB6:%.*]]
; CHECK: bb3:
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @global, align 4, !h !0
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @global, align 4, !h [[META0:![0-9]+]]
; CHECK-NEXT: unreachable
; CHECK: bb6:
-; CHECK-NEXT: store i32 [[TMP]], ptr @global.1, align 4, !h !0
+; CHECK-NEXT: store i32 [[TMP]], ptr @global.1, align 4, !h [[META0]]
; CHECK-NEXT: br i1 undef, label [[BB7:%.*]], label [[BB1]]
; CHECK: bb7:
; CHECK-NEXT: br i1 undef, label [[BB10:%.*]], label [[BB8:%.*]]
@@ -33,7 +33,7 @@ define void @hoge(i32 %arg) {
; CHECK-NEXT: store i8 poison, ptr null, align 1
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb10:
-; CHECK-NEXT: store i32 0, ptr @global, align 4, !h !0
+; CHECK-NEXT: store i32 0, ptr @global, align 4, !h [[META0]]
; CHECK-NEXT: br label [[BB7]]
;
bb:
diff --git a/llvm/test/Transforms/NewGVN/pr33305.ll b/llvm/test/Transforms/NewGVN/pr33305.ll
index f87cf08..3a19f61 100644
--- a/llvm/test/Transforms/NewGVN/pr33305.ll
+++ b/llvm/test/Transforms/NewGVN/pr33305.ll
@@ -19,14 +19,14 @@ target triple = "x86_64-apple-macosx10.12.0"
define i32 @main() local_unnamed_addr #0 {
; CHECK-LABEL: @main(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[DOTPR_I:%.*]] = load i32, ptr @c, align 4, !tbaa !3
+; CHECK-NEXT: [[DOTPR_I:%.*]] = load i32, ptr @c, align 4, !tbaa [[TBAA3:![0-9]+]]
; CHECK-NEXT: [[CMP13_I:%.*]] = icmp slt i32 [[DOTPR_I]], 1
; CHECK-NEXT: br i1 [[CMP13_I]], label [[FOR_COND1_PREHEADER_LR_PH_I:%.*]], label [[ENTRY_FOR_END9_I_CRIT_EDGE:%.*]]
; CHECK: entry.for.end9.i_crit_edge:
-; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr @h, align 4, !tbaa !3
+; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr @h, align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: br label [[FOR_END9_I:%.*]]
; CHECK: for.cond1.preheader.lr.ph.i:
-; CHECK-NEXT: [[G_PROMOTED14_I:%.*]] = load i32, ptr @g, align 4, !tbaa !3
+; CHECK-NEXT: [[G_PROMOTED14_I:%.*]] = load i32, ptr @g, align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: br label [[FOR_COND1_PREHEADER_I:%.*]]
; CHECK: for.cond1.preheader.i:
; CHECK-NEXT: [[INC816_I:%.*]] = phi i32 [ [[DOTPR_I]], [[FOR_COND1_PREHEADER_LR_PH_I]] ], [ [[INC8_I:%.*]], [[FOR_INC7_I:%.*]] ]
@@ -42,9 +42,9 @@ define i32 @main() local_unnamed_addr #0 {
; CHECK: lor.rhs.i:
; CHECK-NEXT: [[LNOT_I:%.*]] = xor i1 [[TOBOOL_I]], true
; CHECK-NEXT: [[LNOT_EXT_I:%.*]] = zext i1 [[LNOT_I]] to i32
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @e, align 4, !tbaa !3
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @e, align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[XOR_I:%.*]] = xor i32 [[TMP3]], [[LNOT_EXT_I]]
-; CHECK-NEXT: store i32 [[XOR_I]], ptr @e, align 4, !tbaa !3
+; CHECK-NEXT: store i32 [[XOR_I]], ptr @e, align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: br label [[LOR_END_I]]
; CHECK: lor.end.i:
; CHECK-NEXT: [[INC_I]] = add nuw nsw i32 [[INC12_I]], 1
@@ -55,28 +55,28 @@ define i32 @main() local_unnamed_addr #0 {
; CHECK-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[INC816_I]], 0
; CHECK-NEXT: br i1 [[CMP_I]], label [[FOR_COND1_PREHEADER_I]], label [[FOR_COND_FOR_END9_CRIT_EDGE_I:%.*]]
; CHECK: for.cond.for.end9_crit_edge.i:
-; CHECK-NEXT: store i32 0, ptr @g, align 4, !tbaa !3
-; CHECK-NEXT: store i32 2, ptr @h, align 4, !tbaa !3
-; CHECK-NEXT: store i32 [[INC8_I]], ptr @c, align 4, !tbaa !3
+; CHECK-NEXT: store i32 0, ptr @g, align 4, !tbaa [[TBAA3]]
+; CHECK-NEXT: store i32 2, ptr @h, align 4, !tbaa [[TBAA3]]
+; CHECK-NEXT: store i32 [[INC8_I]], ptr @c, align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: br label [[FOR_END9_I]]
; CHECK: for.end9.i:
; CHECK-NEXT: [[TMP4:%.*]] = phi i32 [ [[DOTPRE]], [[ENTRY_FOR_END9_I_CRIT_EDGE]] ], [ 2, [[FOR_COND_FOR_END9_CRIT_EDGE_I]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr @b, align 8, !tbaa !7
-; CHECK-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4, !tbaa !3
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr @e, align 4, !tbaa !3
+; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr @b, align 8, !tbaa [[TBAA7:![0-9]+]]
+; CHECK-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4, !tbaa [[TBAA3]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr @e, align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[CMP10_I:%.*]] = icmp slt i32 [[TMP6]], -1
; CHECK-NEXT: br i1 [[CMP10_I]], label [[IF_THEN_I:%.*]], label [[FN1_EXIT:%.*]]
; CHECK: if.then.i:
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr @f, align 4, !tbaa !3
-; CHECK-NEXT: store i32 [[TMP7]], ptr [[TMP5]], align 4, !tbaa !3
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr @f, align 4, !tbaa [[TBAA3]]
+; CHECK-NEXT: store i32 [[TMP7]], ptr [[TMP5]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: br label [[FN1_EXIT]]
; CHECK: fn1.exit:
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr @a, align 4, !tbaa !3
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr @a, align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP8]], 0
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: [[PUTS2:%.*]] = tail call i32 @puts(ptr @str.2)
-; CHECK-NEXT: tail call void @abort()
+; CHECK-NEXT: tail call void @abort() #[[ATTR3:[0-9]+]]
; CHECK-NEXT: unreachable
; CHECK: if.end:
; CHECK-NEXT: [[PUTS:%.*]] = tail call i32 @puts(ptr @str)
diff --git a/llvm/test/Transforms/NewGVN/pr33367.ll b/llvm/test/Transforms/NewGVN/pr33367.ll
index dc5d190..597caa2 100644
--- a/llvm/test/Transforms/NewGVN/pr33367.ll
+++ b/llvm/test/Transforms/NewGVN/pr33367.ll
@@ -11,19 +11,19 @@ define %MNR_struct @f000316011717_2(ptr %pDS, ptr %pCG) #2 {
; CHECK-NEXT: Entry:
; CHECK-NEXT: [[RESTART:%.*]] = alloca [[MNR_STRUCT:%.*]], align 8
; CHECK-NEXT: [[PCARRY:%.*]] = getelementptr [[DS_STRUCT:%.*]], ptr [[PDS:%.*]], i32 0, i32 1
-; CHECK-NEXT: [[BASE:%.*]] = load ptr, ptr [[PDS]], align 8, !tbaa !14
+; CHECK-NEXT: [[BASE:%.*]] = load ptr, ptr [[PDS]], align 8, !tbaa [[TBAA14:![0-9]+]]
; CHECK-NEXT: [[ABSADDR:%.*]] = getelementptr i64, ptr [[BASE]], i64 9
-; CHECK-NEXT: [[EXTARGET:%.*]] = load i64, ptr [[ABSADDR]], align 8, !tbaa !4
+; CHECK-NEXT: [[EXTARGET:%.*]] = load i64, ptr [[ABSADDR]], align 8, !tbaa [[TBAA4:![0-9]+]]
; CHECK-NEXT: [[TEMPLATE:%.*]] = icmp eq i64 [[EXTARGET]], 8593987412
; CHECK-NEXT: br i1 [[TEMPLATE]], label %"BB3.000316011731#1", label [[BB2_000316011731_5:%.*]]
; CHECK: "BB3.000316011731#1":
; CHECK-NEXT: [[PBASE8:%.*]] = getelementptr [32 x ptr], ptr [[PDS]], i64 0, i64 29
-; CHECK-NEXT: [[BASE9:%.*]] = load ptr, ptr [[PBASE8]], align 8, !tbaa !14
+; CHECK-NEXT: [[BASE9:%.*]] = load ptr, ptr [[PBASE8]], align 8, !tbaa [[TBAA14]]
; CHECK-NEXT: [[ABSADDR1:%.*]] = getelementptr i64, ptr [[BASE9]], i64 7
-; CHECK-NEXT: [[RMEM:%.*]] = load i64, ptr [[ABSADDR1]], align 8, !tbaa !4
+; CHECK-NEXT: [[RMEM:%.*]] = load i64, ptr [[ABSADDR1]], align 8, !tbaa [[TBAA4]]
; CHECK-NEXT: [[PWT:%.*]] = getelementptr [[DS_STRUCT]], ptr [[PDS]], i32 0, i32 2
; CHECK-NEXT: [[PWTE:%.*]] = getelementptr [32 x i16], ptr [[PWT]], i64 0, i64 8593987412
-; CHECK-NEXT: [[SHIFTS:%.*]] = load i16, ptr [[PWTE]], align 2, !tbaa !18, !invariant.load !20
+; CHECK-NEXT: [[SHIFTS:%.*]] = load i16, ptr [[PWTE]], align 2, !tbaa [[TBAA18:![0-9]+]], !invariant.load [[META20:![0-9]+]]
; CHECK-NEXT: [[SLOWJ:%.*]] = icmp eq i16 [[SHIFTS]], 0
; CHECK-NEXT: br i1 [[SLOWJ]], label [[BB2_000316011731_5]], label %"BB3.000316011731#1.1"
; CHECK: BB2.000316011731.5:
@@ -34,22 +34,22 @@ define %MNR_struct @f000316011717_2(ptr %pDS, ptr %pCG) #2 {
; CHECK-NEXT: [[SHIFTS1:%.*]] = zext i16 [[SHIFTS]] to i64
; CHECK-NEXT: [[VAL:%.*]] = call i64 @llvm.x86.bmi.bextr.64(i64 [[RMEM]], i64 [[SHIFTS1]])
; CHECK-NEXT: [[PREG:%.*]] = getelementptr [64 x i64], ptr [[PCG:%.*]], i64 0, i64 12
-; CHECK-NEXT: store i64 [[VAL]], ptr [[PREG]], align 32, !tbaa !10
+; CHECK-NEXT: store i64 [[VAL]], ptr [[PREG]], align 32, !tbaa [[TBAA10:![0-9]+]]
; CHECK-NEXT: [[PREG2:%.*]] = getelementptr [64 x i64], ptr [[PCG]], i64 0, i64 14
-; CHECK-NEXT: [[REG:%.*]] = load i64, ptr [[PREG2]], align 16, !tbaa !12
-; CHECK-NEXT: [[BASE2:%.*]] = load ptr, ptr [[PBASE8]], align 8, !tbaa !14
+; CHECK-NEXT: [[REG:%.*]] = load i64, ptr [[PREG2]], align 16, !tbaa [[TBAA12:![0-9]+]]
+; CHECK-NEXT: [[BASE2:%.*]] = load ptr, ptr [[PBASE8]], align 8, !tbaa [[TBAA14]]
; CHECK-NEXT: [[ABSADDR2:%.*]] = getelementptr i64, ptr [[BASE2]], i64 [[REG]]
-; CHECK-NEXT: [[RMEM2:%.*]] = load i64, ptr [[ABSADDR2]], align 8, !tbaa !1
+; CHECK-NEXT: [[RMEM2:%.*]] = load i64, ptr [[ABSADDR2]], align 8, !tbaa [[TBAA1:![0-9]+]]
; CHECK-NEXT: [[PREG7:%.*]] = getelementptr [64 x i64], ptr [[PCG]], i64 0, i64 9
-; CHECK-NEXT: store i64 [[RMEM2]], ptr [[PREG7]], align 8, !tbaa !8
+; CHECK-NEXT: store i64 [[RMEM2]], ptr [[PREG7]], align 8, !tbaa [[TBAA8:![0-9]+]]
; CHECK-NEXT: [[ADD2C279:%.*]] = add i64 [[RMEM2]], [[VAL]]
; CHECK-NEXT: [[CCHK:%.*]] = icmp sge i64 [[ADD2C279]], 0
; CHECK-NEXT: [[CFL:%.*]] = zext i1 [[CCHK]] to i8
-; CHECK-NEXT: store i8 [[CFL]], ptr [[PCARRY]], align 1, !tbaa !16
+; CHECK-NEXT: store i8 [[CFL]], ptr [[PCARRY]], align 1, !tbaa [[TBAA16:![0-9]+]]
; CHECK-NEXT: br label [[EXIT]]
; CHECK: Exit:
; CHECK-NEXT: [[RESTART378:%.*]] = load [[MNR_STRUCT]], ptr [[RESTART]], align 8
-; CHECK-NEXT: ret [[MNR_STRUCT]] %restart378
+; CHECK-NEXT: ret [[MNR_STRUCT]] [[RESTART378]]
;
Entry:
%restart = alloca %MNR_struct
diff --git a/llvm/test/Transforms/NewGVN/pr34452.ll b/llvm/test/Transforms/NewGVN/pr34452.ll
index f5d5fda..9e65349 100644
--- a/llvm/test/Transforms/NewGVN/pr34452.ll
+++ b/llvm/test/Transforms/NewGVN/pr34452.ll
@@ -9,7 +9,7 @@ source_filename = "bugpoint-output-09f7a24.bc"
define void @sgrep() local_unnamed_addr #0 {
; CHECK-LABEL: @sgrep(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @WHOLELINE, align 4, !tbaa !1
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @WHOLELINE, align 4, !tbaa [[TBAA1:![0-9]+]]
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP0]], 0
; CHECK-NEXT: [[DOT:%.*]] = select i1 [[TOBOOL]], i32 2048, i32 2047
; CHECK-NEXT: br label [[WHILE_BODY_US:%.*]]
diff --git a/llvm/test/Transforms/NewGVN/pr42422-phi-of-ops.ll b/llvm/test/Transforms/NewGVN/pr42422-phi-of-ops.ll
index cbdf209..1312f9f 100644
--- a/llvm/test/Transforms/NewGVN/pr42422-phi-of-ops.ll
+++ b/llvm/test/Transforms/NewGVN/pr42422-phi-of-ops.ll
@@ -40,8 +40,8 @@ define void @d() {
; CHECK: cleanup:
; CHECK-NEXT: [[CLEANUP_DEST:%.*]] = phi i32 [ poison, [[IF_END12]] ], [ 1, [[IF_THEN11]] ], [ 9, [[IF_THEN]] ]
; CHECK-NEXT: switch i32 [[CLEANUP_DEST]], label [[CLEANUP14]] [
-; CHECK-NEXT: i32 0, label [[FOR_COND4]]
-; CHECK-NEXT: i32 9, label [[FOR_END13:%.*]]
+; CHECK-NEXT: i32 0, label [[FOR_COND4]]
+; CHECK-NEXT: i32 9, label [[FOR_END13:%.*]]
; CHECK-NEXT: ]
; CHECK: for.end13:
; CHECK-NEXT: br label [[CLEANUP14]]
diff --git a/llvm/test/Transforms/NewGVN/pr43441.ll b/llvm/test/Transforms/NewGVN/pr43441.ll
index 5c4a9c3..a5f711d 100644
--- a/llvm/test/Transforms/NewGVN/pr43441.ll
+++ b/llvm/test/Transforms/NewGVN/pr43441.ll
@@ -1,15 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S < %s | FileCheck %s
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-; CHECK-LABEL: @print_long_format()
define dso_local void @print_long_format() #0 {
+; CHECK-LABEL: define dso_local void @print_long_format(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: switch i32 undef, label [[SW_DEFAULT:%.*]] [
+; CHECK-NEXT: i32 1, label [[SW_BB:%.*]]
+; CHECK-NEXT: i32 0, label [[SW_BB19:%.*]]
+; CHECK-NEXT: i32 2, label [[SW_BB23:%.*]]
+; CHECK-NEXT: ]
+; CHECK: sw.bb:
+; CHECK-NEXT: unreachable
+; CHECK: sw.bb19:
+; CHECK-NEXT: br i1 undef, label [[IF_THEN37:%.*]], label [[IF_END50:%.*]]
+; CHECK: sw.bb23:
+; CHECK-NEXT: unreachable
+; CHECK: sw.default:
+; CHECK-NEXT: unreachable
+; CHECK: if.then37:
+; CHECK-NEXT: unreachable
+; CHECK: if.end50:
+; CHECK-NEXT: [[CALL180:%.*]] = call i32 @timespec_cmp() #[[ATTR2:[0-9]+]]
+; CHECK-NEXT: ret void
+;
entry:
switch i32 undef, label %sw.default [
- i32 1, label %sw.bb
- i32 0, label %sw.bb19
- i32 2, label %sw.bb23
+ i32 1, label %sw.bb
+ i32 0, label %sw.bb19
+ i32 2, label %sw.bb23
]
sw.bb: ; preds = %entry
diff --git a/llvm/test/Transforms/NewGVN/pre-compare.ll b/llvm/test/Transforms/NewGVN/pre-compare.ll
index 9fd20fc..8e4e5f8 100644
--- a/llvm/test/Transforms/NewGVN/pre-compare.ll
+++ b/llvm/test/Transforms/NewGVN/pre-compare.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S < %s | FileCheck %s
; C source:
@@ -37,6 +38,28 @@
@.str3 = private unnamed_addr constant [12 x i8] c"step 2: %d\0A\00", align 1
define void @f(i32 %x) noreturn nounwind uwtable ssp {
+; CHECK-LABEL: define void @f(
+; CHECK-SAME: i32 [[X:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], 1
+; CHECK-NEXT: br i1 [[CMP]], label [[FOR_COND_PREHEADER:%.*]], label [[IF_THEN:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[X]], 2
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP1]], ptr @.str, ptr @.str1
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @puts(ptr [[COND]]) #[[ATTR1:[0-9]+]]
+; CHECK-NEXT: br label [[FOR_COND_PREHEADER]]
+; CHECK: for.cond.preheader:
+; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i32 [[X]], 2
+; CHECK-NEXT: br label [[FOR_COND:%.*]]
+; CHECK: for.cond:
+; CHECK-NEXT: [[CALL2:%.*]] = tail call i32 @puts(ptr @.str2) #[[ATTR1]]
+; CHECK-NEXT: br i1 [[CMP3]], label [[FOR_COND_BACKEDGE:%.*]], label [[IF_END5:%.*]]
+; CHECK: if.end5:
+; CHECK-NEXT: [[CALL6:%.*]] = tail call i32 (ptr, ...) @printf(ptr @.str3, i32 [[X]]) #[[ATTR1]]
+; CHECK-NEXT: br label [[FOR_COND_BACKEDGE]]
+; CHECK: for.cond.backedge:
+; CHECK-NEXT: br label [[FOR_COND]]
+;
entry:
%cmp = icmp eq i32 %x, 1
br i1 %cmp, label %for.cond.preheader, label %if.then
diff --git a/llvm/test/Transforms/NewGVN/preserve-metadata-for-predicate-replacements.ll b/llvm/test/Transforms/NewGVN/preserve-metadata-for-predicate-replacements.ll
index 1ca24af..a63ca13 100644
--- a/llvm/test/Transforms/NewGVN/preserve-metadata-for-predicate-replacements.ll
+++ b/llvm/test/Transforms/NewGVN/preserve-metadata-for-predicate-replacements.ll
@@ -9,7 +9,7 @@ declare void @use(i32)
define i32 @test(ptr %p1, ptr %p2, i1 %c) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[P1:%.*]], align 8, !tbaa !0
+; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[P1:%.*]], align 8, !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT: [[CMP_1:%.*]] = icmp slt i32 [[LV]], 1
; CHECK-NEXT: br i1 [[CMP_1]], label [[EXIT:%.*]], label [[IF_FALSE:%.*]]
; CHECK: if.false:
diff --git a/llvm/test/Transforms/NewGVN/readattrs.ll b/llvm/test/Transforms/NewGVN/readattrs.ll
index 049a2fc..544fe45 100644
--- a/llvm/test/Transforms/NewGVN/readattrs.ll
+++ b/llvm/test/Transforms/NewGVN/readattrs.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S -o - < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
@@ -6,12 +7,15 @@ target triple = "x86_64-unknown-linux-gnu"
declare void @use(ptr readonly nocapture)
define i8 @test() {
+; CHECK-LABEL: define i8 @test() {
+; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
+; CHECK-NEXT: store i8 1, ptr [[A]], align 1
+; CHECK-NEXT: call void @use(ptr [[A]])
+; CHECK-NEXT: ret i8 1
+;
%a = alloca i8
store i8 1, ptr %a
call void @use(ptr %a)
%b = load i8, ptr %a
ret i8 %b
-; CHECK-LABEL: define i8 @test(
-; CHECK: call void @use(ptr %a)
-; CHECK-NEXT: ret i8 1
}
diff --git a/llvm/test/Transforms/NewGVN/rle-nonlocal.ll b/llvm/test/Transforms/NewGVN/rle-nonlocal.ll
index c2fb391..efdfd1f 100644
--- a/llvm/test/Transforms/NewGVN/rle-nonlocal.ll
+++ b/llvm/test/Transforms/NewGVN/rle-nonlocal.ll
@@ -7,14 +7,14 @@ define i32 @main(ptr %p, i32 %x, i32 %y) {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: br i1 [[CMP]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
; CHECK: block2:
-; CHECK-NEXT: [[A:%.*]] = load ptr, ptr [[P:%.*]]
+; CHECK-NEXT: [[A:%.*]] = load ptr, ptr [[P:%.*]], align 8
; CHECK-NEXT: br label [[BLOCK4:%.*]]
; CHECK: block3:
-; CHECK-NEXT: [[B:%.*]] = load ptr, ptr [[P]]
+; CHECK-NEXT: [[B:%.*]] = load ptr, ptr [[P]], align 8
; CHECK-NEXT: br label [[BLOCK4]]
; CHECK: block4:
; CHECK-NEXT: [[EXISTINGPHI:%.*]] = phi ptr [ [[A]], [[BLOCK2]] ], [ [[B]], [[BLOCK3]] ]
-; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[EXISTINGPHI]]
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[EXISTINGPHI]], align 4
; CHECK-NEXT: [[E:%.*]] = add i32 [[C]], [[C]]
; CHECK-NEXT: ret i32 [[E]]
;
diff --git a/llvm/test/Transforms/NewGVN/rle.ll b/llvm/test/Transforms/NewGVN/rle.ll
index 1cfdc83..950c492 100644
--- a/llvm/test/Transforms/NewGVN/rle.ll
+++ b/llvm/test/Transforms/NewGVN/rle.ll
@@ -1,15 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -data-layout="e-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-n8:16:32" -passes=newgvn,dce -S | FileCheck %s
; RUN: opt < %s -data-layout="E-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-n32" -passes=newgvn,dce -S | FileCheck %s
; memset -> i16 forwarding.
define signext i16 @memset_to_i16_local(ptr %A) nounwind ssp {
+; CHECK-LABEL: define signext i16 @memset_to_i16_local(
+; CHECK-SAME: ptr [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: tail call void @llvm.memset.p0.i64(ptr [[A]], i8 1, i64 200, i1 false)
+; CHECK-NEXT: ret i16 257
+;
entry:
tail call void @llvm.memset.p0.i64(ptr %A, i8 1, i64 200, i1 false)
%arrayidx = getelementptr inbounds i16, ptr %A, i64 42
%tmp2 = load i16, ptr %arrayidx
ret i16 %tmp2
-; CHECK-LABEL: @memset_to_i16_local(
-; CHECK-NOT: load
-; CHECK: ret i16 257
}
@GCst = constant {i32, float, i32 } { i32 42, float 14., i32 97 }
@@ -17,37 +21,48 @@ entry:
; memcpy -> float forwarding.
define float @memcpy_to_float_local(ptr %A) nounwind ssp {
+; CHECK-LABEL: define float @memcpy_to_float_local(
+; CHECK-SAME: ptr [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr [[A]], ptr @GCst, i64 12, i1 false)
+; CHECK-NEXT: ret float 1.400000e+01
+;
entry:
tail call void @llvm.memcpy.p0.p0.i64(ptr %A, ptr @GCst, i64 12, i1 false)
%arrayidx = getelementptr inbounds float, ptr %A, i64 1 ; <ptr> [#uses=1]
%tmp2 = load float, ptr %arrayidx ; <float> [#uses=1]
ret float %tmp2
-; CHECK-LABEL: @memcpy_to_float_local(
-; CHECK-NOT: load
-; CHECK: ret float 1.400000e+01
}
; memcpy from address space 1
define float @memcpy_to_float_local_as1(ptr %A) nounwind ssp {
+; CHECK-LABEL: define float @memcpy_to_float_local_as1(
+; CHECK-SAME: ptr [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: tail call void @llvm.memcpy.p0.p1.i64(ptr [[A]], ptr addrspace(1) @GCst_as1, i64 12, i1 false)
+; CHECK-NEXT: ret float 1.400000e+01
+;
entry:
tail call void @llvm.memcpy.p0.p1.i64(ptr %A, ptr addrspace(1) @GCst_as1, i64 12, i1 false)
%arrayidx = getelementptr inbounds float, ptr %A, i64 1 ; <ptr> [#uses=1]
%tmp2 = load float, ptr %arrayidx ; <float> [#uses=1]
ret float %tmp2
-; CHECK-LABEL: @memcpy_to_float_local_as1(
-; CHECK-NOT: load
-; CHECK: ret float 1.400000e+01
}
; PR6642
define i32 @memset_to_load() nounwind readnone {
+; CHECK-LABEL: define i32 @memset_to_load(
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[X:%.*]] = alloca [256 x i32], align 4
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[X]], i8 0, i64 1024, i1 false)
+; CHECK-NEXT: ret i32 0
+;
entry:
%x = alloca [256 x i32], align 4 ; <ptr> [#uses=2]
call void @llvm.memset.p0.i64(ptr align 4 %x, i8 0, i64 1024, i1 false)
%arraydecay = getelementptr inbounds [256 x i32], ptr %x, i32 0, i32 0 ; <ptr>
%tmp1 = load i32, ptr %arraydecay ; <i32> [#uses=1]
ret i32 %tmp1
-; CHECK-LABEL: @memset_to_load(
-; CHECK: ret i32 0
}
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/Transforms/NewGVN/simp-to-self.ll b/llvm/test/Transforms/NewGVN/simp-to-self.ll
index fb8a019..f9a0ec2 100644
--- a/llvm/test/Transforms/NewGVN/simp-to-self.ll
+++ b/llvm/test/Transforms/NewGVN/simp-to-self.ll
@@ -1,13 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -S < %s -passes=newgvn | FileCheck %s
-; CHECK-LABEL: for.cond:
-; CHECK-NEXT: %lv = load i32, ptr @a
-; CHECK-NEXT: %bf.clear = and i32 %lv, -131072
-; CHECK-NEXT: %bf.set = or i32 1, %bf.clear
-; CHECK-NEXT: br i1 %bc, label %for.cond, label %exit
@a = external global i64
define void @fn1(i1 %bc) {
+; CHECK-LABEL: define void @fn1(
+; CHECK-SAME: i1 [[BC:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[FOR_COND:%.*]]
+; CHECK: for.cond:
+; CHECK-NEXT: [[LV:%.*]] = load i32, ptr @a, align 4
+; CHECK-NEXT: [[BF_CLEAR:%.*]] = and i32 [[LV]], -131072
+; CHECK-NEXT: [[BF_SET:%.*]] = or i32 1, [[BF_CLEAR]]
+; CHECK-NEXT: br i1 [[BC]], label [[FOR_COND]], label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: store i32 [[BF_SET]], ptr @a, align 4
+; CHECK-NEXT: ret void
+;
entry:
br label %for.cond
diff --git a/llvm/test/Transforms/NewGVN/stale-loop-info.ll b/llvm/test/Transforms/NewGVN/stale-loop-info.ll
index 8870824..7abe80b 100644
--- a/llvm/test/Transforms/NewGVN/stale-loop-info.ll
+++ b/llvm/test/Transforms/NewGVN/stale-loop-info.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes='require<loops>,newgvn' -S < %s | FileCheck %s
; This used to fail with ASAN enabled and if for some reason LoopInfo remained
@@ -14,6 +15,29 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
declare void @snork.1(ptr) local_unnamed_addr #0
define hidden zeroext i1 @eggs(ptr %arg, i1 %arg2) unnamed_addr align 2 {
+; CHECK-LABEL: define hidden zeroext i1 @eggs(
+; CHECK-SAME: ptr [[ARG:%.*]], i1 [[ARG2:%.*]]) unnamed_addr align 2 {
+; CHECK-NEXT: bb:
+; CHECK-NEXT: br i1 [[ARG2]], label [[BB14:%.*]], label [[BB3:%.*]]
+; CHECK: bb3:
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [[STRUCT_WIBBLE_1028:%.*]], ptr [[ARG]], i64 0, i32 2, i32 0, i32 0, i64 0
+; CHECK-NEXT: br label [[BB6:%.*]]
+; CHECK: bb6:
+; CHECK-NEXT: br label [[BB7:%.*]]
+; CHECK: bb7:
+; CHECK-NEXT: br i1 undef, label [[BB11:%.*]], label [[BB8:%.*]]
+; CHECK: bb8:
+; CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP]], align 8
+; CHECK-NEXT: br label [[BB12:%.*]]
+; CHECK: bb11:
+; CHECK-NEXT: br label [[BB12]]
+; CHECK: bb12:
+; CHECK-NEXT: [[TMP13:%.*]] = phi ptr [ [[TMP]], [[BB11]] ], [ [[TMP9]], [[BB8]] ]
+; CHECK-NEXT: call void @snork.1(ptr [[TMP13]]) #[[ATTR1:[0-9]+]]
+; CHECK-NEXT: br label [[BB6]]
+; CHECK: bb14:
+; CHECK-NEXT: ret i1 false
+;
bb:
br i1 %arg2, label %bb14, label %bb3
@@ -29,7 +53,6 @@ bb7: ; preds = %bb6
bb8: ; preds = %bb7
%tmp9 = load ptr, ptr %tmp, align 8
-; CHECK: %tmp9 = load ptr, ptr %tmp, align 8
br label %bb12
bb11: ; preds = %bb7
diff --git a/llvm/test/Transforms/NewGVN/tbaa.ll b/llvm/test/Transforms/NewGVN/tbaa.ll
index e6d66dc..335e782 100644
--- a/llvm/test/Transforms/NewGVN/tbaa.ll
+++ b/llvm/test/Transforms/NewGVN/tbaa.ll
@@ -1,10 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S < %s | FileCheck %s
define i32 @test1(ptr %p, ptr %q) {
-; CHECK-LABEL: @test1(ptr %p, ptr %q)
-; CHECK: call i32 @foo(ptr %p)
-; CHECK-NOT: tbaa
-; CHECK: %c = add i32 %a, %a
+; CHECK-LABEL: define i32 @test1(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) {
+; CHECK-NEXT: [[A:%.*]] = call i32 @foo(ptr [[P]])
+; CHECK-NEXT: [[C:%.*]] = add i32 [[A]], [[A]]
+; CHECK-NEXT: ret i32 [[C]]
+;
%a = call i32 @foo(ptr %p), !tbaa !0
%b = call i32 @foo(ptr %p)
%c = add i32 %a, %b
@@ -12,9 +15,12 @@ define i32 @test1(ptr %p, ptr %q) {
}
define i32 @test2(ptr %p, ptr %q) {
-; CHECK-LABEL: @test2(ptr %p, ptr %q)
-; CHECK: call i32 @foo(ptr %p), !tbaa [[TAGC:!.*]]
-; CHECK: %c = add i32 %a, %a
+; CHECK-LABEL: define i32 @test2(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) {
+; CHECK-NEXT: [[A:%.*]] = call i32 @foo(ptr [[P]]), !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT: [[C:%.*]] = add i32 [[A]], [[A]]
+; CHECK-NEXT: ret i32 [[C]]
+;
%a = call i32 @foo(ptr %p), !tbaa !0
%b = call i32 @foo(ptr %p), !tbaa !0
%c = add i32 %a, %b
@@ -22,9 +28,12 @@ define i32 @test2(ptr %p, ptr %q) {
}
define i32 @test3(ptr %p, ptr %q) {
-; CHECK-LABEL: @test3(ptr %p, ptr %q)
-; CHECK: call i32 @foo(ptr %p), !tbaa [[TAGB:!.*]]
-; CHECK: %c = add i32 %a, %a
+; CHECK-LABEL: define i32 @test3(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) {
+; CHECK-NEXT: [[A:%.*]] = call i32 @foo(ptr [[P]]), !tbaa [[TBAA4:![0-9]+]]
+; CHECK-NEXT: [[C:%.*]] = add i32 [[A]], [[A]]
+; CHECK-NEXT: ret i32 [[C]]
+;
%a = call i32 @foo(ptr %p), !tbaa !3
%b = call i32 @foo(ptr %p), !tbaa !3
%c = add i32 %a, %b
@@ -32,9 +41,12 @@ define i32 @test3(ptr %p, ptr %q) {
}
define i32 @test4(ptr %p, ptr %q) {
-; CHECK-LABEL: @test4(ptr %p, ptr %q)
-; CHECK: call i32 @foo(ptr %p), !tbaa [[TAGA:!.*]]
-; CHECK: %c = add i32 %a, %a
+; CHECK-LABEL: define i32 @test4(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) {
+; CHECK-NEXT: [[A:%.*]] = call i32 @foo(ptr [[P]]), !tbaa [[TBAA6:![0-9]+]]
+; CHECK-NEXT: [[C:%.*]] = add i32 [[A]], [[A]]
+; CHECK-NEXT: ret i32 [[C]]
+;
%a = call i32 @foo(ptr %p), !tbaa !1
%b = call i32 @foo(ptr %p), !tbaa !0
%c = add i32 %a, %b
@@ -42,9 +54,12 @@ define i32 @test4(ptr %p, ptr %q) {
}
define i32 @test5(ptr %p, ptr %q) {
-; CHECK-LABEL: @test5(ptr %p, ptr %q)
-; CHECK: call i32 @foo(ptr %p), !tbaa [[TAGA]]
-; CHECK: %c = add i32 %a, %a
+; CHECK-LABEL: define i32 @test5(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) {
+; CHECK-NEXT: [[A:%.*]] = call i32 @foo(ptr [[P]]), !tbaa [[TBAA6]]
+; CHECK-NEXT: [[C:%.*]] = add i32 [[A]], [[A]]
+; CHECK-NEXT: ret i32 [[C]]
+;
%a = call i32 @foo(ptr %p), !tbaa !0
%b = call i32 @foo(ptr %p), !tbaa !1
%c = add i32 %a, %b
@@ -52,9 +67,12 @@ define i32 @test5(ptr %p, ptr %q) {
}
define i32 @test6(ptr %p, ptr %q) {
-; CHECK-LABEL: @test6(ptr %p, ptr %q)
-; CHECK: call i32 @foo(ptr %p), !tbaa [[TAGA]]
-; CHECK: %c = add i32 %a, %a
+; CHECK-LABEL: define i32 @test6(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) {
+; CHECK-NEXT: [[A:%.*]] = call i32 @foo(ptr [[P]]), !tbaa [[TBAA6]]
+; CHECK-NEXT: [[C:%.*]] = add i32 [[A]], [[A]]
+; CHECK-NEXT: ret i32 [[C]]
+;
%a = call i32 @foo(ptr %p), !tbaa !0
%b = call i32 @foo(ptr %p), !tbaa !3
%c = add i32 %a, %b
@@ -62,10 +80,12 @@ define i32 @test6(ptr %p, ptr %q) {
}
define i32 @test7(ptr %p, ptr %q) {
-; CHECK-LABEL: @test7(ptr %p, ptr %q)
-; CHECK: call i32 @foo(ptr %p)
-; CHECK-NOT: tbaa
-; CHECK: %c = add i32 %a, %a
+; CHECK-LABEL: define i32 @test7(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) {
+; CHECK-NEXT: [[A:%.*]] = call i32 @foo(ptr [[P]])
+; CHECK-NEXT: [[C:%.*]] = add i32 [[A]], [[A]]
+; CHECK-NEXT: ret i32 [[C]]
+;
%a = call i32 @foo(ptr %p), !tbaa !4
%b = call i32 @foo(ptr %p), !tbaa !3
%c = add i32 %a, %b
@@ -73,9 +93,11 @@ define i32 @test7(ptr %p, ptr %q) {
}
define i32 @test8(ptr %p, ptr %q) {
-; CHECK-LABEL: @test8
-; CHECK-NEXT: store i32 15, ptr %p
-; CHECK-NEXT: ret i32 0
+; CHECK-LABEL: define i32 @test8(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) {
+; CHECK-NEXT: store i32 15, ptr [[P]], align 4
+; CHECK-NEXT: ret i32 0
+;
; Since we know the location is invariant, we can forward the
; load across the potentially aliasing store.
@@ -87,9 +109,11 @@ define i32 @test8(ptr %p, ptr %q) {
}
define i32 @test9(ptr %p, ptr %q) {
-; CHECK-LABEL: @test9
-; CHECK-NEXT: call void @clobber()
-; CHECK-NEXT: ret i32 0
+; CHECK-LABEL: define i32 @test9(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) {
+; CHECK-NEXT: call void @clobber()
+; CHECK-NEXT: ret i32 0
+;
; Since we know the location is invariant, we can forward the
; load across the potentially aliasing store (within the call).
@@ -103,9 +127,12 @@ define i32 @test9(ptr %p, ptr %q) {
define i32 @test10(ptr %p, ptr %q) {
; If one access encloses the other, then the merged access is the enclosed one
; and not just the common final access type.
-; CHECK-LABEL: @test10
-; CHECK: call i32 @foo(ptr %p), !tbaa [[TAG_X_i:!.*]]
-; CHECK: %c = add i32 %a, %a
+; CHECK-LABEL: define i32 @test10(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) {
+; CHECK-NEXT: [[A:%.*]] = call i32 @foo(ptr [[P]]), !tbaa [[TBAA7:![0-9]+]]
+; CHECK-NEXT: [[C:%.*]] = add i32 [[A]], [[A]]
+; CHECK-NEXT: ret i32 [[C]]
+;
%a = call i32 @foo(ptr %p), !tbaa !15 ; TAG_X_i
%b = call i32 @foo(ptr %p), !tbaa !19 ; TAG_Y_x_i
%c = add i32 %a, %b
@@ -115,12 +142,6 @@ define i32 @test10(ptr %p, ptr %q) {
declare void @clobber()
declare i32 @foo(ptr) readonly
-; CHECK-DAG: [[TAGC]] = !{[[TYPEC:!.*]], [[TYPEC]], i64 0}
-; CHECK-DAG: [[TYPEC]] = !{!"C", [[TYPEA:!.*]]}
-; CHECK-DAG: [[TYPEA]] = !{!"A", !{{.*}}}
-; CHECK-DAG: [[TAGB]] = !{[[TYPEB:!.*]], [[TYPEB]], i64 0}
-; CHECK-DAG: [[TYPEB]] = !{!"B", [[TYPEA]]}
-; CHECK-DAG: [[TAGA]] = !{[[TYPEA]], [[TYPEA]], i64 0}
!0 = !{!5, !5, i64 0}
!1 = !{!6, !6, i64 0}
!2 = !{!"tbaa root"}
@@ -132,9 +153,6 @@ declare i32 @foo(ptr) readonly
!8 = !{!"another root"}
!11 = !{!"scalar type", !8}
-; CHECK-DAG: [[TAG_X_i]] = !{[[TYPE_X:!.*]], [[TYPE_int:!.*]], i64 0}
-; CHECK-DAG: [[TYPE_X:!.*]] = !{!"struct X", [[TYPE_int]], i64 0}
-; CHECK-DAG: [[TYPE_int]] = !{!"int", {{!.*}}, i64 0}
!15 = !{!16, !17, i64 0} ; TAG_X_i
!16 = !{!"struct X", !17, i64 0} ; struct X { int i; };
!17 = !{!"int", !18, i64 0}
@@ -146,3 +164,16 @@ declare i32 @foo(ptr) readonly
; A TBAA structure whose only point is to have a constant location.
!9 = !{!"yet another root"}
!10 = !{!"node", !9, i64 1}
+;.
+; CHECK: [[TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0}
+; CHECK: [[META1]] = !{!"C", [[META2:![0-9]+]]}
+; CHECK: [[META2]] = !{!"A", [[META3:![0-9]+]]}
+; CHECK: [[META3]] = !{!"tbaa root"}
+; CHECK: [[TBAA4]] = !{[[META5:![0-9]+]], [[META5]], i64 0}
+; CHECK: [[META5]] = !{!"B", [[META2]]}
+; CHECK: [[TBAA6]] = !{[[META2]], [[META2]], i64 0}
+; CHECK: [[TBAA7]] = !{[[META8:![0-9]+]], [[META9:![0-9]+]], i64 0}
+; CHECK: [[META8]] = !{!"struct X", [[META9]], i64 0}
+; CHECK: [[META9]] = !{!"int", [[META10:![0-9]+]], i64 0}
+; CHECK: [[META10]] = !{!"char", [[META3]], i64 0}
+;.
diff --git a/llvm/test/Transforms/NewGVN/unreachable_block_infinite_loop.ll b/llvm/test/Transforms/NewGVN/unreachable_block_infinite_loop.ll
index 7fbf506..70e5e1a 100644
--- a/llvm/test/Transforms/NewGVN/unreachable_block_infinite_loop.ll
+++ b/llvm/test/Transforms/NewGVN/unreachable_block_infinite_loop.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -disable-output < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@@ -5,11 +6,11 @@ target triple = "x86_64-apple-darwin10.0"
define i32 @test2() nounwind ssp {
entry:
- ret i32 0
+ ret i32 0
unreachable_block:
- %a = add i32 %a, 1
- ret i32 %a
+ %a = add i32 %a, 1
+ ret i32 %a
}
define i32 @pr23096_test0() {
diff --git a/llvm/test/Transforms/NewGVN/verify-memoryphi.ll b/llvm/test/Transforms/NewGVN/verify-memoryphi.ll
index b863d23..2a1fcf3 100644
--- a/llvm/test/Transforms/NewGVN/verify-memoryphi.ll
+++ b/llvm/test/Transforms/NewGVN/verify-memoryphi.ll
@@ -1,21 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; Skip dead MemoryPhis when performing memory congruency verification
; in NewGVN.
; RUN: opt -S -passes=newgvn %s | FileCheck %s
; REQUIRES: asserts
-; CHECK: define void @tinkywinky() {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 false, label %body, label %end
-; CHECK: body:
-; CHECK-NEXT: store i8 poison, ptr null
-; CHECK-NEXT: br label %end
-; CHECK: end:
-; CHECK-NEXT: ret void
-; CHECK-NEXT: }
declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
define void @tinkywinky() {
+; CHECK-LABEL: define void @tinkywinky() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[BODY:%.*]], label [[END:%.*]]
+; CHECK: body:
+; CHECK-NEXT: store i8 poison, ptr null, align 1
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
entry:
call void @llvm.lifetime.start.p0(i64 4, ptr undef)
br i1 false, label %body, label %end
diff --git a/llvm/test/Transforms/NewGVN/volatile-nonvolatile.ll b/llvm/test/Transforms/NewGVN/volatile-nonvolatile.ll
index 2febea7..d6daff9 100644
--- a/llvm/test/Transforms/NewGVN/volatile-nonvolatile.ll
+++ b/llvm/test/Transforms/NewGVN/volatile-nonvolatile.ll
@@ -1,13 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=newgvn -S < %s | FileCheck %s
%struct.t = type { ptr }
; The loaded address and the location of the address itself are not aliased,
; so the second reload is not necessary. Check that it can be eliminated.
-; CHECK-LABEL: test1
-; CHECK: load
-; CHECK-NOT: load
define void @test1(ptr nocapture readonly %p, i32 %v) #0 {
+; CHECK-LABEL: define void @test1(
+; CHECK-SAME: ptr nocapture readonly [[P:%.*]], i32 [[V:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P]], align 4, !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT: store volatile i32 [[V]], ptr [[TMP0]], align 4, !tbaa [[TBAA5:![0-9]+]]
+; CHECK-NEXT: store volatile i32 [[V]], ptr [[TMP0]], align 4, !tbaa [[TBAA5]]
+; CHECK-NEXT: ret void
+;
entry:
%0 = load ptr, ptr %p, align 4, !tbaa !1
store volatile i32 %v, ptr %0, align 4, !tbaa !6
@@ -18,11 +24,16 @@ entry:
; The store via the loaded address may overwrite the address itself.
; Make sure that both loads remain.
-; CHECK-LABEL: test2
-; CHECK: load
-; CHECK: store
-; CHECK: load
define void @test2(ptr nocapture readonly %p, i32 %v) #0 {
+; CHECK-LABEL: define void @test2(
+; CHECK-SAME: ptr nocapture readonly [[P:%.*]], i32 [[V:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store volatile i32 [[V]], ptr [[TMP0]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[P]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store volatile i32 [[V]], ptr [[TMP1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: ret void
+;
entry:
%0 = load ptr, ptr %p, align 4, !tbaa !1
store volatile i32 %v, ptr %0, align 4, !tbaa !1
@@ -33,11 +44,16 @@ entry:
; The loads are ordered and non-monotonic. Although they are not aliased to
; the stores, make sure both are preserved.
-; CHECK-LABEL: test3
-; CHECK: load
-; CHECK: store
-; CHECK: load
define void @test3(ptr nocapture readonly %p, i32 %v) #0 {
+; CHECK-LABEL: define void @test3(
+; CHECK-SAME: ptr nocapture readonly [[P:%.*]], i32 [[V:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load atomic ptr, ptr [[P]] acquire, align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store volatile i32 [[V]], ptr [[TMP0]], align 4, !tbaa [[TBAA5]]
+; CHECK-NEXT: [[TMP1:%.*]] = load atomic ptr, ptr [[P]] acquire, align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store volatile i32 [[V]], ptr [[TMP1]], align 4, !tbaa [[TBAA5]]
+; CHECK-NEXT: ret void
+;
entry:
%0 = load atomic ptr, ptr %p acquire, align 4, !tbaa !1
store volatile i32 %v, ptr %0, align 4, !tbaa !6
@@ -56,3 +72,12 @@ attributes #0 = { norecurse nounwind }
!6 = !{!7, !7, i64 0}
!7 = !{!"int", !4, i64 0}
+;.
+; CHECK: [[TBAA0]] = !{[[META1:![0-9]+]], [[META2:![0-9]+]], i64 0}
+; CHECK: [[META1]] = !{!"", [[META2]], i64 0}
+; CHECK: [[META2]] = !{!"any pointer", [[META3:![0-9]+]], i64 0}
+; CHECK: [[META3]] = !{!"omnipotent char", [[META4:![0-9]+]], i64 0}
+; CHECK: [[META4]] = !{!"Simple C/C++ TBAA"}
+; CHECK: [[TBAA5]] = !{[[META6:![0-9]+]], [[META6]], i64 0}
+; CHECK: [[META6]] = !{!"int", [[META3]], i64 0}
+;.
diff --git a/llvm/test/Transforms/PGOProfile/memop_profile_funclet_wasm.ll b/llvm/test/Transforms/PGOProfile/memop_profile_funclet_wasm.ll
new file mode 100644
index 0000000..f8dcb76
--- /dev/null
+++ b/llvm/test/Transforms/PGOProfile/memop_profile_funclet_wasm.ll
@@ -0,0 +1,48 @@
+; RUN: opt < %s -passes=pgo-instr-gen -S | FileCheck %s --check-prefixes=CHECK,GEN
+; RUN: opt < %s -passes=pgo-instr-gen,instrprof -S | FileCheck %s --check-prefixes=CHECK,LOWER
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+define void @wasm_funclet_op_bundle(ptr %p, ptr %dst, ptr %src) personality ptr @__gxx_wasm_personality_v0 {
+entry:
+ invoke void @foo()
+ to label %try.cont unwind label %catch.dispatch
+
+catch.dispatch: ; preds = %entry
+ %0 = catchswitch within none [label %catch.start] unwind to caller
+
+catch.start: ; preds = %catch.dispatch
+ %1 = catchpad within %0 [ptr null]
+; CHECK: %[[CATCHPAD:.*]] = catchpad
+ %2 = call ptr @llvm.wasm.get.exception(token %1)
+ %3 = call i32 @llvm.wasm.get.ehselector(token %1)
+ %4 = call ptr @__cxa_begin_catch(ptr %2) #3 [ "funclet"(token %1) ]
+ %tmp = load i32, ptr %p, align 4
+ call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 %tmp, i1 false)
+; GEN: call void @llvm.instrprof.value.profile({{.*}}) [ "funclet"(token %[[CATCHPAD]]) ]
+; LOWER: call void @__llvm_profile_instrument_memop({{.*}}) [ "funclet"(token %[[CATCHPAD]]) ]
+ call void @__cxa_end_catch() [ "funclet"(token %1) ]
+ catchret from %1 to label %try.cont
+
+try.cont: ; preds = %catch.start, %entry
+ ret void
+}
+
+declare void @foo()
+declare i32 @__gxx_wasm_personality_v0(...)
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare ptr @llvm.wasm.get.exception(token) #0
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare i32 @llvm.wasm.get.ehselector(token) #0
+; Function Attrs: nounwind memory(none)
+declare i32 @llvm.eh.typeid.for(ptr) #1
+declare ptr @__cxa_begin_catch(ptr)
+declare void @__cxa_end_catch()
+; Function Attrs: nocallback nofree nounwind willreturn memory(argmem: readwrite)
+declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg) #2
+
+attributes #0 = { nocallback nofree nosync nounwind willreturn }
+attributes #1 = { nounwind memory(none) }
+attributes #2 = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
+attributes #3 = { nounwind }
diff --git a/llvm/test/Transforms/PGOProfile/vtable_prof_unsupported.ll b/llvm/test/Transforms/PGOProfile/vtable_prof_unsupported.ll
new file mode 100644
index 0000000..f72a20f
--- /dev/null
+++ b/llvm/test/Transforms/PGOProfile/vtable_prof_unsupported.ll
@@ -0,0 +1,34 @@
+; RUN: opt < %s -passes=pgo-instr-gen -enable-vtable-value-profiling -S 2>&1 | FileCheck %s
+
+; Test that unsupported warning is emitted for non-ELF object files.
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "arm64-apple-macosx14.0.0"
+
+; CHECK: warning: {{.*}} VTable value profiling is presently not supported for non-ELF object formats
+
+@_ZTV4Base = constant { [3 x ptr] } { [3 x ptr] [ptr null, ptr null, ptr @_ZN4Base4funcEi] }, !type !0, !type !1
+@_ZTV7Derived = constant { [3 x ptr] } { [3 x ptr] [ptr null, ptr null, ptr @_ZN7Derived4funcEi] }, !type !0, !type !1, !type !2, !type !3
+
+@llvm.compiler.used = appending global [2 x ptr] [ptr @_ZTV4Base, ptr @_ZTV7Derived], section "llvm.metadata"
+
+define i32 @_Z4funci(i32 %a) {
+entry:
+ %call = call ptr @_Z10createTypev()
+ %vtable = load ptr, ptr %call
+ %0 = call i1 @llvm.public.type.test(ptr %vtable, metadata !"_ZTS7Derived")
+ call void @llvm.assume(i1 %0)
+ %1 = load ptr, ptr %vtable
+ %call1 = call i32 %1(ptr %call, i32 %a)
+ ret i32 %call1
+}
+
+declare ptr @_Z10createTypev()
+declare i1 @llvm.public.type.test(ptr, metadata)
+declare void @llvm.assume(i1)
+declare i32 @_ZN4Base4funcEi(ptr, i32)
+declare i32 @_ZN7Derived4funcEi(ptr , i32)
+
+!0 = !{i64 16, !"_ZTS4Base"}
+!1 = !{i64 16, !"_ZTSM4BaseFiiE.virtual"}
+!2 = !{i64 16, !"_ZTS7Derived"}
+!3 = !{i64 16, !"_ZTSM7DerivedFiiE.virtual"}
diff --git a/llvm/test/Transforms/PGOProfile/vtable_profile.ll b/llvm/test/Transforms/PGOProfile/vtable_profile.ll
new file mode 100644
index 0000000..a844003
--- /dev/null
+++ b/llvm/test/Transforms/PGOProfile/vtable_profile.ll
@@ -0,0 +1,98 @@
+; RUN: opt < %s -passes=pgo-instr-gen -enable-vtable-value-profiling -S 2>&1 | FileCheck %s --check-prefix=GEN --implicit-check-not="VTable value profiling is presently not supported"
+; RUN: opt < %s -passes=pgo-instr-gen,instrprof -enable-vtable-value-profiling -S 2>&1 | FileCheck %s --check-prefix=LOWER --implicit-check-not="VTable value profiling is presently not supported"
+
+; __llvm_prf_vnm stores zlib-compressed vtable names.
+; REQUIRES: zlib
+
+source_filename = "vtable_local.ll"
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; The test IR is generated based on the following C++ program.
+; Base1 has external linkage and Base2 has local linkage.
+; class Derived uses multiple inheritance so its virtual table
+; global variable contains two vtables. func1 is loaded from
+; the vtable compatible with class Base1, and func2 is loaded
+; from the vtable compatible with class Base2.
+
+; class Base1 {
+; public:
+; virtual int func1(int a) ;
+; };
+;
+; namespace {
+; class Base2 {
+; public:
+; __attribute__((noinline)) virtual int func2(int a) {
+; return a;
+; }
+; };
+; }
+
+; class Derived : public Base1, public Base2 {
+; public:
+; Derived(int c) : v(c) {}
+; private:
+; int v;
+; };
+;
+; Derived* createType();
+
+; int func(int a) {
+; Derived* d = createType();
+; return d->func2(a) + d->func1(a);
+; }
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@_ZTV7Derived = constant { [3 x ptr], [3 x ptr] } { [3 x ptr] [ptr null, ptr null, ptr @_ZN5Base15func1Ei], [3 x ptr] [ptr inttoptr (i64 -8 to ptr), ptr null, ptr @_ZN12_GLOBAL__N_15Base25func2Ei] }, !type !0, !type !3, !type !6, !type !8, !type !10
+@_ZTV5Base1 = available_externally constant { [3 x ptr] } { [3 x ptr] [ptr null, ptr null, ptr @_ZN5Base15func1Ei] }, !type !0
+@_ZTVN12_GLOBAL__N_15Base2E = internal constant { [3 x ptr] } { [3 x ptr] [ptr null, ptr null, ptr @_ZN12_GLOBAL__N_15Base25func2Ei] }, !type !11, !type !8; !vcall_visibility !12
+@llvm.compiler.used = appending global [1 x ptr] [ptr @_ZTV5Base1], section "llvm.metadata"
+
+; GEN: __llvm_profile_raw_version = comdat any
+; GEN: __llvm_profile_raw_version = hidden constant i64 72057594037927946, comdat
+; GEN: __profn__Z4funci = private constant [8 x i8] c"_Z4funci"
+
+; LOWER: $__profvt__ZTV7Derived = comdat nodeduplicate
+; LOWER: $"__profvt_vtable_local.ll;_ZTVN12_GLOBAL__N_15Base2E" = comdat nodeduplicate
+; LOWER: @__profvt__ZTV7Derived = global { i64, ptr, i32 } { i64 -4576307468236080025, ptr @_ZTV7Derived, i32 48 }, section "__llvm_prf_vtab", comdat, align 8
+; LOWER: @"__profvt_vtable_local.ll;_ZTVN12_GLOBAL__N_15Base2E" = internal global { i64, ptr, i32 } { i64 1419990121885302679, ptr @_ZTVN12_GLOBAL__N_15Base2E, i32 24 }, section "__llvm_prf_vtab", comdat, align 8
+; LOWER: @__llvm_prf_vnm = private constant [64 x i8] c"7>x\DA\8B\8F\0A\093wI-\CA,KMa,+IL\CAI\8D\CF\C9ON\CC\D1\CB\C9\B1\8E\07J\FA\19\1A\C5\BB\FB\F8;9\FA\C4\C7\FB\C5\1B\9A:%\16\A7\1A\B9\02\00\19:\12o", section "__llvm_prf_vns", align 1
+; LOWER: @llvm.used = appending global [5 x ptr] [ptr @__profvt__ZTV7Derived, ptr @"__profvt_vtable_local.ll;_ZTVN12_GLOBAL__N_15Base2E", ptr @__llvm_prf_vnodes, ptr @__llvm_prf_nm, ptr @__llvm_prf_vnm], section "llvm.metadata"
+
+define i32 @_Z4funci(i32 %a) {
+entry:
+ %call = call ptr @_Z10createTypev()
+ %add.ptr = getelementptr inbounds i8, ptr %call, i64 8
+ %vtable = load ptr, ptr %add.ptr
+; GEN: [[P1:%[0-9]+]] = ptrtoint ptr %vtable to i64
+; GEN: call void @llvm.instrprof.value.profile(ptr @__profn__Z4funci, i64 [[CFGHash:[0-9]+]], i64 [[P1]], i32 2, i32 0)
+; LOWER: [[P1:%[0-9]+]] = ptrtoint ptr %vtable to i64
+; LOWER: call void @__llvm_profile_instrument_target(i64 [[P1]], ptr @__profd__Z4funci, i32 2)
+ %vfunc1 = load ptr, ptr %vtable
+ %call1 = call i32 %vfunc1(ptr %add.ptr, i32 %a)
+ %vtable2 = load ptr, ptr %call
+; GEN: [[P2:%[0-9]+]] = ptrtoint ptr %vtable2 to i64
+; GEN: call void @llvm.instrprof.value.profile(ptr @__profn__Z4funci, i64 [[CFGHash]], i64 [[P2]], i32 2, i32 1)
+; LOWER: [[P2:%[0-9]+]] = ptrtoint ptr %vtable2 to i64
+; LOWER: call void @__llvm_profile_instrument_target(i64 [[P2]], ptr @__profd__Z4funci, i32 3)
+ %vfunc2 = load ptr, ptr %vtable2
+ %call4 = call i32 %vfunc2(ptr %call, i32 %a)
+ %add = add nsw i32 %call1, %call4
+ ret i32 %add
+}
+
+declare ptr @_Z10createTypev()
+declare i32 @_ZN12_GLOBAL__N_15Base25func2Ei(ptr %this, i32 %a)
+declare i32 @_ZN5Base15func1Ei(ptr, i32)
+
+!0 = !{i64 16, !"_ZTS5Base1"}
+!3 = !{i64 16, !"_ZTS7Derived"}
+!6 = !{i64 40, !7}
+!7 = distinct !{}
+!8 = !{i64 16, !9}
+!9 = distinct !{}
+!10 = !{i64 40, !9}
+!11 = !{i64 16, !7}
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
index 33cfd0a..ad100c3 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
@@ -164,10 +164,10 @@ define void @loop2(ptr %A, ptr %B, ptr %C, float %x) {
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i64 16
; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP8]], align 4, !alias.scope [[META9:![0-9]+]], !noalias [[META11:![0-9]+]]
; CHECK-NEXT: [[WIDE_LOAD11:%.*]] = load <4 x float>, ptr [[TMP9]], align 4, !alias.scope [[META9]], !noalias [[META11]]
-; CHECK-NEXT: [[TMP10:%.*]] = select <4 x i1> [[TMP2]], <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, <4 x float> [[WIDE_LOAD10]]
-; CHECK-NEXT: [[PREDPHI:%.*]] = fadd <4 x float> [[TMP6]], [[TMP10]]
-; CHECK-NEXT: [[TMP11:%.*]] = select <4 x i1> [[TMP3]], <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, <4 x float> [[WIDE_LOAD11]]
-; CHECK-NEXT: [[PREDPHI12:%.*]] = fadd <4 x float> [[TMP7]], [[TMP11]]
+; CHECK-NEXT: [[TMP10:%.*]] = fadd <4 x float> [[TMP6]], [[WIDE_LOAD10]]
+; CHECK-NEXT: [[TMP11:%.*]] = fadd <4 x float> [[TMP7]], [[WIDE_LOAD11]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP2]], <4 x float> [[TMP6]], <4 x float> [[TMP10]]
+; CHECK-NEXT: [[PREDPHI12:%.*]] = select <4 x i1> [[TMP3]], <4 x float> [[TMP7]], <4 x float> [[TMP11]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i64 16
; CHECK-NEXT: store <4 x float> [[PREDPHI]], ptr [[TMP8]], align 4, !alias.scope [[META9]], !noalias [[META11]]
; CHECK-NEXT: store <4 x float> [[PREDPHI12]], ptr [[TMP12]], align 4, !alias.scope [[META9]], !noalias [[META11]]
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
index 73bcee5..36bcda4 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
@@ -49,7 +49,7 @@ define void @arm_mult_q15(ptr %pSrcA, ptr %pSrcB, ptr noalias %pDst, i32 %blockS
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[BLOCKSIZE]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[WHILE_BODY_PREHEADER16]]
-; CHECK: while.body.preheader16:
+; CHECK: while.body.preheader18:
; CHECK-NEXT: [[BLKCNT_06_PH:%.*]] = phi i32 [ [[BLOCKSIZE]], [[WHILE_BODY_PREHEADER]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[PSRCA_ADDR_05_PH:%.*]] = phi ptr [ [[PSRCA]], [[WHILE_BODY_PREHEADER]] ], [ [[IND_END7]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[PDST_ADDR_04_PH:%.*]] = phi ptr [ [[PDST]], [[WHILE_BODY_PREHEADER]] ], [ [[IND_END9]], [[MIDDLE_BLOCK]] ]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/merge-functions.ll b/llvm/test/Transforms/PhaseOrdering/X86/merge-functions.ll
index 8f1c52c..708cdc9 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/merge-functions.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/merge-functions.ll
@@ -14,8 +14,7 @@ define i1 @test1(i32 %c) {
; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[SWITCH_TABLEIDX]], 20
; CHECK-NEXT: [[SWITCH_CAST:%.*]] = trunc i32 [[SWITCH_TABLEIDX]] to i20
; CHECK-NEXT: [[SWITCH_DOWNSHIFT:%.*]] = lshr i20 -490991, [[SWITCH_CAST]]
-; CHECK-NEXT: [[TMP1:%.*]] = and i20 [[SWITCH_DOWNSHIFT]], 1
-; CHECK-NEXT: [[SWITCH_MASKED:%.*]] = icmp ne i20 [[TMP1]], 0
+; CHECK-NEXT: [[SWITCH_MASKED:%.*]] = trunc i20 [[SWITCH_DOWNSHIFT]] to i1
; CHECK-NEXT: [[I_0:%.*]] = select i1 [[TMP0]], i1 [[SWITCH_MASKED]], i1 false
; CHECK-NEXT: ret i1 [[I_0]]
;
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pr67803.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr67803.ll
index 211c90b..495ec0a 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/pr67803.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/pr67803.ll
@@ -11,16 +11,14 @@ define <4 x i64> @PR67803(<4 x i64> %x, <4 x i64> %y, <4 x i64> %a, <4 x i64> %b
; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <8 x i32> [[TMP0]], [[TMP1]]
; CHECK-NEXT: [[CMP_I21:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[SEXT_I22:%.*]] = sext <4 x i1> [[CMP_I21]] to <4 x i32>
-; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[SEXT_I22]] to <2 x i64>
; CHECK-NEXT: [[CMP_I:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[SEXT_I]] to <2 x i64>
-; CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[SEXT_I22]], <4 x i32> [[SEXT_I]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <32 x i8> [[TMP5]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <4 x i64> [[B:%.*]] to <32 x i8>
; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <32 x i8> [[TMP7]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x i64> [[SHUFFLE_I]] to <32 x i8>
+; CHECK-NEXT: [[TMP9:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8>
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <32 x i8> [[TMP9]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP11:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[TMP6]], <16 x i8> [[TMP8]], <16 x i8> [[TMP10]])
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
@@ -28,7 +26,7 @@ define <4 x i64> @PR67803(<4 x i64> %x, <4 x i64> %y, <4 x i64> %a, <4 x i64> %b
; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <32 x i8> [[TMP13]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[TMP15:%.*]] = bitcast <4 x i64> [[B]] to <32 x i8>
; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <32 x i8> [[TMP15]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[TMP17:%.*]] = bitcast <4 x i64> [[SHUFFLE_I]] to <32 x i8>
+; CHECK-NEXT: [[TMP17:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8>
; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <32 x i8> [[TMP17]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[TMP19:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[TMP14]], <16 x i8> [[TMP16]], <16 x i8> [[TMP18]])
; CHECK-NEXT: [[TMP20:%.*]] = bitcast <16 x i8> [[TMP19]] to <2 x i64>
diff --git a/llvm/test/Transforms/Reassociate/vaarg_movable.ll b/llvm/test/Transforms/Reassociate/vaarg_movable.ll
index 337877a..4e45b21 100644
--- a/llvm/test/Transforms/Reassociate/vaarg_movable.ll
+++ b/llvm/test/Transforms/Reassociate/vaarg_movable.ll
@@ -10,13 +10,13 @@ define i32 @func(i32 %dummy, ...) {
;
; CHECK-LABEL: @func(
; CHECK-NEXT: [[VARARGS:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: call void @llvm.va_start(ptr [[VARARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VARARGS]])
; CHECK-NEXT: [[V0:%.*]] = va_arg ptr [[VARARGS]], i32
; CHECK-NEXT: [[V1:%.*]] = va_arg ptr [[VARARGS]], i32
; CHECK-NEXT: [[V0_NEG:%.*]] = sub i32 0, [[V0]]
; CHECK-NEXT: [[SUB:%.*]] = add i32 [[V0_NEG]], 1
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SUB]], [[V1]]
-; CHECK-NEXT: call void @llvm.va_end(ptr [[VARARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VARARGS]])
; CHECK-NEXT: ret i32 [[ADD]]
;
%varargs = alloca ptr, align 8
diff --git a/llvm/test/Transforms/SCCP/add-nuw-nsw-flags.ll b/llvm/test/Transforms/SCCP/add-nuw-nsw-flags.ll
index b8f5d5d..05d9acd 100644
--- a/llvm/test/Transforms/SCCP/add-nuw-nsw-flags.ll
+++ b/llvm/test/Transforms/SCCP/add-nuw-nsw-flags.ll
@@ -240,3 +240,32 @@ then:
else:
ret i16 0
}
+
+define i1 @test_add_nuw_sub(i32 %a) {
+; CHECK-LABEL: @test_add_nuw_sub(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ADD:%.*]] = add nuw i32 [[A:%.*]], 10000
+; CHECK-NEXT: [[SUB:%.*]] = add i32 [[ADD]], -5000
+; CHECK-NEXT: ret i1 false
+;
+entry:
+ %add = add nuw i32 %a, 10000
+ %sub = add i32 %add, -5000
+ %cond = icmp ult i32 %sub, 5000
+ ret i1 %cond
+}
+
+define i1 @test_add_nsw_sub(i32 %a) {
+; CHECK-LABEL: @test_add_nsw_sub(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[A:%.*]], 10000
+; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[ADD]], -5000
+; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[SUB]], 5000
+; CHECK-NEXT: ret i1 [[COND]]
+;
+entry:
+ %add = add nsw i32 %a, 10000
+ %sub = add i32 %add, -5000
+ %cond = icmp ult i32 %sub, 5000
+ ret i1 %cond
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/ext-trunc.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/ext-trunc.ll
index cef7916..5e3fd15 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/ext-trunc.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/ext-trunc.ll
@@ -17,12 +17,13 @@ define void @test1(<4 x i16> %a, <4 x i16> %b, ptr %p) {
; CHECK-NEXT: [[GEP0:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[S0]]
; CHECK-NEXT: [[LOAD0:%.*]] = load i64, ptr [[GEP0]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[SUB0]], <4 x i32> poison, <2 x i32> <i32 1, i32 2>
-; CHECK-NEXT: [[TMP1:%.*]] = sext <2 x i32> [[TMP0]] to <2 x i64>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i32> [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[TMP1]] to i64
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[TMP2]]
; CHECK-NEXT: [[LOAD1:%.*]] = load i64, ptr [[GEP1]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i32> [[TMP0]], i32 1
+; CHECK-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[TMP4]]
; CHECK-NEXT: [[LOAD2:%.*]] = load i64, ptr [[GEP2]], align 4
; CHECK-NEXT: [[E3:%.*]] = extractelement <4 x i32> [[SUB0]], i32 3
; CHECK-NEXT: [[S3:%.*]] = sext i32 [[E3]] to i64
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/gather-buildvector-with-minbitwidth-user.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/gather-buildvector-with-minbitwidth-user.ll
new file mode 100644
index 0000000..6907724
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/gather-buildvector-with-minbitwidth-user.ll
@@ -0,0 +1,89 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+
+define void @h() {
+; CHECK-LABEL: define void @h() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr i8, ptr null, i64 16
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i32> <i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, i32 0, i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = trunc <8 x i32> [[TMP0]] to <8 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = or <8 x i1> zeroinitializer, [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i1> [[TMP2]], zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i1> [[TMP4]] to <8 x i16>
+; CHECK-NEXT: store <8 x i16> [[TMP3]], ptr [[ARRAYIDX2]], align 2
+; CHECK-NEXT: ret void
+;
+entry:
+ %conv9 = zext i16 0 to i32
+ %arrayidx2 = getelementptr i8, ptr null, i64 16
+ %conv310 = zext i16 0 to i32
+ %add4 = or i32 %conv310, %conv9
+ %sub = or i32 %conv9, %conv310
+ %conv15 = sext i16 0 to i32
+ %shr = ashr i32 0, 0
+ %arrayidx18 = getelementptr i8, ptr null, i64 24
+ %conv19 = sext i16 0 to i32
+ %sub20 = or i32 %shr, %conv19
+ %shr29 = ashr i32 0, 0
+ %add30 = or i32 %shr29, %conv15
+ %sub39 = or i32 %sub, %sub20
+ %conv40 = trunc i32 %sub39 to i16
+ store i16 %conv40, ptr %arrayidx2, align 2
+ %sub44 = or i32 %add4, %add30
+ %conv45 = trunc i32 %sub44 to i16
+ store i16 %conv45, ptr %arrayidx18, align 2
+ %arrayidx2.1 = getelementptr i8, ptr null, i64 18
+ %conv3.112 = zext i16 0 to i32
+ %add4.1 = or i32 %conv3.112, 0
+ %sub.1 = or i32 0, %conv3.112
+ %conv15.1 = sext i16 0 to i32
+ %shr.1 = ashr i32 0, 0
+ %arrayidx18.1 = getelementptr i8, ptr null, i64 26
+ %conv19.1 = sext i16 0 to i32
+ %sub20.1 = or i32 %shr.1, %conv19.1
+ %shr29.1 = ashr i32 0, 0
+ %add30.1 = or i32 %shr29.1, %conv15.1
+ %sub39.1 = or i32 %sub.1, %sub20.1
+ %conv40.1 = trunc i32 %sub39.1 to i16
+ store i16 %conv40.1, ptr %arrayidx2.1, align 2
+ %sub44.1 = or i32 %add4.1, %add30.1
+ %conv45.1 = trunc i32 %sub44.1 to i16
+ store i16 %conv45.1, ptr %arrayidx18.1, align 2
+ %conv.213 = zext i16 0 to i32
+ %arrayidx2.2 = getelementptr i8, ptr null, i64 20
+ %conv3.214 = zext i16 0 to i32
+ %add4.2 = or i32 0, %conv.213
+ %sub.2 = or i32 0, %conv3.214
+ %conv15.2 = sext i16 0 to i32
+ %shr.2 = ashr i32 0, 0
+ %arrayidx18.2 = getelementptr i8, ptr null, i64 28
+ %conv19.2 = sext i16 0 to i32
+ %sub20.2 = or i32 %shr.2, %conv19.2
+ %shr29.2 = ashr i32 0, 0
+ %add30.2 = or i32 %shr29.2, %conv15.2
+ %sub39.2 = or i32 %sub.2, %sub20.2
+ %conv40.2 = trunc i32 %sub39.2 to i16
+ store i16 %conv40.2, ptr %arrayidx2.2, align 2
+ %sub44.2 = or i32 %add4.2, %add30.2
+ %conv45.2 = trunc i32 %sub44.2 to i16
+ store i16 %conv45.2, ptr %arrayidx18.2, align 2
+ %conv.315 = zext i16 0 to i32
+ %arrayidx2.3 = getelementptr i8, ptr null, i64 22
+ %conv3.316 = zext i16 0 to i32
+ %add4.3 = or i32 0, %conv.315
+ %sub.3 = or i32 0, %conv3.316
+ %conv15.3 = sext i16 0 to i32
+ %shr.3 = ashr i32 0, 0
+ %arrayidx18.3 = getelementptr i8, ptr null, i64 30
+ %conv19.3 = sext i16 0 to i32
+ %sub20.3 = or i32 %shr.3, %conv19.3
+ %shr29.3 = ashr i32 0, 0
+ %add30.3 = or i32 %shr29.3, %conv15.3
+ %sub39.3 = or i32 %sub.3, %sub20.3
+ %conv40.3 = trunc i32 %sub39.3 to i16
+ store i16 %conv40.3, ptr %arrayidx2.3, align 2
+ %sub44.3 = or i32 %add4.3, %add30.3
+ %conv45.3 = trunc i32 %sub44.3 to i16
+ store i16 %conv45.3, ptr %arrayidx18.3, align 2
+ ret void
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/gather-with-minbith-user.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/gather-with-minbith-user.ll
new file mode 100644
index 0000000..d51ef0b
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/gather-with-minbith-user.ll
@@ -0,0 +1,90 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+
+define void @h() {
+; CHECK-LABEL: define void @h() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr i8, ptr null, i64 16
+; CHECK-NEXT: [[TMP0:%.*]] = trunc <8 x i32> zeroinitializer to <8 x i1>
+; CHECK-NEXT: [[TMP1:%.*]] = sub <8 x i1> [[TMP0]], zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = add <8 x i1> [[TMP0]], zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i1> [[TMP1]], <8 x i1> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i1> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = zext <8 x i1> [[TMP5]] to <8 x i16>
+; CHECK-NEXT: store <8 x i16> [[TMP4]], ptr [[ARRAYIDX2]], align 2
+; CHECK-NEXT: ret void
+;
+entry:
+ %conv9 = zext i16 0 to i32
+ %arrayidx2 = getelementptr i8, ptr null, i64 16
+ %conv310 = zext i16 0 to i32
+ %add4 = add i32 %conv310, %conv9
+ %sub = sub i32 0, %conv310
+ %conv15 = sext i16 0 to i32
+ %shr = ashr i32 0, 0
+ %arrayidx18 = getelementptr i8, ptr null, i64 24
+ %conv19 = sext i16 0 to i32
+ %sub20 = sub i32 %shr, %conv19
+ %shr29 = ashr i32 0, 0
+ %add30 = add i32 %shr29, %conv15
+ %sub39 = or i32 %sub, %sub20
+ %conv40 = trunc i32 %sub39 to i16
+ store i16 %conv40, ptr %arrayidx2, align 2
+ %sub44 = or i32 %add4, %add30
+ %conv45 = trunc i32 %sub44 to i16
+ store i16 %conv45, ptr %arrayidx18, align 2
+ %arrayidx2.1 = getelementptr i8, ptr null, i64 18
+ %conv3.112 = zext i16 0 to i32
+ %add4.1 = add i32 %conv3.112, 0
+ %sub.1 = sub i32 0, %conv3.112
+ %conv15.1 = sext i16 0 to i32
+ %shr.1 = ashr i32 0, 0
+ %arrayidx18.1 = getelementptr i8, ptr null, i64 26
+ %conv19.1 = sext i16 0 to i32
+ %sub20.1 = sub i32 %shr.1, %conv19.1
+ %shr29.1 = ashr i32 0, 0
+ %add30.1 = add i32 %shr29.1, %conv15.1
+ %sub39.1 = or i32 %sub.1, %sub20.1
+ %conv40.1 = trunc i32 %sub39.1 to i16
+ store i16 %conv40.1, ptr %arrayidx2.1, align 2
+ %sub44.1 = or i32 %add4.1, %add30.1
+ %conv45.1 = trunc i32 %sub44.1 to i16
+ store i16 %conv45.1, ptr %arrayidx18.1, align 2
+ %conv.213 = zext i16 0 to i32
+ %arrayidx2.2 = getelementptr i8, ptr null, i64 20
+ %conv3.214 = zext i16 0 to i32
+ %add4.2 = add i32 0, %conv.213
+ %sub.2 = sub i32 0, %conv3.214
+ %conv15.2 = sext i16 0 to i32
+ %shr.2 = ashr i32 0, 0
+ %arrayidx18.2 = getelementptr i8, ptr null, i64 28
+ %conv19.2 = sext i16 0 to i32
+ %sub20.2 = sub i32 %shr.2, %conv19.2
+ %shr29.2 = ashr i32 0, 0
+ %add30.2 = add i32 %shr29.2, %conv15.2
+ %sub39.2 = or i32 %sub.2, %sub20.2
+ %conv40.2 = trunc i32 %sub39.2 to i16
+ store i16 %conv40.2, ptr %arrayidx2.2, align 2
+ %sub44.2 = or i32 %add4.2, %add30.2
+ %conv45.2 = trunc i32 %sub44.2 to i16
+ store i16 %conv45.2, ptr %arrayidx18.2, align 2
+ %conv.315 = zext i16 0 to i32
+ %arrayidx2.3 = getelementptr i8, ptr null, i64 22
+ %conv3.316 = zext i16 0 to i32
+ %add4.3 = add i32 0, %conv.315
+ %sub.3 = sub i32 0, %conv3.316
+ %conv15.3 = sext i16 0 to i32
+ %shr.3 = ashr i32 0, 0
+ %arrayidx18.3 = getelementptr i8, ptr null, i64 30
+ %conv19.3 = sext i16 0 to i32
+ %sub20.3 = sub i32 %shr.3, %conv19.3
+ %shr29.3 = ashr i32 0, 0
+ %add30.3 = add i32 %shr29.3, %conv15.3
+ %sub39.3 = or i32 %sub.3, %sub20.3
+ %conv40.3 = trunc i32 %sub39.3 to i16
+ store i16 %conv40.3, ptr %arrayidx2.3, align 2
+ %sub44.3 = or i32 %add4.3, %add30.3
+ %conv45.3 = trunc i32 %sub44.3 to i16
+ store i16 %conv45.3, ptr %arrayidx18.3, align 2
+ ret void
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr2.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr2.ll
index 47485e5..1cce520 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr2.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr2.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
;test_i16_extend NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=aarch64--linux-gnu -passes=slp-vectorizer,dce,instcombine -slp-threshold=-7 -pass-remarks-output=%t < %s | FileCheck %s
+; RUN: opt -S -mtriple=aarch64--linux-gnu -passes=slp-vectorizer,dce,instcombine -slp-threshold=-5 -pass-remarks-output=%t < %s | FileCheck %s
; RUN: cat %t | FileCheck -check-prefix=YAML %s
-; RUN: opt -S -mtriple=aarch64--linux-gnu -passes='slp-vectorizer,dce,instcombine' -slp-threshold=-7 -pass-remarks-output=%t < %s | FileCheck %s
+; RUN: opt -S -mtriple=aarch64--linux-gnu -passes='slp-vectorizer,dce,instcombine' -slp-threshold=-5 -pass-remarks-output=%t < %s | FileCheck %s
; RUN: cat %t | FileCheck -check-prefix=YAML %s
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/horizontal.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/horizontal.ll
index 1986b51..7c5f984 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/horizontal.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/horizontal.ll
@@ -228,7 +228,7 @@ for.end: ; preds = %for.end.loopexit, %
; YAML-NEXT: Function: test_unrolled_select
; YAML-NEXT: Args:
; YAML-NEXT: - String: 'Vectorized horizontal reduction with cost '
-; YAML-NEXT: - Cost: '-36'
+; YAML-NEXT: - Cost: '-41'
; YAML-NEXT: - String: ' and with tree size '
; YAML-NEXT: - TreeSize: '10'
@@ -246,15 +246,17 @@ define i32 @test_unrolled_select(ptr noalias nocapture readonly %blk1, ptr noali
; CHECK-NEXT: [[P2_045:%.*]] = phi ptr [ [[BLK2:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR88:%.*]], [[IF_END_86]] ]
; CHECK-NEXT: [[P1_044:%.*]] = phi ptr [ [[BLK1:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR:%.*]], [[IF_END_86]] ]
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[P1_044]], align 1
-; CHECK-NEXT: [[TMP1:%.*]] = zext <8 x i8> [[TMP0]] to <8 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = zext <8 x i8> [[TMP0]] to <8 x i16>
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr [[P2_045]], align 1
-; CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[TMP2]] to <8 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = sub nsw <8 x i32> [[TMP1]], [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = icmp slt <8 x i32> [[TMP4]], zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = sub nsw <8 x i32> zeroinitializer, [[TMP4]]
-; CHECK-NEXT: [[TMP7:%.*]] = select <8 x i1> [[TMP5]], <8 x i32> [[TMP6]], <8 x i32> [[TMP4]]
-; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP7]])
-; CHECK-NEXT: [[OP_RDX]] = add i32 [[TMP8]], [[S_047]]
+; CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[TMP2]] to <8 x i16>
+; CHECK-NEXT: [[TMP4:%.*]] = sub <8 x i16> [[TMP1]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i16> [[TMP4]] to <8 x i32>
+; CHECK-NEXT: [[TMP6:%.*]] = icmp slt <8 x i32> [[TMP5]], zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = sub <8 x i16> zeroinitializer, [[TMP4]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <8 x i1> [[TMP6]], <8 x i16> [[TMP7]], <8 x i16> [[TMP4]]
+; CHECK-NEXT: [[TMP9:%.*]] = sext <8 x i16> [[TMP8]] to <8 x i32>
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP9]])
+; CHECK-NEXT: [[OP_RDX]] = add i32 [[TMP10]], [[S_047]]
; CHECK-NEXT: [[CMP83:%.*]] = icmp slt i32 [[OP_RDX]], [[LIM:%.*]]
; CHECK-NEXT: br i1 [[CMP83]], label [[IF_END_86]], label [[FOR_END_LOOPEXIT:%.*]]
; CHECK: if.end.86:
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-add-i64.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-add-i64.ll
index d67fdc1..a7a7f64 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-add-i64.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-add-i64.ll
@@ -28,21 +28,11 @@ entry:
define i64 @red_zext_ld_4xi64(ptr %ptr) {
; CHECK-LABEL: @red_zext_ld_4xi64(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[LD0:%.*]] = load i8, ptr [[PTR:%.*]], align 1
-; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[LD0]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP]], align 1
-; CHECK-NEXT: [[ZEXT_1:%.*]] = zext i8 [[LD1]] to i64
-; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i64 [[ZEXT]], [[ZEXT_1]]
-; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 2
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_1]], align 1
-; CHECK-NEXT: [[ZEXT_2:%.*]] = zext i8 [[LD2]] to i64
-; CHECK-NEXT: [[ADD_2:%.*]] = add nuw nsw i64 [[ADD_1]], [[ZEXT_2]]
-; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 3
-; CHECK-NEXT: [[LD3:%.*]] = load i8, ptr [[GEP_2]], align 1
-; CHECK-NEXT: [[ZEXT_3:%.*]] = zext i8 [[LD3]] to i64
-; CHECK-NEXT: [[ADD_3:%.*]] = add nuw nsw i64 [[ADD_2]], [[ZEXT_3]]
-; CHECK-NEXT: ret i64 [[ADD_3]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i8>, ptr [[PTR:%.*]], align 1
+; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[TMP0]] to <4 x i16>
+; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[TMP1]])
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[TMP2]] to i64
+; CHECK-NEXT: ret i64 [[TMP3]]
;
entry:
%ld0 = load i8, ptr %ptr
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/slp-frem.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/slp-frem.ll
new file mode 100644
index 0000000..a38f4bd
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/slp-frem.ll
@@ -0,0 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -S -mtriple=aarch64 -vector-library=ArmPL -passes=slp-vectorizer | FileCheck %s
+
+@a = common global ptr null, align 8
+
+define void @frem_v2double() {
+; CHECK-LABEL: define void @frem_v2double() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr @a, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @a, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = frem <2 x double> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: store <2 x double> [[TMP2]], ptr @a, align 8
+; CHECK-NEXT: ret void
+;
+entry:
+ %a0 = load double, ptr getelementptr inbounds (double, ptr @a, i64 0), align 8
+ %a1 = load double, ptr getelementptr inbounds (double, ptr @a, i64 1), align 8
+ %b0 = load double, ptr getelementptr inbounds (double, ptr @a, i64 0), align 8
+ %b1 = load double, ptr getelementptr inbounds (double, ptr @a, i64 1), align 8
+ %r0 = frem double %a0, %b0
+ %r1 = frem double %a1, %b1
+ store double %r0, ptr getelementptr inbounds (double, ptr @a, i64 0), align 8
+ store double %r1, ptr getelementptr inbounds (double, ptr @a, i64 1), align 8
+ ret void
+}
+
+define void @frem_v4float() {
+; CHECK-LABEL: define void @frem_v4float() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr @a, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @a, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = frem <4 x float> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: store <4 x float> [[TMP2]], ptr @a, align 8
+; CHECK-NEXT: ret void
+;
+entry:
+ %a0 = load float, ptr getelementptr inbounds (float, ptr @a, i64 0), align 8
+ %a1 = load float, ptr getelementptr inbounds (float, ptr @a, i64 1), align 8
+ %a2 = load float, ptr getelementptr inbounds (float, ptr @a, i64 2), align 8
+ %a3 = load float, ptr getelementptr inbounds (float, ptr @a, i64 3), align 8
+ %b0 = load float, ptr getelementptr inbounds (float, ptr @a, i64 0), align 8
+ %b1 = load float, ptr getelementptr inbounds (float, ptr @a, i64 1), align 8
+ %b2 = load float, ptr getelementptr inbounds (float, ptr @a, i64 2), align 8
+ %b3 = load float, ptr getelementptr inbounds (float, ptr @a, i64 3), align 8
+ %r0 = frem float %a0, %b0
+ %r1 = frem float %a1, %b1
+ %r2 = frem float %a2, %b2
+ %r3 = frem float %a3, %b3
+ store float %r0, ptr getelementptr inbounds (float, ptr @a, i64 0), align 8
+ store float %r1, ptr getelementptr inbounds (float, ptr @a, i64 1), align 8
+ store float %r2, ptr getelementptr inbounds (float, ptr @a, i64 2), align 8
+ store float %r3, ptr getelementptr inbounds (float, ptr @a, i64 3), align 8
+ ret void
+}
+
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/user-node-not-in-bitwidths.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/user-node-not-in-bitwidths.ll
new file mode 100644
index 0000000..6404cf4
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/user-node-not-in-bitwidths.ll
@@ -0,0 +1,83 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+
+define void @h() {
+; CHECK-LABEL: define void @h() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr i8, ptr null, i64 16
+; CHECK-NEXT: store <8 x i16> zeroinitializer, ptr [[ARRAYIDX2]], align 2
+; CHECK-NEXT: ret void
+;
+entry:
+ %arrayidx2 = getelementptr i8, ptr null, i64 16
+ %conv310 = zext i16 0 to i32
+ %add4 = or i32 %conv310, 0
+ %sub = or i32 0, %conv310
+ %conv15 = sext i16 0 to i32
+ %shr = ashr i32 %conv15, 0
+ %arrayidx18 = getelementptr i8, ptr null, i64 24
+ %conv19 = sext i16 0 to i32
+ %sub20 = or i32 %shr, 0
+ %shr29 = ashr i32 %conv19, 0
+ %add30 = or i32 %shr29, %conv15
+ %sub39 = or i32 %sub, %sub20
+ %conv40 = trunc i32 %sub39 to i16
+ store i16 %conv40, ptr %arrayidx2, align 2
+ %sub44 = or i32 %add4, %add30
+ %conv45 = trunc i32 %sub44 to i16
+ store i16 %conv45, ptr %arrayidx18, align 2
+ %arrayidx2.1 = getelementptr i8, ptr null, i64 18
+ %conv3.112 = zext i16 0 to i32
+ %add4.1 = or i32 %conv3.112, 0
+ %sub.1 = or i32 0, %conv3.112
+ %conv15.1 = sext i16 0 to i32
+ %shr.1 = ashr i32 %conv15.1, 0
+ %arrayidx18.1 = getelementptr i8, ptr null, i64 26
+ %conv19.1 = sext i16 0 to i32
+ %sub20.1 = or i32 %shr.1, 0
+ %shr29.1 = ashr i32 %conv19.1, 0
+ %add30.1 = or i32 %shr29.1, 0
+ %sub39.1 = or i32 %sub.1, %sub20.1
+ %conv40.1 = trunc i32 %sub39.1 to i16
+ store i16 %conv40.1, ptr %arrayidx2.1, align 2
+ %sub44.1 = or i32 %add4.1, %add30.1
+ %conv45.1 = trunc i32 %sub44.1 to i16
+ store i16 %conv45.1, ptr %arrayidx18.1, align 2
+ %conv.213 = zext i16 0 to i32
+ %arrayidx2.2 = getelementptr i8, ptr null, i64 20
+ %conv3.214 = zext i16 0 to i32
+ %add4.2 = or i32 0, %conv.213
+ %sub.2 = or i32 0, %conv3.214
+ %conv15.2 = sext i16 0 to i32
+ %shr.2 = ashr i32 %conv15.2, 0
+ %arrayidx18.2 = getelementptr i8, ptr null, i64 28
+ %conv19.2 = sext i16 0 to i32
+ %sub20.2 = or i32 %shr.2, 0
+ %shr29.2 = ashr i32 %conv19.2, 0
+ %add30.2 = or i32 %shr29.2, 0
+ %sub39.2 = or i32 %sub.2, %sub20.2
+ %conv40.2 = trunc i32 %sub39.2 to i16
+ store i16 %conv40.2, ptr %arrayidx2.2, align 2
+ %sub44.2 = or i32 %add4.2, %add30.2
+ %conv45.2 = trunc i32 %sub44.2 to i16
+ store i16 %conv45.2, ptr %arrayidx18.2, align 2
+ %conv.315 = zext i16 0 to i32
+ %arrayidx2.3 = getelementptr i8, ptr null, i64 22
+ %conv3.316 = zext i16 0 to i32
+ %add4.3 = or i32 0, %conv.315
+ %sub.3 = or i32 0, %conv3.316
+ %conv15.3 = sext i16 0 to i32
+ %shr.3 = ashr i32 %conv15.3, 0
+ %arrayidx18.3 = getelementptr i8, ptr null, i64 30
+ %conv19.3 = sext i16 0 to i32
+ %sub20.3 = or i32 %shr.3, 0
+ %shr29.3 = ashr i32 %conv19.3, 0
+ %add30.3 = or i32 %shr29.3, 0
+ %sub39.3 = or i32 %sub.3, %sub20.3
+ %conv40.3 = trunc i32 %sub39.3 to i16
+ store i16 %conv40.3, ptr %arrayidx2.3, align 2
+ %sub44.3 = or i32 %add4.3, %add30.3
+ %conv45.3 = trunc i32 %sub44.3 to i16
+ store i16 %conv45.3, ptr %arrayidx18.3, align 2
+ ret void
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll
index ed73f7b..d87bdfe 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll
@@ -6,7 +6,7 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-SAME: ptr [[PIX1:%.*]], ptr [[PIX2:%.*]], i64 [[IDX_EXT:%.*]], i64 [[IDX_EXT63:%.*]], ptr [[ADD_PTR:%.*]], ptr [[ADD_PTR64:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[PIX1]], align 1
-; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP0]] to i32
+; CHECK-NEXT: [[CONV1:%.*]] = zext i8 [[TMP0]] to i32
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x ptr> poison, ptr [[PIX1]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x ptr> [[TMP1]], <2 x ptr> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, <2 x ptr> [[TMP2]], <2 x i64> <i64 4, i64 6>
@@ -37,10 +37,10 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 4
; CHECK-NEXT: [[ARRAYIDX5_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 4
; CHECK-NEXT: [[TMP15:%.*]] = load <2 x i8>, ptr [[ADD_PTR_1]], align 1
-; CHECK-NEXT: [[TMP16:%.*]] = zext <2 x i8> [[TMP15]] to <2 x i32>
+; CHECK-NEXT: [[TMP101:%.*]] = zext <2 x i8> [[TMP15]] to <2 x i32>
; CHECK-NEXT: [[TMP17:%.*]] = load <2 x i8>, ptr [[ADD_PTR64_1]], align 1
; CHECK-NEXT: [[TMP18:%.*]] = zext <2 x i8> [[TMP17]] to <2 x i32>
-; CHECK-NEXT: [[TMP19:%.*]] = sub <2 x i32> [[TMP16]], [[TMP18]]
+; CHECK-NEXT: [[TMP19:%.*]] = sub <2 x i32> [[TMP101]], [[TMP18]]
; CHECK-NEXT: [[TMP20:%.*]] = load <2 x i8>, ptr [[ARRAYIDX3_2]], align 1
; CHECK-NEXT: [[TMP21:%.*]] = zext <2 x i8> [[TMP20]] to <2 x i32>
; CHECK-NEXT: [[TMP22:%.*]] = load <2 x i8>, ptr [[ARRAYIDX5_2]], align 1
@@ -70,9 +70,9 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[SUB45_2:%.*]] = sub i32 [[TMP39]], [[TMP40]]
; CHECK-NEXT: [[TMP41:%.*]] = extractelement <2 x i32> [[TMP38]], i32 0
; CHECK-NEXT: [[TMP42:%.*]] = extractelement <2 x i32> [[TMP38]], i32 1
-; CHECK-NEXT: [[ADD46_2:%.*]] = add i32 [[TMP42]], [[TMP41]]
+; CHECK-NEXT: [[CONV:%.*]] = add i32 [[TMP42]], [[TMP41]]
; CHECK-NEXT: [[SUB47_2:%.*]] = sub i32 [[TMP41]], [[TMP42]]
-; CHECK-NEXT: [[ADD48_2:%.*]] = add i32 [[ADD46_2]], [[ADD44_2]]
+; CHECK-NEXT: [[ADD48_2:%.*]] = add i32 [[CONV]], [[ADD44_2]]
; CHECK-NEXT: [[TMP43:%.*]] = load i8, ptr null, align 1
; CHECK-NEXT: [[ARRAYIDX20_3:%.*]] = getelementptr i8, ptr null, i64 2
; CHECK-NEXT: [[ARRAYIDX22_3:%.*]] = getelementptr i8, ptr null, i64 2
@@ -104,32 +104,32 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[TMP69:%.*]] = sub <2 x i32> [[TMP66]], [[TMP68]]
; CHECK-NEXT: [[TMP70:%.*]] = shl <2 x i32> [[TMP69]], <i32 16, i32 16>
; CHECK-NEXT: [[TMP71:%.*]] = add <2 x i32> [[TMP70]], [[TMP63]]
-; CHECK-NEXT: [[TMP72:%.*]] = add <2 x i32> [[TMP71]], [[TMP58]]
+; CHECK-NEXT: [[TMP16:%.*]] = add <2 x i32> [[TMP71]], [[TMP58]]
; CHECK-NEXT: [[TMP73:%.*]] = sub <2 x i32> [[TMP58]], [[TMP71]]
-; CHECK-NEXT: [[TMP74:%.*]] = extractelement <2 x i32> [[TMP72]], i32 0
-; CHECK-NEXT: [[TMP75:%.*]] = extractelement <2 x i32> [[TMP72]], i32 1
+; CHECK-NEXT: [[TMP74:%.*]] = extractelement <2 x i32> [[TMP16]], i32 0
+; CHECK-NEXT: [[TMP75:%.*]] = extractelement <2 x i32> [[TMP16]], i32 1
; CHECK-NEXT: [[ADD48_3:%.*]] = add i32 [[TMP74]], [[TMP75]]
; CHECK-NEXT: [[ADD94:%.*]] = add i32 [[ADD48_3]], [[ADD48_2]]
; CHECK-NEXT: [[SUB102:%.*]] = sub i32 [[ADD48_2]], [[ADD48_3]]
-; CHECK-NEXT: [[TMP76:%.*]] = extractelement <2 x i32> [[TMP47]], i32 1
-; CHECK-NEXT: [[SHR_I:%.*]] = lshr i32 [[TMP76]], 15
-; CHECK-NEXT: [[AND_I:%.*]] = and i32 [[SHR_I]], 65537
-; CHECK-NEXT: [[MUL_I:%.*]] = mul i32 [[AND_I]], 65535
-; CHECK-NEXT: [[SHR_I49:%.*]] = lshr i32 [[ADD46_2]], 15
-; CHECK-NEXT: [[AND_I50:%.*]] = and i32 [[SHR_I49]], 65537
-; CHECK-NEXT: [[MUL_I51:%.*]] = mul i32 [[AND_I50]], 65535
-; CHECK-NEXT: [[TMP77:%.*]] = extractelement <2 x i32> [[TMP16]], i32 0
-; CHECK-NEXT: [[SHR_I49_1:%.*]] = lshr i32 [[TMP77]], 15
-; CHECK-NEXT: [[AND_I50_1:%.*]] = and i32 [[SHR_I49_1]], 65537
-; CHECK-NEXT: [[MUL_I51_1:%.*]] = mul i32 [[AND_I50_1]], 65535
-; CHECK-NEXT: [[SHR_I49_2:%.*]] = lshr i32 [[CONV_1]], 15
+; CHECK-NEXT: [[TMP79:%.*]] = extractelement <2 x i32> [[TMP47]], i32 1
+; CHECK-NEXT: [[SHR_I49_2:%.*]] = lshr i32 [[TMP79]], 15
; CHECK-NEXT: [[AND_I50_2:%.*]] = and i32 [[SHR_I49_2]], 65537
; CHECK-NEXT: [[MUL_I51_2:%.*]] = mul i32 [[AND_I50_2]], 65535
; CHECK-NEXT: [[SHR_I49_3:%.*]] = lshr i32 [[CONV]], 15
; CHECK-NEXT: [[AND_I50_3:%.*]] = and i32 [[SHR_I49_3]], 65537
; CHECK-NEXT: [[MUL_I51_3:%.*]] = mul i32 [[AND_I50_3]], 65535
+; CHECK-NEXT: [[TMP107:%.*]] = extractelement <2 x i32> [[TMP101]], i32 0
+; CHECK-NEXT: [[SHR_I49_1:%.*]] = lshr i32 [[TMP107]], 15
+; CHECK-NEXT: [[AND_I50_1:%.*]] = and i32 [[SHR_I49_1]], 65537
+; CHECK-NEXT: [[MUL_I51_1:%.*]] = mul i32 [[AND_I50_1]], 65535
+; CHECK-NEXT: [[SHR_I49_4:%.*]] = lshr i32 [[CONV_1]], 15
+; CHECK-NEXT: [[AND_I50_4:%.*]] = and i32 [[SHR_I49_4]], 65537
+; CHECK-NEXT: [[MUL_I51_4:%.*]] = mul i32 [[AND_I50_4]], 65535
+; CHECK-NEXT: [[SHR_I49_5:%.*]] = lshr i32 [[CONV1]], 15
+; CHECK-NEXT: [[AND_I50_5:%.*]] = and i32 [[SHR_I49_5]], 65537
+; CHECK-NEXT: [[MUL_I51_5:%.*]] = mul i32 [[AND_I50_5]], 65535
; CHECK-NEXT: [[TMP78:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8]], align 1
-; CHECK-NEXT: [[TMP79:%.*]] = zext <2 x i8> [[TMP78]] to <2 x i32>
+; CHECK-NEXT: [[TMP102:%.*]] = zext <2 x i8> [[TMP78]] to <2 x i32>
; CHECK-NEXT: [[TMP80:%.*]] = insertelement <2 x ptr> [[TMP5]], ptr [[ARRAYIDX22]], i32 1
; CHECK-NEXT: [[TMP81:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP80]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
; CHECK-NEXT: [[TMP82:%.*]] = zext <2 x i8> [[TMP81]] to <2 x i32>
@@ -147,20 +147,20 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[TMP94:%.*]] = zext <2 x i8> [[TMP93]] to <2 x i32>
; CHECK-NEXT: [[TMP95:%.*]] = sub <2 x i32> [[TMP92]], [[TMP94]]
; CHECK-NEXT: [[TMP96:%.*]] = shl <2 x i32> [[TMP95]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP97:%.*]] = insertelement <2 x i32> [[TMP79]], i32 [[CONV33]], i32 1
+; CHECK-NEXT: [[TMP97:%.*]] = insertelement <2 x i32> [[TMP102]], i32 [[CONV33]], i32 1
; CHECK-NEXT: [[TMP98:%.*]] = sub <2 x i32> [[TMP97]], [[TMP90]]
-; CHECK-NEXT: [[TMP99:%.*]] = add <2 x i32> [[TMP96]], [[TMP98]]
-; CHECK-NEXT: [[TMP100:%.*]] = insertelement <2 x i32> [[TMP79]], i32 [[CONV]], i32 0
-; CHECK-NEXT: [[TMP101:%.*]] = sub <2 x i32> [[TMP100]], [[TMP82]]
-; CHECK-NEXT: [[TMP102:%.*]] = add <2 x i32> [[TMP88]], [[TMP101]]
-; CHECK-NEXT: [[TMP103:%.*]] = shufflevector <2 x i32> [[TMP99]], <2 x i32> [[TMP102]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[TMP104:%.*]] = add <2 x i32> [[TMP99]], [[TMP102]]
-; CHECK-NEXT: [[TMP105:%.*]] = sub <2 x i32> [[TMP102]], [[TMP99]]
-; CHECK-NEXT: [[TMP106:%.*]] = extractelement <2 x i32> [[TMP104]], i32 0
-; CHECK-NEXT: [[TMP107:%.*]] = extractelement <2 x i32> [[TMP104]], i32 1
-; CHECK-NEXT: [[ADD48:%.*]] = add i32 [[TMP107]], [[TMP106]]
+; CHECK-NEXT: [[TMP104:%.*]] = add <2 x i32> [[TMP96]], [[TMP98]]
+; CHECK-NEXT: [[TMP100:%.*]] = insertelement <2 x i32> [[TMP102]], i32 [[CONV1]], i32 0
+; CHECK-NEXT: [[TMP103:%.*]] = sub <2 x i32> [[TMP100]], [[TMP82]]
+; CHECK-NEXT: [[TMP200:%.*]] = add <2 x i32> [[TMP88]], [[TMP103]]
+; CHECK-NEXT: [[TMP128:%.*]] = shufflevector <2 x i32> [[TMP104]], <2 x i32> [[TMP200]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[TMP165:%.*]] = add <2 x i32> [[TMP104]], [[TMP200]]
+; CHECK-NEXT: [[TMP105:%.*]] = sub <2 x i32> [[TMP200]], [[TMP104]]
+; CHECK-NEXT: [[TMP238:%.*]] = extractelement <2 x i32> [[TMP165]], i32 0
+; CHECK-NEXT: [[TMP143:%.*]] = extractelement <2 x i32> [[TMP165]], i32 1
+; CHECK-NEXT: [[ADD48:%.*]] = add i32 [[TMP143]], [[TMP238]]
; CHECK-NEXT: [[TMP108:%.*]] = extractelement <2 x i32> [[TMP105]], i32 1
-; CHECK-NEXT: [[SHR_I59:%.*]] = lshr i32 [[TMP107]], 15
+; CHECK-NEXT: [[SHR_I59:%.*]] = lshr i32 [[TMP143]], 15
; CHECK-NEXT: [[AND_I60:%.*]] = and i32 [[SHR_I59]], 65537
; CHECK-NEXT: [[MUL_I61:%.*]] = mul i32 [[AND_I60]], 65535
; CHECK-NEXT: [[SHR_I59_1:%.*]] = lshr i32 [[TMP108]], 15
@@ -185,7 +185,7 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[TMP125:%.*]] = shl <2 x i32> [[TMP124]], <i32 16, i32 16>
; CHECK-NEXT: [[TMP126:%.*]] = getelementptr i8, <2 x ptr> [[TMP120]], <2 x i64> <i64 1, i64 3>
; CHECK-NEXT: [[TMP127:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP126]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP128:%.*]] = zext <2 x i8> [[TMP127]] to <2 x i32>
+; CHECK-NEXT: [[TMP153:%.*]] = zext <2 x i8> [[TMP127]] to <2 x i32>
; CHECK-NEXT: [[TMP129:%.*]] = getelementptr i8, <2 x ptr> [[TMP115]], <2 x i64> <i64 5, i64 7>
; CHECK-NEXT: [[TMP130:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP129]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
; CHECK-NEXT: [[TMP131:%.*]] = zext <2 x i8> [[TMP130]] to <2 x i32>
@@ -195,15 +195,15 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[TMP135:%.*]] = sub <2 x i32> [[TMP131]], [[TMP134]]
; CHECK-NEXT: [[TMP136:%.*]] = shl <2 x i32> [[TMP135]], <i32 16, i32 16>
; CHECK-NEXT: [[TMP137:%.*]] = insertelement <2 x i32> [[TMP110]], i32 [[CONV33_1]], i32 1
-; CHECK-NEXT: [[TMP138:%.*]] = sub <2 x i32> [[TMP137]], [[TMP128]]
+; CHECK-NEXT: [[TMP138:%.*]] = sub <2 x i32> [[TMP137]], [[TMP153]]
; CHECK-NEXT: [[TMP139:%.*]] = add <2 x i32> [[TMP136]], [[TMP138]]
; CHECK-NEXT: [[TMP140:%.*]] = insertelement <2 x i32> [[TMP110]], i32 [[CONV_1]], i32 0
; CHECK-NEXT: [[TMP141:%.*]] = sub <2 x i32> [[TMP140]], [[TMP113]]
; CHECK-NEXT: [[TMP142:%.*]] = add <2 x i32> [[TMP125]], [[TMP141]]
-; CHECK-NEXT: [[TMP143:%.*]] = add <2 x i32> [[TMP139]], [[TMP142]]
+; CHECK-NEXT: [[TMP257:%.*]] = add <2 x i32> [[TMP139]], [[TMP142]]
; CHECK-NEXT: [[TMP144:%.*]] = sub <2 x i32> [[TMP142]], [[TMP139]]
-; CHECK-NEXT: [[TMP145:%.*]] = extractelement <2 x i32> [[TMP143]], i32 0
-; CHECK-NEXT: [[TMP146:%.*]] = extractelement <2 x i32> [[TMP143]], i32 1
+; CHECK-NEXT: [[TMP145:%.*]] = extractelement <2 x i32> [[TMP257]], i32 0
+; CHECK-NEXT: [[TMP146:%.*]] = extractelement <2 x i32> [[TMP257]], i32 1
; CHECK-NEXT: [[ADD48_1:%.*]] = add i32 [[TMP146]], [[TMP145]]
; CHECK-NEXT: [[SHR_I54:%.*]] = lshr i32 [[TMP146]], 15
; CHECK-NEXT: [[AND_I55:%.*]] = and i32 [[SHR_I54]], 65537
@@ -217,51 +217,51 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[SUB104:%.*]] = sub i32 [[ADD78]], [[ADD94]]
; CHECK-NEXT: [[ADD105:%.*]] = add i32 [[SUB102]], [[SUB86]]
; CHECK-NEXT: [[SUB106:%.*]] = sub i32 [[SUB86]], [[SUB102]]
-; CHECK-NEXT: [[ADD_I:%.*]] = add i32 [[MUL_I]], [[ADD103]]
-; CHECK-NEXT: [[XOR_I:%.*]] = xor i32 [[ADD_I]], [[TMP76]]
-; CHECK-NEXT: [[ADD_I52:%.*]] = add i32 [[MUL_I51]], [[ADD105]]
-; CHECK-NEXT: [[XOR_I53:%.*]] = xor i32 [[ADD_I52]], [[ADD46_2]]
+; CHECK-NEXT: [[ADD_I:%.*]] = add i32 [[MUL_I51_2]], [[ADD103]]
+; CHECK-NEXT: [[XOR_I:%.*]] = xor i32 [[ADD_I]], [[TMP79]]
+; CHECK-NEXT: [[ADD_I52:%.*]] = add i32 [[MUL_I51_3]], [[ADD105]]
+; CHECK-NEXT: [[XOR_I53:%.*]] = xor i32 [[ADD_I52]], [[CONV]]
; CHECK-NEXT: [[ADD_I57:%.*]] = add i32 [[MUL_I56]], [[SUB104]]
; CHECK-NEXT: [[XOR_I58:%.*]] = xor i32 [[ADD_I57]], [[TMP146]]
; CHECK-NEXT: [[ADD_I62:%.*]] = add i32 [[MUL_I61]], [[SUB106]]
-; CHECK-NEXT: [[XOR_I63:%.*]] = xor i32 [[ADD_I62]], [[TMP107]]
+; CHECK-NEXT: [[XOR_I63:%.*]] = xor i32 [[ADD_I62]], [[TMP143]]
; CHECK-NEXT: [[ADD110:%.*]] = add i32 [[XOR_I53]], [[XOR_I]]
; CHECK-NEXT: [[ADD112:%.*]] = add i32 [[ADD110]], [[XOR_I58]]
; CHECK-NEXT: [[ADD113:%.*]] = add i32 [[ADD112]], [[XOR_I63]]
; CHECK-NEXT: [[TMP150:%.*]] = shufflevector <2 x i32> [[TMP105]], <2 x i32> poison, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT: [[TMP151:%.*]] = insertelement <2 x i32> [[TMP150]], i32 [[SUB47_2]], i32 1
; CHECK-NEXT: [[TMP152:%.*]] = insertelement <2 x i32> [[TMP105]], i32 [[SUB45_2]], i32 1
-; CHECK-NEXT: [[TMP153:%.*]] = add <2 x i32> [[TMP151]], [[TMP152]]
+; CHECK-NEXT: [[TMP163:%.*]] = add <2 x i32> [[TMP151]], [[TMP152]]
; CHECK-NEXT: [[TMP154:%.*]] = shufflevector <2 x i32> [[TMP144]], <2 x i32> [[TMP73]], <2 x i32> <i32 1, i32 2>
; CHECK-NEXT: [[TMP155:%.*]] = shufflevector <2 x i32> [[TMP144]], <2 x i32> [[TMP73]], <2 x i32> <i32 0, i32 3>
; CHECK-NEXT: [[TMP156:%.*]] = add <2 x i32> [[TMP154]], [[TMP155]]
-; CHECK-NEXT: [[TMP157:%.*]] = extractelement <2 x i32> [[TMP153]], i32 1
+; CHECK-NEXT: [[TMP157:%.*]] = extractelement <2 x i32> [[TMP163]], i32 1
; CHECK-NEXT: [[TMP158:%.*]] = extractelement <2 x i32> [[TMP156]], i32 1
-; CHECK-NEXT: [[TMP159:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP153]], <2 x i32> <i32 1, i32 3>
-; CHECK-NEXT: [[ADD94_1:%.*]] = add i32 [[TMP158]], [[TMP157]]
-; CHECK-NEXT: [[TMP160:%.*]] = extractelement <2 x i32> [[TMP153]], i32 0
+; CHECK-NEXT: [[TMP159:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP163]], <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[ADD78_2:%.*]] = add i32 [[TMP158]], [[TMP157]]
+; CHECK-NEXT: [[TMP160:%.*]] = extractelement <2 x i32> [[TMP163]], i32 0
; CHECK-NEXT: [[TMP161:%.*]] = extractelement <2 x i32> [[TMP156]], i32 0
-; CHECK-NEXT: [[TMP162:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP153]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[ADD78_1:%.*]] = add i32 [[TMP161]], [[TMP160]]
-; CHECK-NEXT: [[TMP163:%.*]] = sub <2 x i32> [[TMP153]], [[TMP156]]
-; CHECK-NEXT: [[TMP164:%.*]] = extractelement <2 x i32> [[TMP163]], i32 0
-; CHECK-NEXT: [[TMP165:%.*]] = extractelement <2 x i32> [[TMP163]], i32 1
-; CHECK-NEXT: [[ADD105_1:%.*]] = add i32 [[TMP165]], [[TMP164]]
-; CHECK-NEXT: [[SUB106_1:%.*]] = sub i32 [[TMP164]], [[TMP165]]
+; CHECK-NEXT: [[TMP162:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP163]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[ADD94_1:%.*]] = add i32 [[TMP161]], [[TMP160]]
+; CHECK-NEXT: [[TMP164:%.*]] = sub <2 x i32> [[TMP163]], [[TMP156]]
+; CHECK-NEXT: [[TMP173:%.*]] = extractelement <2 x i32> [[TMP164]], i32 0
+; CHECK-NEXT: [[TMP174:%.*]] = extractelement <2 x i32> [[TMP164]], i32 1
+; CHECK-NEXT: [[ADD105_1:%.*]] = add i32 [[TMP174]], [[TMP173]]
+; CHECK-NEXT: [[SUB106_1:%.*]] = sub i32 [[TMP173]], [[TMP174]]
; CHECK-NEXT: [[ADD_I52_1:%.*]] = add i32 [[MUL_I51_1]], [[ADD105_1]]
-; CHECK-NEXT: [[XOR_I53_1:%.*]] = xor i32 [[ADD_I52_1]], [[TMP77]]
-; CHECK-NEXT: [[TMP166:%.*]] = shufflevector <2 x i32> [[TMP16]], <2 x i32> [[TMP144]], <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[XOR_I53_1:%.*]] = xor i32 [[ADD_I52_1]], [[TMP107]]
+; CHECK-NEXT: [[TMP166:%.*]] = shufflevector <2 x i32> [[TMP101]], <2 x i32> [[TMP144]], <2 x i32> <i32 1, i32 3>
; CHECK-NEXT: [[TMP167:%.*]] = lshr <2 x i32> [[TMP166]], <i32 15, i32 15>
; CHECK-NEXT: [[TMP168:%.*]] = and <2 x i32> [[TMP167]], <i32 65537, i32 65537>
; CHECK-NEXT: [[TMP169:%.*]] = mul <2 x i32> [[TMP168]], <i32 65535, i32 65535>
-; CHECK-NEXT: [[TMP170:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_1]], i32 0
-; CHECK-NEXT: [[TMP171:%.*]] = shufflevector <2 x i32> [[TMP170]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP172:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_1]], i32 0
-; CHECK-NEXT: [[TMP173:%.*]] = shufflevector <2 x i32> [[TMP172]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP174:%.*]] = add <2 x i32> [[TMP171]], [[TMP173]]
-; CHECK-NEXT: [[TMP175:%.*]] = sub <2 x i32> [[TMP171]], [[TMP173]]
-; CHECK-NEXT: [[TMP176:%.*]] = shufflevector <2 x i32> [[TMP174]], <2 x i32> [[TMP175]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP177:%.*]] = add <2 x i32> [[TMP169]], [[TMP176]]
+; CHECK-NEXT: [[TMP171:%.*]] = shufflevector <2 x i32> [[TMP172]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP208:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_2]], i32 0
+; CHECK-NEXT: [[TMP209:%.*]] = shufflevector <2 x i32> [[TMP208]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP282:%.*]] = add <2 x i32> [[TMP171]], [[TMP209]]
+; CHECK-NEXT: [[TMP211:%.*]] = sub <2 x i32> [[TMP171]], [[TMP209]]
+; CHECK-NEXT: [[TMP283:%.*]] = shufflevector <2 x i32> [[TMP282]], <2 x i32> [[TMP211]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP177:%.*]] = add <2 x i32> [[TMP169]], [[TMP283]]
; CHECK-NEXT: [[TMP178:%.*]] = xor <2 x i32> [[TMP177]], [[TMP166]]
; CHECK-NEXT: [[ADD_I62_1:%.*]] = add i32 [[MUL_I61_1]], [[SUB106_1]]
; CHECK-NEXT: [[XOR_I63_1:%.*]] = xor i32 [[ADD_I62_1]], [[TMP108]]
@@ -271,90 +271,90 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[TMP180:%.*]] = extractelement <2 x i32> [[TMP178]], i32 1
; CHECK-NEXT: [[ADD112_1:%.*]] = add i32 [[ADD110_1]], [[TMP180]]
; CHECK-NEXT: [[ADD113_1:%.*]] = add i32 [[ADD112_1]], [[XOR_I63_1]]
-; CHECK-NEXT: [[TMP181:%.*]] = shufflevector <2 x i32> [[TMP104]], <2 x i32> poison, <2 x i32> <i32 poison, i32 0>
+; CHECK-NEXT: [[TMP181:%.*]] = shufflevector <2 x i32> [[TMP165]], <2 x i32> poison, <2 x i32> <i32 poison, i32 0>
; CHECK-NEXT: [[TMP182:%.*]] = insertelement <2 x i32> [[TMP181]], i32 [[ADD44_2]], i32 0
-; CHECK-NEXT: [[TMP183:%.*]] = insertelement <2 x i32> [[TMP104]], i32 [[ADD46_2]], i32 0
+; CHECK-NEXT: [[TMP183:%.*]] = insertelement <2 x i32> [[TMP165]], i32 [[CONV]], i32 0
; CHECK-NEXT: [[TMP184:%.*]] = sub <2 x i32> [[TMP182]], [[TMP183]]
-; CHECK-NEXT: [[TMP185:%.*]] = shufflevector <2 x i32> [[TMP72]], <2 x i32> [[TMP143]], <2 x i32> <i32 1, i32 2>
-; CHECK-NEXT: [[TMP186:%.*]] = shufflevector <2 x i32> [[TMP72]], <2 x i32> [[TMP143]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP185:%.*]] = shufflevector <2 x i32> [[TMP16]], <2 x i32> [[TMP257]], <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT: [[TMP186:%.*]] = shufflevector <2 x i32> [[TMP16]], <2 x i32> [[TMP257]], <2 x i32> <i32 0, i32 3>
; CHECK-NEXT: [[TMP187:%.*]] = sub <2 x i32> [[TMP185]], [[TMP186]]
; CHECK-NEXT: [[TMP188:%.*]] = extractelement <2 x i32> [[TMP184]], i32 0
; CHECK-NEXT: [[TMP189:%.*]] = extractelement <2 x i32> [[TMP187]], i32 0
; CHECK-NEXT: [[TMP190:%.*]] = shufflevector <2 x i32> [[TMP187]], <2 x i32> [[TMP184]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[ADD94_2:%.*]] = add i32 [[TMP189]], [[TMP188]]
+; CHECK-NEXT: [[ADD94_4:%.*]] = add i32 [[TMP189]], [[TMP188]]
; CHECK-NEXT: [[TMP191:%.*]] = extractelement <2 x i32> [[TMP184]], i32 1
; CHECK-NEXT: [[TMP192:%.*]] = extractelement <2 x i32> [[TMP187]], i32 1
; CHECK-NEXT: [[TMP193:%.*]] = shufflevector <2 x i32> [[TMP187]], <2 x i32> [[TMP184]], <2 x i32> <i32 1, i32 3>
-; CHECK-NEXT: [[ADD78_2:%.*]] = add i32 [[TMP192]], [[TMP191]]
+; CHECK-NEXT: [[ADD94_2:%.*]] = add i32 [[TMP192]], [[TMP191]]
; CHECK-NEXT: [[TMP194:%.*]] = sub <2 x i32> [[TMP184]], [[TMP187]]
-; CHECK-NEXT: [[TMP195:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_2]], i32 0
-; CHECK-NEXT: [[TMP196:%.*]] = shufflevector <2 x i32> [[TMP195]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP197:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_2]], i32 0
+; CHECK-NEXT: [[TMP244:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_2]], i32 0
+; CHECK-NEXT: [[TMP245:%.*]] = shufflevector <2 x i32> [[TMP244]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP197:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_4]], i32 0
; CHECK-NEXT: [[TMP198:%.*]] = shufflevector <2 x i32> [[TMP197]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP199:%.*]] = add <2 x i32> [[TMP196]], [[TMP198]]
-; CHECK-NEXT: [[TMP200:%.*]] = sub <2 x i32> [[TMP196]], [[TMP198]]
-; CHECK-NEXT: [[TMP201:%.*]] = shufflevector <2 x i32> [[TMP199]], <2 x i32> [[TMP200]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP202:%.*]] = extractelement <2 x i32> [[TMP194]], i32 0
+; CHECK-NEXT: [[TMP246:%.*]] = add <2 x i32> [[TMP245]], [[TMP198]]
+; CHECK-NEXT: [[TMP247:%.*]] = sub <2 x i32> [[TMP245]], [[TMP198]]
+; CHECK-NEXT: [[TMP248:%.*]] = shufflevector <2 x i32> [[TMP246]], <2 x i32> [[TMP247]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP215:%.*]] = extractelement <2 x i32> [[TMP194]], i32 0
; CHECK-NEXT: [[TMP203:%.*]] = extractelement <2 x i32> [[TMP194]], i32 1
-; CHECK-NEXT: [[ADD105_2:%.*]] = add i32 [[TMP202]], [[TMP203]]
-; CHECK-NEXT: [[SUB106_2:%.*]] = sub i32 [[TMP203]], [[TMP202]]
-; CHECK-NEXT: [[ADD_I52_2:%.*]] = add i32 [[MUL_I51_2]], [[ADD105_2]]
+; CHECK-NEXT: [[ADD105_2:%.*]] = add i32 [[TMP215]], [[TMP203]]
+; CHECK-NEXT: [[SUB106_2:%.*]] = sub i32 [[TMP203]], [[TMP215]]
+; CHECK-NEXT: [[ADD_I52_2:%.*]] = add i32 [[MUL_I51_4]], [[ADD105_2]]
; CHECK-NEXT: [[XOR_I53_2:%.*]] = xor i32 [[ADD_I52_2]], [[CONV_1]]
-; CHECK-NEXT: [[TMP204:%.*]] = add <2 x i32> [[TMP149]], [[TMP201]]
-; CHECK-NEXT: [[TMP205:%.*]] = xor <2 x i32> [[TMP204]], [[TMP110]]
-; CHECK-NEXT: [[SHR_I59_2:%.*]] = lshr i32 [[TMP106]], 15
+; CHECK-NEXT: [[TMP266:%.*]] = add <2 x i32> [[TMP149]], [[TMP248]]
+; CHECK-NEXT: [[TMP267:%.*]] = xor <2 x i32> [[TMP266]], [[TMP110]]
+; CHECK-NEXT: [[SHR_I59_2:%.*]] = lshr i32 [[TMP238]], 15
; CHECK-NEXT: [[AND_I60_2:%.*]] = and i32 [[SHR_I59_2]], 65537
; CHECK-NEXT: [[MUL_I61_2:%.*]] = mul i32 [[AND_I60_2]], 65535
; CHECK-NEXT: [[ADD_I62_2:%.*]] = add i32 [[MUL_I61_2]], [[SUB106_2]]
-; CHECK-NEXT: [[XOR_I63_2:%.*]] = xor i32 [[ADD_I62_2]], [[TMP106]]
+; CHECK-NEXT: [[XOR_I63_2:%.*]] = xor i32 [[ADD_I62_2]], [[TMP238]]
; CHECK-NEXT: [[ADD108_2:%.*]] = add i32 [[XOR_I53_2]], [[ADD113_1]]
-; CHECK-NEXT: [[TMP206:%.*]] = extractelement <2 x i32> [[TMP205]], i32 0
+; CHECK-NEXT: [[TMP206:%.*]] = extractelement <2 x i32> [[TMP267]], i32 0
; CHECK-NEXT: [[ADD110_2:%.*]] = add i32 [[ADD108_2]], [[TMP206]]
-; CHECK-NEXT: [[TMP207:%.*]] = extractelement <2 x i32> [[TMP205]], i32 1
+; CHECK-NEXT: [[TMP207:%.*]] = extractelement <2 x i32> [[TMP267]], i32 1
; CHECK-NEXT: [[ADD112_2:%.*]] = add i32 [[ADD110_2]], [[TMP207]]
; CHECK-NEXT: [[ADD113_2:%.*]] = add i32 [[ADD112_2]], [[XOR_I63_2]]
-; CHECK-NEXT: [[TMP208:%.*]] = insertelement <2 x i32> [[TMP150]], i32 [[SUB45_2]], i32 0
-; CHECK-NEXT: [[TMP209:%.*]] = insertelement <2 x i32> [[TMP105]], i32 [[SUB47_2]], i32 0
-; CHECK-NEXT: [[TMP210:%.*]] = sub <2 x i32> [[TMP208]], [[TMP209]]
-; CHECK-NEXT: [[TMP211:%.*]] = shufflevector <2 x i32> [[TMP73]], <2 x i32> [[TMP144]], <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT: [[TMP221:%.*]] = insertelement <2 x i32> [[TMP150]], i32 [[SUB45_2]], i32 0
+; CHECK-NEXT: [[TMP222:%.*]] = insertelement <2 x i32> [[TMP105]], i32 [[SUB47_2]], i32 0
+; CHECK-NEXT: [[TMP210:%.*]] = sub <2 x i32> [[TMP221]], [[TMP222]]
+; CHECK-NEXT: [[TMP225:%.*]] = shufflevector <2 x i32> [[TMP73]], <2 x i32> [[TMP144]], <2 x i32> <i32 1, i32 2>
; CHECK-NEXT: [[TMP212:%.*]] = shufflevector <2 x i32> [[TMP73]], <2 x i32> [[TMP144]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP213:%.*]] = sub <2 x i32> [[TMP211]], [[TMP212]]
+; CHECK-NEXT: [[TMP226:%.*]] = sub <2 x i32> [[TMP225]], [[TMP212]]
; CHECK-NEXT: [[TMP214:%.*]] = extractelement <2 x i32> [[TMP210]], i32 0
-; CHECK-NEXT: [[TMP215:%.*]] = extractelement <2 x i32> [[TMP213]], i32 0
-; CHECK-NEXT: [[TMP216:%.*]] = shufflevector <2 x i32> [[TMP213]], <2 x i32> [[TMP210]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[ADD94_3:%.*]] = add i32 [[TMP215]], [[TMP214]]
+; CHECK-NEXT: [[TMP227:%.*]] = extractelement <2 x i32> [[TMP226]], i32 0
+; CHECK-NEXT: [[TMP216:%.*]] = shufflevector <2 x i32> [[TMP226]], <2 x i32> [[TMP210]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[ADD94_3:%.*]] = add i32 [[TMP227]], [[TMP214]]
; CHECK-NEXT: [[TMP217:%.*]] = extractelement <2 x i32> [[TMP210]], i32 1
-; CHECK-NEXT: [[TMP218:%.*]] = extractelement <2 x i32> [[TMP213]], i32 1
-; CHECK-NEXT: [[TMP219:%.*]] = shufflevector <2 x i32> [[TMP213]], <2 x i32> [[TMP210]], <2 x i32> <i32 1, i32 3>
-; CHECK-NEXT: [[ADD78_3:%.*]] = add i32 [[TMP218]], [[TMP217]]
-; CHECK-NEXT: [[TMP220:%.*]] = sub <2 x i32> [[TMP210]], [[TMP213]]
-; CHECK-NEXT: [[TMP221:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_3]], i32 0
-; CHECK-NEXT: [[TMP222:%.*]] = shufflevector <2 x i32> [[TMP221]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP218:%.*]] = extractelement <2 x i32> [[TMP226]], i32 1
+; CHECK-NEXT: [[TMP219:%.*]] = shufflevector <2 x i32> [[TMP226]], <2 x i32> [[TMP210]], <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[SUB59:%.*]] = add i32 [[TMP218]], [[TMP217]]
+; CHECK-NEXT: [[TMP220:%.*]] = sub <2 x i32> [[TMP210]], [[TMP226]]
+; CHECK-NEXT: [[TMP274:%.*]] = insertelement <2 x i32> poison, i32 [[SUB59]], i32 0
+; CHECK-NEXT: [[TMP275:%.*]] = shufflevector <2 x i32> [[TMP274]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP223:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_3]], i32 0
; CHECK-NEXT: [[TMP224:%.*]] = shufflevector <2 x i32> [[TMP223]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP225:%.*]] = add <2 x i32> [[TMP222]], [[TMP224]]
-; CHECK-NEXT: [[TMP226:%.*]] = sub <2 x i32> [[TMP222]], [[TMP224]]
-; CHECK-NEXT: [[TMP227:%.*]] = shufflevector <2 x i32> [[TMP225]], <2 x i32> [[TMP226]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP276:%.*]] = add <2 x i32> [[TMP275]], [[TMP224]]
+; CHECK-NEXT: [[TMP277:%.*]] = sub <2 x i32> [[TMP275]], [[TMP224]]
+; CHECK-NEXT: [[TMP278:%.*]] = shufflevector <2 x i32> [[TMP276]], <2 x i32> [[TMP277]], <2 x i32> <i32 0, i32 3>
; CHECK-NEXT: [[TMP228:%.*]] = extractelement <2 x i32> [[TMP220]], i32 0
; CHECK-NEXT: [[TMP229:%.*]] = extractelement <2 x i32> [[TMP220]], i32 1
; CHECK-NEXT: [[ADD105_3:%.*]] = add i32 [[TMP228]], [[TMP229]]
; CHECK-NEXT: [[SUB106_3:%.*]] = sub i32 [[TMP229]], [[TMP228]]
-; CHECK-NEXT: [[ADD_I52_3:%.*]] = add i32 [[MUL_I51_3]], [[ADD105_3]]
-; CHECK-NEXT: [[XOR_I53_3:%.*]] = xor i32 [[ADD_I52_3]], [[CONV]]
-; CHECK-NEXT: [[TMP230:%.*]] = lshr <2 x i32> [[TMP79]], <i32 15, i32 15>
+; CHECK-NEXT: [[ADD_I52_3:%.*]] = add i32 [[MUL_I51_5]], [[ADD105_3]]
+; CHECK-NEXT: [[XOR_I53_3:%.*]] = xor i32 [[ADD_I52_3]], [[CONV1]]
+; CHECK-NEXT: [[TMP230:%.*]] = lshr <2 x i32> [[TMP102]], <i32 15, i32 15>
; CHECK-NEXT: [[TMP231:%.*]] = and <2 x i32> [[TMP230]], <i32 65537, i32 65537>
; CHECK-NEXT: [[TMP232:%.*]] = mul <2 x i32> [[TMP231]], <i32 65535, i32 65535>
-; CHECK-NEXT: [[TMP233:%.*]] = add <2 x i32> [[TMP232]], [[TMP227]]
-; CHECK-NEXT: [[TMP234:%.*]] = xor <2 x i32> [[TMP233]], [[TMP79]]
+; CHECK-NEXT: [[TMP286:%.*]] = add <2 x i32> [[TMP232]], [[TMP278]]
+; CHECK-NEXT: [[TMP287:%.*]] = xor <2 x i32> [[TMP286]], [[TMP102]]
; CHECK-NEXT: [[SHR_I59_3:%.*]] = lshr i32 [[CONV33]], 15
; CHECK-NEXT: [[AND_I60_3:%.*]] = and i32 [[SHR_I59_3]], 65537
; CHECK-NEXT: [[MUL_I61_3:%.*]] = mul i32 [[AND_I60_3]], 65535
; CHECK-NEXT: [[ADD_I62_3:%.*]] = add i32 [[MUL_I61_3]], [[SUB106_3]]
; CHECK-NEXT: [[XOR_I63_3:%.*]] = xor i32 [[ADD_I62_3]], [[CONV33]]
; CHECK-NEXT: [[ADD108_3:%.*]] = add i32 [[XOR_I53_3]], [[ADD113_2]]
-; CHECK-NEXT: [[TMP235:%.*]] = extractelement <2 x i32> [[TMP234]], i32 0
+; CHECK-NEXT: [[TMP235:%.*]] = extractelement <2 x i32> [[TMP287]], i32 0
; CHECK-NEXT: [[ADD110_3:%.*]] = add i32 [[ADD108_3]], [[TMP235]]
-; CHECK-NEXT: [[TMP236:%.*]] = extractelement <2 x i32> [[TMP234]], i32 1
+; CHECK-NEXT: [[TMP236:%.*]] = extractelement <2 x i32> [[TMP287]], i32 1
; CHECK-NEXT: [[ADD112_3:%.*]] = add i32 [[ADD110_3]], [[TMP236]]
; CHECK-NEXT: [[ADD113_3:%.*]] = add i32 [[ADD112_3]], [[XOR_I63_3]]
; CHECK-NEXT: ret i32 [[ADD113_3]]
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/init-ext-node-not-truncable.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/init-ext-node-not-truncable.ll
new file mode 100644
index 0000000..1166b1f
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/init-ext-node-not-truncable.ll
@@ -0,0 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux-gnu -mattr="+v" < %s -slp-threshold=-5 | FileCheck %s
+
+@h = global [16 x i64] zeroinitializer
+
+define void @test() {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: store <2 x i64> <i64 4294967295, i64 0>, ptr @h, align 8
+; CHECK-NEXT: ret void
+;
+entry:
+ %sext.0 = sext i8 0 to i32
+ %sext.1 = sext i8 0 to i32
+
+ %lshr.0 = lshr i32 0, %sext.0
+ %lshr.1 = lshr i32 0, %sext.1
+
+ %or.0 = or i32 %lshr.0, -1
+ %or.1 = or i32 %lshr.1, 0
+
+ %zext.0 = zext i32 %or.0 to i64
+ %zext.1 = zext i32 %or.1 to i64
+
+ store i64 %zext.0, ptr @h, align 8
+ store i64 %zext.1, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 1), align 8
+ ret void
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/mixed-extracts-types.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/mixed-extracts-types.ll
new file mode 100644
index 0000000..0d5c644
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/mixed-extracts-types.ll
@@ -0,0 +1,35 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux-gnu -mattr="+v" < %s | FileCheck %s
+
+define i32 @test() {
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <vscale x 4 x i8> zeroinitializer, i64 0
+; CHECK-NEXT: [[CONV5:%.*]] = sext i8 [[VECTOR_RECUR_EXTRACT]] to i32
+; CHECK-NEXT: store i32 [[CONV5]], ptr getelementptr ([0 x i32], ptr null, i64 0, i64 -14), align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i8>, ptr getelementptr ([9 x i8], ptr null, i64 -2, i64 5), align 1
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <2 x i8> [[TMP0]], zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i16>
+; CHECK-NEXT: store <2 x i16> [[TMP2]], ptr getelementptr ([0 x i16], ptr null, i64 0, i64 -14), align 2
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i8> [[TMP0]], i32 0
+; CHECK-NEXT: [[CONV5_1:%.*]] = sext i8 [[TMP3]] to i32
+; CHECK-NEXT: store i32 [[CONV5_1]], ptr getelementptr ([0 x i32], ptr null, i64 0, i64 -13), align 4
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %vector.recur.extract = extractelement <vscale x 4 x i8> zeroinitializer, i64 0
+ %0 = load i8, ptr getelementptr ([9 x i8], ptr null, i64 -2, i64 5), align 1
+ %tobool1.not = icmp ne i8 %0, 0
+ %conv2 = zext i1 %tobool1.not to i16
+ store i16 %conv2, ptr getelementptr ([0 x i16], ptr null, i64 0, i64 -14), align 2
+ %conv5 = sext i8 %vector.recur.extract to i32
+ store i32 %conv5, ptr getelementptr ([0 x i32], ptr null, i64 0, i64 -14), align 4
+ %1 = load i8, ptr getelementptr ([9 x i8], ptr null, i64 -2, i64 6), align 1
+ %tobool1.not.1 = icmp ne i8 %1, 0
+ %conv2.1 = zext i1 %tobool1.not.1 to i16
+ store i16 %conv2.1, ptr getelementptr ([0 x i16], ptr null, i64 0, i64 -13), align 2
+ %conv5.1 = sext i8 %0 to i32
+ store i32 %conv5.1, ptr getelementptr ([0 x i32], ptr null, i64 0, i64 -13), align 4
+ ret i32 0
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/partial-vec-invalid-cost.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/partial-vec-invalid-cost.ll
new file mode 100644
index 0000000..6388cc2d
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/partial-vec-invalid-cost.ll
@@ -0,0 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=slp-vectorizer -S | FileCheck %s
+
+target triple = "riscv64-unknown-linux-gnu"
+
+define void @partial_vec_invalid_cost() #0 {
+; CHECK-LABEL: define void @partial_vec_invalid_cost(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> zeroinitializer)
+; CHECK-NEXT: [[OP_RDX3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[STORE_THIS:%.*]] = zext i32 [[OP_RDX3]] to i96
+; CHECK-NEXT: store i96 [[STORE_THIS]], ptr null, align 16
+; CHECK-NEXT: ret void
+;
+entry:
+
+ %lshr.1 = lshr i96 0, 0 ; These ops
+ %lshr.2 = lshr i96 0, 0 ; return an
+ %add.0 = add i96 0, 0 ; invalid
+ %add.1 = add i96 0, 0 ; vector cost.
+
+ %trunc.i96.1 = trunc i96 %lshr.1 to i32 ; These ops
+ %trunc.i96.2 = trunc i96 %lshr.2 to i32 ; return an
+ %trunc.i96.3 = trunc i96 %add.0 to i32 ; invalid
+ %trunc.i96.4 = trunc i96 %add.1 to i32 ; vector cost.
+
+ %or.0 = or i32 %trunc.i96.1, %trunc.i96.2
+ %or.1 = or i32 %or.0, %trunc.i96.3
+ %or.2 = or i32 %or.1, %trunc.i96.4
+
+ %zext.0 = zext i1 0 to i32 ; These
+ %zext.1 = zext i1 0 to i32 ; ops
+ %zext.2 = zext i1 0 to i32 ; are
+ %zext.3 = zext i1 0 to i32 ; vectorized
+
+ %or.3 = or i32 %or.2, %zext.0 ; users
+ %or.4 = or i32 %or.3, %zext.1 ; of
+ %or.5 = or i32 %or.4, %zext.2 ; vectorized
+ %or.6 = or i32 %or.5, %zext.3 ; ops
+
+ %store.this = zext i32 %or.6 to i96
+
+ store i96 %store.this, ptr null, align 16
+ ret void
+}
+
+attributes #0 = { "target-features"="+v" }
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/reduction-extension-after-bitwidth.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/reduction-extension-after-bitwidth.ll
new file mode 100644
index 0000000..7771e83
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/reduction-extension-after-bitwidth.ll
@@ -0,0 +1,33 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -mtriple=riscv64-unknown-linux-gnu -mattr="+v" --passes=slp-vectorizer < %s | FileCheck %s
+
+define i32 @test(ptr %0, ptr %1) {
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: ptr [[TMP0:%.*]], ptr [[TMP1:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[LOAD_5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> <i8 1, i8 1, i8 1, i8 1>)
+; CHECK-NEXT: [[TMP3:%.*]] = sext i8 [[TMP2]] to i32
+; CHECK-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP3]], [[LOAD_5]]
+; CHECK-NEXT: ret i32 [[OP_RDX]]
+;
+entry:
+ %zext.0 = zext i8 1 to i32
+ %zext.1 = zext i8 1 to i32
+ %zext.2 = zext i8 1 to i32
+ %zext.3 = zext i8 1 to i32
+ %select.zext.0 = select i1 false, i32 -1, i32 %zext.0
+ %select.zext.1 = select i1 false, i32 0, i32 %zext.1
+ %select.zext.2 = select i1 false, i32 0, i32 %zext.2
+ %select.zext.3 = select i1 false, i32 0, i32 %zext.3
+
+ %load.5 = load i32, ptr %1, align 4
+
+ %and.0 = and i32 %load.5, %select.zext.0
+ %and.1 = and i32 %and.0, %select.zext.1
+ %and.2 = and i32 %and.1, %select.zext.2
+ %and.3 = and i32 %and.2, %select.zext.3
+
+ ret i32 %and.3
+}
+
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll
index 000e7a5..500f106 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll
@@ -802,9 +802,10 @@ define i64 @red_zext_ld_4xi64(ptr %ptr) {
; CHECK-LABEL: @red_zext_ld_4xi64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i8>, ptr [[PTR:%.*]], align 1
-; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[TMP0]] to <4 x i64>
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
-; CHECK-NEXT: ret i64 [[TMP2]]
+; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[TMP0]] to <4 x i16>
+; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[TMP1]])
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[TMP2]] to i64
+; CHECK-NEXT: ret i64 [[TMP3]]
;
entry:
%ld0 = load i8, ptr %ptr
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/small-tree-not-schedulable-bv-node.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/small-tree-not-schedulable-bv-node.ll
new file mode 100644
index 0000000..26f3fca
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/small-tree-not-schedulable-bv-node.ll
@@ -0,0 +1,263 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -mtriple=riscv64-unknown-linux-gnu -slp-threshold=-100 -mattr=+v < %s | FileCheck %s
+
+define void @test1() personality ptr null {
+; CHECK-LABEL: define void @test1(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] personality ptr null {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CALL33:%.*]] = invoke ptr null(i64 0, ptr null)
+; CHECK-NEXT: to label [[INVOKE_CONT32:%.*]] unwind label [[LPAD31_LOOPEXIT:%.*]]
+; CHECK: invoke.cont32:
+; CHECK-NEXT: invoke void null(ptr null, ptr null)
+; CHECK-NEXT: to label [[INVOKE_CONT37:%.*]] unwind label [[LPAD34_LOOPEXIT:%.*]]
+; CHECK: invoke.cont37:
+; CHECK-NEXT: unreachable
+; CHECK: lpad31.loopexit:
+; CHECK-NEXT: [[LPAD_LOOPEXIT:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: br label [[EHCLEANUP47:%.*]]
+; CHECK: lpad34.loopexit:
+; CHECK-NEXT: [[DOTLCSSA101:%.*]] = phi ptr [ null, [[INVOKE_CONT32]] ]
+; CHECK-NEXT: [[CALL33_LCSSA96:%.*]] = phi ptr [ [[CALL33]], [[INVOKE_CONT32]] ]
+; CHECK-NEXT: [[LPAD_LOOPEXIT56:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: br label [[LPAD34_BODY:%.*]]
+; CHECK: lpad34.loopexit.split-lp:
+; CHECK-NEXT: [[LPAD_LOOPEXIT_SPLIT_LP57:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: br label [[LPAD34_BODY]]
+; CHECK: lpad34.body:
+; CHECK-NEXT: [[TMP0:%.*]] = phi ptr [ [[DOTLCSSA101]], [[LPAD34_LOOPEXIT]] ], [ null, [[LPAD34_LOOPEXIT_SPLIT_LP:%.*]] ]
+; CHECK-NEXT: [[CALL3399:%.*]] = phi ptr [ [[CALL33_LCSSA96]], [[LPAD34_LOOPEXIT]] ], [ null, [[LPAD34_LOOPEXIT_SPLIT_LP]] ]
+; CHECK-NEXT: br label [[EHCLEANUP47]]
+; CHECK: ehcleanup47:
+; CHECK-NEXT: resume { ptr, i32 } zeroinitializer
+;
+entry:
+ %call33 = invoke ptr null(i64 0, ptr null)
+ to label %invoke.cont32 unwind label %lpad31.loopexit
+
+invoke.cont32:
+ invoke void null(ptr null, ptr null)
+ to label %invoke.cont37 unwind label %lpad34.loopexit
+
+invoke.cont37:
+ unreachable
+
+lpad31.loopexit:
+ %lpad.loopexit = landingpad { ptr, i32 }
+ cleanup
+ br label %ehcleanup47
+
+lpad34.loopexit:
+ %.lcssa101 = phi ptr [ null, %invoke.cont32 ]
+ %call33.lcssa96 = phi ptr [ %call33, %invoke.cont32 ]
+ %lpad.loopexit56 = landingpad { ptr, i32 }
+ cleanup
+ br label %lpad34.body
+
+lpad34.loopexit.split-lp:
+ %lpad.loopexit.split-lp57 = landingpad { ptr, i32 }
+ cleanup
+ br label %lpad34.body
+
+lpad34.body:
+ %0 = phi ptr [ %.lcssa101, %lpad34.loopexit ], [ null, %lpad34.loopexit.split-lp ]
+ %call3399 = phi ptr [ %call33.lcssa96, %lpad34.loopexit ], [ null, %lpad34.loopexit.split-lp ]
+ br label %ehcleanup47
+
+ehcleanup47:
+ resume { ptr, i32 } zeroinitializer
+}
+
+define i32 @test2(i64 %idx.ext.i48.pre-phi) {
+; CHECK-LABEL: define i32 @test2(
+; CHECK-SAME: i64 [[IDX_EXT_I48_PRE_PHI:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[DO_ACTION:%.*]]
+; CHECK: do_action:
+; CHECK-NEXT: switch i32 0, label [[SW_DEFAULT:%.*]] [
+; CHECK-NEXT: i32 1, label [[CLEANUP185:%.*]]
+; CHECK-NEXT: i32 2, label [[CLEANUP185]]
+; CHECK-NEXT: i32 0, label [[CLEANUP185]]
+; CHECK-NEXT: i32 4, label [[CLEANUP185]]
+; CHECK-NEXT: i32 5, label [[CLEANUP185]]
+; CHECK-NEXT: i32 6, label [[CLEANUP185]]
+; CHECK-NEXT: i32 7, label [[CLEANUP185]]
+; CHECK-NEXT: i32 8, label [[CLEANUP185]]
+; CHECK-NEXT: i32 9, label [[CLEANUP185]]
+; CHECK-NEXT: i32 10, label [[CLEANUP185]]
+; CHECK-NEXT: i32 11, label [[CLEANUP185]]
+; CHECK-NEXT: i32 12, label [[CLEANUP185]]
+; CHECK-NEXT: i32 13, label [[CLEANUP185]]
+; CHECK-NEXT: i32 14, label [[CLEANUP185]]
+; CHECK-NEXT: i32 15, label [[CLEANUP185]]
+; CHECK-NEXT: i32 16, label [[CLEANUP185]]
+; CHECK-NEXT: i32 17, label [[CLEANUP185]]
+; CHECK-NEXT: i32 18, label [[CLEANUP185]]
+; CHECK-NEXT: i32 19, label [[CLEANUP185]]
+; CHECK-NEXT: i32 20, label [[CLEANUP185]]
+; CHECK-NEXT: i32 21, label [[CLEANUP185]]
+; CHECK-NEXT: i32 22, label [[CLEANUP185]]
+; CHECK-NEXT: i32 23, label [[CLEANUP185]]
+; CHECK-NEXT: i32 24, label [[CLEANUP185]]
+; CHECK-NEXT: i32 25, label [[CLEANUP185]]
+; CHECK-NEXT: i32 26, label [[CLEANUP185]]
+; CHECK-NEXT: i32 27, label [[CLEANUP185]]
+; CHECK-NEXT: i32 28, label [[CLEANUP185]]
+; CHECK-NEXT: i32 29, label [[CLEANUP185]]
+; CHECK-NEXT: i32 30, label [[CLEANUP185]]
+; CHECK-NEXT: i32 31, label [[CLEANUP185]]
+; CHECK-NEXT: i32 32, label [[CLEANUP185]]
+; CHECK-NEXT: i32 33, label [[CLEANUP185]]
+; CHECK-NEXT: i32 34, label [[CLEANUP185]]
+; CHECK-NEXT: i32 35, label [[CLEANUP185]]
+; CHECK-NEXT: i32 36, label [[CLEANUP185]]
+; CHECK-NEXT: i32 37, label [[CLEANUP185]]
+; CHECK-NEXT: i32 38, label [[CLEANUP185]]
+; CHECK-NEXT: i32 39, label [[CLEANUP185]]
+; CHECK-NEXT: i32 40, label [[CLEANUP185]]
+; CHECK-NEXT: i32 41, label [[CLEANUP185]]
+; CHECK-NEXT: i32 42, label [[CLEANUP185]]
+; CHECK-NEXT: i32 43, label [[CLEANUP185]]
+; CHECK-NEXT: i32 44, label [[CLEANUP185]]
+; CHECK-NEXT: i32 45, label [[CLEANUP185]]
+; CHECK-NEXT: i32 46, label [[CLEANUP185]]
+; CHECK-NEXT: i32 47, label [[CLEANUP185]]
+; CHECK-NEXT: i32 48, label [[CLEANUP185]]
+; CHECK-NEXT: i32 49, label [[CLEANUP185]]
+; CHECK-NEXT: i32 50, label [[CLEANUP185]]
+; CHECK-NEXT: i32 51, label [[CLEANUP185]]
+; CHECK-NEXT: i32 52, label [[CLEANUP185]]
+; CHECK-NEXT: i32 53, label [[CLEANUP185]]
+; CHECK-NEXT: i32 54, label [[CLEANUP185]]
+; CHECK-NEXT: i32 55, label [[CLEANUP185]]
+; CHECK-NEXT: i32 56, label [[CLEANUP185]]
+; CHECK-NEXT: i32 57, label [[DO_ACTION]]
+; CHECK-NEXT: i32 58, label [[CLEANUP185]]
+; CHECK-NEXT: i32 59, label [[CLEANUP185]]
+; CHECK-NEXT: i32 60, label [[DO_ACTION]]
+; CHECK-NEXT: i32 61, label [[DO_ACTION]]
+; CHECK-NEXT: i32 62, label [[CLEANUP185]]
+; CHECK-NEXT: i32 70, label [[SW_BB175:%.*]]
+; CHECK-NEXT: i32 64, label [[CLEANUP185]]
+; CHECK-NEXT: i32 65, label [[DO_ACTION]]
+; CHECK-NEXT: i32 66, label [[DO_ACTION]]
+; CHECK-NEXT: i32 67, label [[CLEANUP185]]
+; CHECK-NEXT: i32 72, label [[CLEANUP185]]
+; CHECK-NEXT: i32 69, label [[DO_ACTION]]
+; CHECK-NEXT: i32 71, label [[CLEANUP185]]
+; CHECK-NEXT: ]
+; CHECK: yy_get_previous_state.exit.loopexit:
+; CHECK-NEXT: br label [[YY_FIND_ACTION_BACKEDGE:%.*]]
+; CHECK: yy_find_action.backedge:
+; CHECK-NEXT: [[YY_BP_1_BE:%.*]] = phi ptr [ [[ADD_PTR_I49:%.*]], [[SW_BB175]] ], [ null, [[YY_GET_PREVIOUS_STATE_EXIT_LOOPEXIT:%.*]] ]
+; CHECK-NEXT: [[YY_CP_2_BE:%.*]] = phi ptr [ [[ARRAYIDX178:%.*]], [[SW_BB175]] ], [ null, [[YY_GET_PREVIOUS_STATE_EXIT_LOOPEXIT]] ]
+; CHECK-NEXT: br label [[DO_ACTION]]
+; CHECK: sw.bb175:
+; CHECK-NEXT: [[ARRAYIDX178]] = getelementptr i8, ptr null, i64 0
+; CHECK-NEXT: [[ADD_PTR_I49]] = getelementptr i8, ptr null, i64 [[IDX_EXT_I48_PRE_PHI]]
+; CHECK-NEXT: [[CMP5_I50:%.*]] = icmp ult ptr [[ADD_PTR_I49]], [[ARRAYIDX178]]
+; CHECK-NEXT: br label [[YY_FIND_ACTION_BACKEDGE]]
+; CHECK: sw.default:
+; CHECK-NEXT: unreachable
+; CHECK: cleanup185:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ br label %do_action
+
+do_action:
+ switch i32 0, label %sw.default [
+ i32 1, label %cleanup185
+ i32 2, label %cleanup185
+ i32 0, label %cleanup185
+ i32 4, label %cleanup185
+ i32 5, label %cleanup185
+ i32 6, label %cleanup185
+ i32 7, label %cleanup185
+ i32 8, label %cleanup185
+ i32 9, label %cleanup185
+ i32 10, label %cleanup185
+ i32 11, label %cleanup185
+ i32 12, label %cleanup185
+ i32 13, label %cleanup185
+ i32 14, label %cleanup185
+ i32 15, label %cleanup185
+ i32 16, label %cleanup185
+ i32 17, label %cleanup185
+ i32 18, label %cleanup185
+ i32 19, label %cleanup185
+ i32 20, label %cleanup185
+ i32 21, label %cleanup185
+ i32 22, label %cleanup185
+ i32 23, label %cleanup185
+ i32 24, label %cleanup185
+ i32 25, label %cleanup185
+ i32 26, label %cleanup185
+ i32 27, label %cleanup185
+ i32 28, label %cleanup185
+ i32 29, label %cleanup185
+ i32 30, label %cleanup185
+ i32 31, label %cleanup185
+ i32 32, label %cleanup185
+ i32 33, label %cleanup185
+ i32 34, label %cleanup185
+ i32 35, label %cleanup185
+ i32 36, label %cleanup185
+ i32 37, label %cleanup185
+ i32 38, label %cleanup185
+ i32 39, label %cleanup185
+ i32 40, label %cleanup185
+ i32 41, label %cleanup185
+ i32 42, label %cleanup185
+ i32 43, label %cleanup185
+ i32 44, label %cleanup185
+ i32 45, label %cleanup185
+ i32 46, label %cleanup185
+ i32 47, label %cleanup185
+ i32 48, label %cleanup185
+ i32 49, label %cleanup185
+ i32 50, label %cleanup185
+ i32 51, label %cleanup185
+ i32 52, label %cleanup185
+ i32 53, label %cleanup185
+ i32 54, label %cleanup185
+ i32 55, label %cleanup185
+ i32 56, label %cleanup185
+ i32 57, label %do_action
+ i32 58, label %cleanup185
+ i32 59, label %cleanup185
+ i32 60, label %do_action
+ i32 61, label %do_action
+ i32 62, label %cleanup185
+ i32 70, label %sw.bb175
+ i32 64, label %cleanup185
+ i32 65, label %do_action
+ i32 66, label %do_action
+ i32 67, label %cleanup185
+ i32 72, label %cleanup185
+ i32 69, label %do_action
+ i32 71, label %cleanup185
+ ]
+
+yy_get_previous_state.exit.loopexit:
+ br label %yy_find_action.backedge
+
+yy_find_action.backedge:
+ %yy_bp.1.be = phi ptr [ %add.ptr.i49, %sw.bb175 ], [ null, %yy_get_previous_state.exit.loopexit ]
+ %yy_cp.2.be = phi ptr [ %arrayidx178, %sw.bb175 ], [ null, %yy_get_previous_state.exit.loopexit ]
+ br label %do_action
+
+sw.bb175:
+ %arrayidx178 = getelementptr i8, ptr null, i64 0
+ %add.ptr.i49 = getelementptr i8, ptr null, i64 %idx.ext.i48.pre-phi
+ %cmp5.i50 = icmp ult ptr %add.ptr.i49, %arrayidx178
+ br label %yy_find_action.backedge
+
+sw.default:
+ unreachable
+
+cleanup185:
+ ret i32 0
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/trunc-to-large-than-bw.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/trunc-to-large-than-bw.ll
new file mode 100644
index 0000000..2d69c7c
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/trunc-to-large-than-bw.ll
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux-gnu -mattr="+v" < %s | FileCheck %s
+
+@c = global [12 x i64] zeroinitializer
+
+define i32 @test() {
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0.i64(ptr align 8 @c, i64 24, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = trunc <4 x i64> [[TMP0]] to <4 x i32>
+; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i32> [[TMP1]], <i32 65535, i32 65535, i32 65535, i32 65535>
+; CHECK-NEXT: [[TMP3:%.*]] = xor <4 x i32> [[TMP2]], <i32 65535, i32 65535, i32 65535, i32 65535>
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[TMP3]])
+; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.umax.i32(i32 [[TMP5]], i32 1)
+; CHECK-NEXT: ret i32 [[TMP6]]
+;
+entry:
+ %0 = load i64, ptr @c, align 8
+ %conv = trunc i64 %0 to i32
+ %conv3 = and i32 %conv, 65535
+ %conv4 = xor i32 %conv3, 65535
+ %.conv4 = tail call i32 @llvm.umax.i32(i32 1, i32 %conv4)
+ %1 = load i64, ptr getelementptr inbounds ([12 x i64], ptr @c, i64 0, i64 3), align 8
+ %conv.1 = trunc i64 %1 to i32
+ %conv3.1 = and i32 %conv.1, 65535
+ %conv4.1 = xor i32 %conv3.1, 65535
+ %.conv4.1 = tail call i32 @llvm.umax.i32(i32 %.conv4, i32 %conv4.1)
+ %2 = load i64, ptr getelementptr inbounds ([12 x i64], ptr @c, i64 0, i64 6), align 8
+ %conv.2 = trunc i64 %2 to i32
+ %conv3.2 = and i32 %conv.2, 65535
+ %conv4.2 = xor i32 %conv3.2, 65535
+ %.conv4.2 = tail call i32 @llvm.umax.i32(i32 %.conv4.1, i32 %conv4.2)
+ %3 = load i64, ptr getelementptr inbounds ([12 x i64], ptr @c, i64 0, i64 9), align 8
+ %conv.3 = trunc i64 %3 to i32
+ %conv3.3 = and i32 %conv.3, 65535
+ %conv4.3 = xor i32 %conv3.3, 65535
+ %.conv4.3 = tail call i32 @llvm.umax.i32(i32 %.conv4.2, i32 %conv4.3)
+ ret i32 %.conv4.3
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/SystemZ/ext-not-resized-op-resized.ll b/llvm/test/Transforms/SLPVectorizer/SystemZ/ext-not-resized-op-resized.ll
new file mode 100644
index 0000000..a7bb272
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/SystemZ/ext-not-resized-op-resized.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=systemz-unknown -mcpu=z13 < %s | FileCheck %s
+
+define void @test(i64 %0, i1 %.cmp.i.2, i1 %1, ptr %a) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: i64 [[TMP0:%.*]], i1 [[DOTCMP_I_2:%.*]], i1 [[TMP1:%.*]], ptr [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i64> poison, i64 [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i64> [[TMP3]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = lshr <4 x i64> [[TMP4]], <i64 63, i64 63, i64 63, i64 63>
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i1> poison, i1 [[DOTCMP_I_2]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i1> [[TMP6]], i1 [[TMP1]], i32 1
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i1> [[TMP7]], <4 x i1> poison, <4 x i32> <i32 0, i32 0, i32 1, i32 0>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x i1> [[TMP7]], <4 x i1> poison, <4 x i32> <i32 1, i32 1, i32 0, i32 1>
+; CHECK-NEXT: [[TMP10:%.*]] = trunc <4 x i64> [[TMP5]] to <4 x i1>
+; CHECK-NEXT: [[TMP11:%.*]] = select <4 x i1> [[TMP9]], <4 x i1> [[TMP10]], <4 x i1> [[TMP8]]
+; CHECK-NEXT: [[TMP12:%.*]] = zext <4 x i1> [[TMP11]] to <4 x i32>
+; CHECK-NEXT: [[TMP13:%.*]] = xor <4 x i32> [[TMP12]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP13]])
+; CHECK-NEXT: store i32 [[TMP14]], ptr [[A]], align 4
+; CHECK-NEXT: ret void
+;
+ %.lobit.i.2 = lshr i64 %0, 63
+ %3 = zext i1 %.cmp.i.2 to i64
+ %4 = select i1 %1, i64 %.lobit.i.2, i64 %3
+ %5 = trunc i64 %4 to i32
+ %6 = xor i32 %5, 1
+ %.lobit.i.3 = lshr i64 %0, 63
+ %7 = zext i1 %.cmp.i.2 to i64
+ %8 = select i1 %1, i64 %.lobit.i.3, i64 %7
+ %9 = trunc i64 %8 to i32
+ %10 = xor i32 %9, 1
+ %11 = or i32 %10, %6
+ %.lobit.i.4 = lshr i64 %0, 63
+ %12 = zext i1 %1 to i64
+ %13 = select i1 %.cmp.i.2, i64 %.lobit.i.4, i64 %12
+ %14 = trunc i64 %13 to i32
+ %15 = xor i32 %14, 1
+ %16 = or i32 %15, %11
+ %.lobit.i.5 = lshr i64 %0, 63
+ %17 = zext i1 %.cmp.i.2 to i64
+ %18 = select i1 %1, i64 %.lobit.i.5, i64 %17
+ %19 = trunc i64 %18 to i32
+ %20 = xor i32 %19, 1
+ %21 = or i32 %20, %16
+ store i32 %21, ptr %a, align 4
+ ret void
+}
+
diff --git a/llvm/test/Transforms/SLPVectorizer/SystemZ/minbitwidth-root-trunc.ll b/llvm/test/Transforms/SLPVectorizer/SystemZ/minbitwidth-root-trunc.ll
new file mode 100644
index 0000000..7b4e2b0
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/SystemZ/minbitwidth-root-trunc.ll
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=systemz -mcpu=z15 %s | FileCheck %s
+
+define void @test(ptr %a, i8 %0, i16 %b.promoted.i) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[TMP0:%.*]], i16 [[B_PROMOTED_I:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[TMP0]] to i128
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i16> poison, i16 [[B_PROMOTED_I]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[TMP3]], <4 x i16> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i128> poison, i128 [[TMP2]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i128> [[TMP5]], <4 x i128> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = trunc <4 x i128> [[TMP6]] to <4 x i16>
+; CHECK-NEXT: [[TMP8:%.*]] = or <4 x i16> [[TMP4]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i16 [[TMP9]] to i64
+; CHECK-NEXT: [[OP_RDX:%.*]] = and i64 [[TMP11]], 1
+; CHECK-NEXT: store i64 [[OP_RDX]], ptr [[A]], align 8
+; CHECK-NEXT: ret void
+;
+ %2 = zext i8 %0 to i128
+ %3 = zext i16 %b.promoted.i to i128
+ %4 = or i128 %3, %2
+ %5 = trunc i128 %4 to i64
+ %6 = and i64 %5, 1
+ %7 = zext i16 %b.promoted.i to i128
+ %8 = or i128 %7, %2
+ %9 = trunc i128 %8 to i64
+ %10 = and i64 %6, %9
+ %11 = zext i16 %b.promoted.i to i128
+ %12 = or i128 %11, %2
+ %13 = trunc i128 %12 to i64
+ %14 = and i64 %10, %13
+ %15 = zext i16 %b.promoted.i to i128
+ %16 = or i128 %15, %2
+ %17 = trunc i128 %16 to i64
+ %18 = and i64 %14, %17
+ store i64 %18, ptr %a, align 8
+ ret void
+}
+
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/PR35777.ll b/llvm/test/Transforms/SLPVectorizer/X86/PR35777.ll
index 4565d49..05511f8 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/PR35777.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/PR35777.ll
@@ -15,11 +15,12 @@ define { i64, i64 } @patatino(double %arg) {
; CHECK-NEXT: [[TMP6:%.*]] = load <2 x double>, ptr getelementptr inbounds ([6 x double], ptr @global, i64 0, i64 4), align 16
; CHECK-NEXT: [[TMP7:%.*]] = fadd <2 x double> [[TMP6]], [[TMP5]]
; CHECK-NEXT: [[TMP8:%.*]] = fptosi <2 x double> [[TMP7]] to <2 x i32>
-; CHECK-NEXT: [[TMP9:%.*]] = sext <2 x i32> [[TMP8]] to <2 x i64>
-; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x i64> [[TMP9]], i32 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i32> [[TMP8]], i32 0
+; CHECK-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64
; CHECK-NEXT: [[T16:%.*]] = insertvalue { i64, i64 } undef, i64 [[TMP10]], 0
-; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i64> [[TMP9]], i32 1
-; CHECK-NEXT: [[T17:%.*]] = insertvalue { i64, i64 } [[T16]], i64 [[TMP11]], 1
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i32> [[TMP8]], i32 1
+; CHECK-NEXT: [[TMP12:%.*]] = sext i32 [[TMP11]] to i64
+; CHECK-NEXT: [[T17:%.*]] = insertvalue { i64, i64 } [[T16]], i64 [[TMP12]], 1
; CHECK-NEXT: ret { i64, i64 } [[T17]]
;
bb:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/call-arg-reduced-by-minbitwidth.ll b/llvm/test/Transforms/SLPVectorizer/X86/call-arg-reduced-by-minbitwidth.ll
new file mode 100644
index 0000000..27c9655
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/call-arg-reduced-by-minbitwidth.ll
@@ -0,0 +1,82 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-pc-windows-msvc19.34.0 < %s | FileCheck %s
+
+define void @test(ptr %0, i8 %1, i1 %cmp12.i) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr [[TMP0:%.*]], i8 [[TMP1:%.*]], i1 [[CMP12_I:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i1> poison, i1 [[CMP12_I]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x i8> poison, i8 [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i8> [[TMP4]], <8 x i8> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: br label [[PRE:%.*]]
+; CHECK: pre:
+; CHECK-NEXT: [[TMP6:%.*]] = zext <8 x i8> [[TMP5]] to <8 x i32>
+; CHECK-NEXT: [[TMP7:%.*]] = call <8 x i32> @llvm.umax.v8i32(<8 x i32> [[TMP6]], <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
+; CHECK-NEXT: [[TMP8:%.*]] = trunc <8 x i32> [[TMP7]] to <8 x i8>
+; CHECK-NEXT: [[TMP9:%.*]] = add <8 x i8> [[TMP8]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+; CHECK-NEXT: [[TMP10:%.*]] = select <8 x i1> [[TMP3]], <8 x i8> [[TMP9]], <8 x i8> [[TMP5]]
+; CHECK-NEXT: store <8 x i8> [[TMP10]], ptr [[TMP0]], align 1
+; CHECK-NEXT: br label [[PRE]]
+;
+entry:
+ %idx11 = getelementptr i8, ptr %0, i64 1
+ %idx22 = getelementptr i8, ptr %0, i64 2
+ %idx33 = getelementptr i8, ptr %0, i64 3
+ %idx44 = getelementptr i8, ptr %0, i64 4
+ %idx55 = getelementptr i8, ptr %0, i64 5
+ %idx66 = getelementptr i8, ptr %0, i64 6
+ %idx77 = getelementptr i8, ptr %0, i64 7
+ br label %pre
+
+pre:
+ %conv.i = zext i8 %1 to i32
+ %2 = tail call i32 @llvm.umax.i32(i32 %conv.i, i32 1)
+ %.sroa.speculated.i = add i32 %2, 1
+ %intensity.0.i = select i1 %cmp12.i, i32 %.sroa.speculated.i, i32 %conv.i
+ %conv14.i = trunc i32 %intensity.0.i to i8
+ store i8 %conv14.i, ptr %0, align 1
+ %conv.i.1 = zext i8 %1 to i32
+ %3 = tail call i32 @llvm.umax.i32(i32 %conv.i.1, i32 1)
+ %ss1 = add i32 %3, 1
+ %ii1 = select i1 %cmp12.i, i32 %ss1, i32 %conv.i.1
+ %conv14.i.1 = trunc i32 %ii1 to i8
+ store i8 %conv14.i.1, ptr %idx11, align 1
+ %conv.i.2 = zext i8 %1 to i32
+ %4 = tail call i32 @llvm.umax.i32(i32 %conv.i.2, i32 1)
+ %ss2 = add i32 %4, 1
+ %ii2 = select i1 %cmp12.i, i32 %ss2, i32 %conv.i.2
+ %conv14.i.2 = trunc i32 %ii2 to i8
+ store i8 %conv14.i.2, ptr %idx22, align 1
+ %conv.i.3 = zext i8 %1 to i32
+ %5 = tail call i32 @llvm.umax.i32(i32 %conv.i.3, i32 1)
+ %ss3 = add i32 %5, 1
+ %ii3 = select i1 %cmp12.i, i32 %ss3, i32 %conv.i.3
+ %conv14.i.3 = trunc i32 %ii3 to i8
+ store i8 %conv14.i.3, ptr %idx33, align 1
+ %conv.i.4 = zext i8 %1 to i32
+ %6 = tail call i32 @llvm.umax.i32(i32 %conv.i.4, i32 1)
+ %ss4 = add i32 %6, 1
+ %ii4 = select i1 %cmp12.i, i32 %ss4, i32 %conv.i.4
+ %conv14.i.4 = trunc i32 %ii4 to i8
+ store i8 %conv14.i.4, ptr %idx44, align 1
+ %conv.i.5 = zext i8 %1 to i32
+ %7 = tail call i32 @llvm.umax.i32(i32 %conv.i.5, i32 1)
+ %ss5 = add i32 %7, 1
+ %ii5 = select i1 %cmp12.i, i32 %ss5, i32 %conv.i.5
+ %conv14.i.5 = trunc i32 %ii5 to i8
+ store i8 %conv14.i.5, ptr %idx55, align 1
+ %conv.i.6 = zext i8 %1 to i32
+ %8 = tail call i32 @llvm.umax.i32(i32 %conv.i.6, i32 1)
+ %ss6 = add i32 %8, 1
+ %ii6 = select i1 %cmp12.i, i32 %ss6, i32 %conv.i.6
+ %conv14.i.6 = trunc i32 %ii6 to i8
+ store i8 %conv14.i.6, ptr %idx66, align 1
+ %conv.i.7 = zext i8 %1 to i32
+ %9 = tail call i32 @llvm.umax.i32(i32 %conv.i.7, i32 1)
+ %ss7 = add i32 %9, 1
+ %ii7 = select i1 %cmp12.i, i32 %ss7, i32 %conv.i.7
+ %conv14.i.7 = trunc i32 %ii7 to i8
+ store i8 %conv14.i.7, ptr %idx77, align 1
+ br label %pre
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/cmp-after-intrinsic-call-minbitwidth.ll b/llvm/test/Transforms/SLPVectorizer/X86/cmp-after-intrinsic-call-minbitwidth.ll
new file mode 100644
index 0000000..a05d4fd
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/cmp-after-intrinsic-call-minbitwidth.ll
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt --passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu -mcpu=cascadelake < %s | FileCheck %s
+
+define void @test() {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> zeroinitializer, <2 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP1:%.*]] = select <2 x i1> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i32> [[TMP1]], zeroinitializer
+; CHECK-NEXT: [[ADD:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1
+; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[ADD]], 0
+; CHECK-NEXT: [[ADD45:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
+; CHECK-NEXT: [[ADD152:%.*]] = or i32 [[ADD45]], [[ADD]]
+; CHECK-NEXT: [[IDXPROM153:%.*]] = sext i32 [[ADD152]] to i64
+; CHECK-NEXT: [[ARRAYIDX154:%.*]] = getelementptr i8, ptr null, i64 [[IDXPROM153]]
+; CHECK-NEXT: [[CALL155:%.*]] = tail call i32 null(ptr null, i32 0, ptr [[ARRAYIDX154]], i32 0)
+; CHECK-NEXT: ret void
+;
+entry:
+ %conv = sext i16 0 to i32
+ %cmp.i = icmp sgt i32 0, %conv
+ %cond.i = tail call i32 @llvm.smin.i32(i32 %conv, i32 0)
+ %cond5.i = select i1 %cmp.i, i32 0, i32 %cond.i
+ %conv43 = sext i16 0 to i32
+ %cmp.i6193 = icmp sgt i32 0, %conv43
+ %cond.i6194 = tail call i32 @llvm.smin.i32(i32 %conv43, i32 0)
+ %cond5.i6195 = select i1 %cmp.i6193, i32 0, i32 %cond.i6194
+ %add = or i32 %cond5.i, 0
+ %shr = ashr i32 %add, 0
+ %add45 = or i32 %cond5.i6195, 0
+ %add152 = or i32 %add45, %add
+ %idxprom153 = sext i32 %add152 to i64
+ %arrayidx154 = getelementptr i8, ptr null, i64 %idxprom153
+ %call155 = tail call i32 null(ptr null, i32 0, ptr %arrayidx154, i32 0)
+ ret void
+}
+
+declare i32 @llvm.smin.i32(i32, i32)
+
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll b/llvm/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll
index 12b227c..a153c3c 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll
@@ -25,8 +25,8 @@ define i32 @fn1() {
; CHECK-NEXT: store i32 [[AND]], ptr @a, align 4
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i32> [[TMP0]], <2 x i32> <i32 poison, i32 0>, <2 x i32> <i32 0, i32 3>
; CHECK-NEXT: switch i32 [[AND]], label [[IF_END:%.*]] [
-; CHECK-NEXT: i32 7, label [[SAVE_STATE_AND_RETURN]]
-; CHECK-NEXT: i32 0, label [[SAVE_STATE_AND_RETURN]]
+; CHECK-NEXT: i32 7, label [[SAVE_STATE_AND_RETURN]]
+; CHECK-NEXT: i32 0, label [[SAVE_STATE_AND_RETURN]]
; CHECK-NEXT: ]
; CHECK: if.end:
; CHECK-NEXT: br label [[SAVE_STATE_AND_RETURN]]
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/ext-int-reduced-not-operand.ll b/llvm/test/Transforms/SLPVectorizer/X86/ext-int-reduced-not-operand.ll
new file mode 100644
index 0000000..05534fa
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/ext-int-reduced-not-operand.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-99999 < %s | FileCheck %s
+
+define i64 @wombat() {
+; CHECK-LABEL: define i64 @wombat() {
+; CHECK-NEXT: bb:
+; CHECK-NEXT: br label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[BB2]]
+; CHECK: bb2:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 0, [[BB:%.*]] ], [ 0, [[BB1:%.*]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i32> poison, i32 [[PHI]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x i32> [[TMP0]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = trunc <2 x i32> [[TMP1]] to <2 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1
+; CHECK-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i64
+; CHECK-NEXT: [[OR:%.*]] = or i64 [[TMP4]], [[TMP6]]
+; CHECK-NEXT: ret i64 [[OR]]
+;
+bb:
+ br label %bb2
+
+bb1:
+ br label %bb2
+
+bb2:
+ %phi = phi i32 [ 0, %bb ], [ 0, %bb1 ]
+ %zext = zext i32 %phi to i64
+ %sext = sext i32 %phi to i64
+ %or = or i64 %zext, %sext
+ ret i64 %or
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/gather-nodes-different-bb.ll b/llvm/test/Transforms/SLPVectorizer/X86/gather-nodes-different-bb.ll
new file mode 100644
index 0000000..2acbe89
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/gather-nodes-different-bb.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux -mattr="-avx512pf,+avx512f,+avx512bw" -slp-threshold=-100 < %s | FileCheck %s
+
+define i1 @foo(i32 %a) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = sub nsw i32 0, [[A:%.*]]
+; CHECK-NEXT: br label [[BB4:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[LOCAL:%.*]] = sub nsw i32 0, 0
+; CHECK-NEXT: [[INS1:%.*]] = insertelement <2 x i32> poison, i32 [[TMP0]], i32 0
+; CHECK-NEXT: [[ADD:%.*]] = icmp eq i32 [[TMP0]], [[LOCAL]]
+; CHECK-NEXT: ret i1 [[ADD]]
+;
+entry:
+ %0 = sub nsw i32 0, %a
+ br label %bb1
+
+bb1:
+ %local = sub nsw i32 0, 0
+ %ins1 = insertelement <2 x i32> poison, i32 %0, i32 0
+ %add = icmp eq i32 %0, %local
+ ret i1 %add
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/int-bitcast-minbitwidth.ll b/llvm/test/Transforms/SLPVectorizer/X86/int-bitcast-minbitwidth.ll
index a0af8e3..f4a4714 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/int-bitcast-minbitwidth.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/int-bitcast-minbitwidth.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-3 < %s | FileCheck %s
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-9 < %s | FileCheck %s
define void @t(i64 %v) {
; CHECK-LABEL: define void @t(
@@ -7,10 +7,9 @@ define void @t(i64 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i64> poison, i64 [[V]], i32 0
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[TMP0]], <4 x i64> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = trunc <4 x i64> [[TMP1]] to <4 x i16>
-; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i16> [[TMP2]], <i16 5, i16 6, i16 3, i16 2>
-; CHECK-NEXT: [[TMP4:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = sext i16 [[TMP4]] to i32
+; CHECK-NEXT: [[TMP2:%.*]] = trunc <4 x i64> [[TMP1]] to <4 x i32>
+; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i32> [[TMP2]], <i32 5, i32 6, i32 3, i32 2>
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP3]])
; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[TMP5]], 65535
; CHECK-NEXT: store i32 [[TMP6]], ptr null, align 4
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-icmp-to-trunc.ll b/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-icmp-to-trunc.ll
new file mode 100644
index 0000000..fc28d7a
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-icmp-to-trunc.ll
@@ -0,0 +1,75 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux -mcpu=cascadelake < %s | FileCheck %s
+
+define i1 @test(ptr noalias %0, i64 %1, ptr noalias %p, ptr %p1) {
+; CHECK-LABEL: define i1 @test(
+; CHECK-SAME: ptr noalias [[TMP0:%.*]], i64 [[TMP1:%.*]], ptr noalias [[P:%.*]], ptr [[P1:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: newFuncRoot:
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i64 16
+; CHECK-NEXT: [[BF_LOAD_I1336:%.*]] = load i24, ptr [[TMP2]], align 16
+; CHECK-NEXT: [[AND_I_I_I1342:%.*]] = and i64 [[TMP1]], -16
+; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[AND_I_I_I1342]] to ptr
+; CHECK-NEXT: store ptr [[TMP3]], ptr [[P]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 16
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 16
+; CHECK-NEXT: [[BF_LOAD_I1345:%.*]] = load i24, ptr [[TMP5]], align 16
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i24> poison, i24 [[BF_LOAD_I1336]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x i24> [[TMP6]], i24 [[BF_LOAD_I1345]], i32 1
+; CHECK-NEXT: [[TMP8:%.*]] = and <2 x i24> [[TMP7]], <i24 255, i24 255>
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <2 x i24> [[TMP8]], <i24 24, i24 24>
+; CHECK-NEXT: [[TMP10:%.*]] = select <2 x i1> [[TMP9]], <2 x i24> <i24 23, i24 23>, <2 x i24> [[TMP8]]
+; CHECK-NEXT: [[TMP23:%.*]] = trunc <2 x i24> [[TMP10]] to <2 x i8>
+; CHECK-NEXT: [[TMP11:%.*]] = zext <2 x i8> [[TMP23]] to <2 x i32>
+; CHECK-NEXT: [[TMP12:%.*]] = and <2 x i32> [[TMP11]], <i32 254, i32 254>
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq <2 x i32> [[TMP12]], <i32 4, i32 4>
+; CHECK-NEXT: [[TMP25:%.*]] = select <2 x i1> [[TMP13]], <2 x i8> <i8 2, i8 2>, <2 x i8> [[TMP23]]
+; CHECK-NEXT: [[TMP14:%.*]] = zext <2 x i8> [[TMP25]] to <2 x i32>
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq <2 x i32> [[TMP14]], <i32 32, i32 32>
+; CHECK-NEXT: [[TMP18:%.*]] = select <2 x i1> [[TMP15]], <2 x i8> <i8 31, i8 31>, <2 x i8> [[TMP25]]
+; CHECK-NEXT: [[TMP16:%.*]] = zext <2 x i8> [[TMP18]] to <2 x i32>
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq <2 x i32> [[TMP16]], <i32 54, i32 54>
+; CHECK-NEXT: [[TMP21:%.*]] = select <2 x i1> [[TMP17]], <2 x i8> <i8 53, i8 53>, <2 x i8> [[TMP18]]
+; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x i8> [[TMP21]], i32 0
+; CHECK-NEXT: [[TMP19:%.*]] = zext i8 [[TMP22]] to i32
+; CHECK-NEXT: store i32 [[TMP19]], ptr [[P1]], align 4
+; CHECK-NEXT: [[TMP24:%.*]] = extractelement <2 x i8> [[TMP21]], i32 1
+; CHECK-NEXT: [[TMP20:%.*]] = zext i8 [[TMP24]] to i32
+; CHECK-NEXT: [[CMP210_NOT:%.*]] = icmp eq i32 [[TMP19]], [[TMP20]]
+; CHECK-NEXT: ret i1 [[CMP210_NOT]]
+;
+newFuncRoot:
+ %2 = getelementptr inbounds i8, ptr %0, i64 16
+ %bf.load.i1336 = load i24, ptr %2, align 16
+ %bf.clear.i1337 = and i24 %bf.load.i1336, 255
+ %and.i.i.i1342 = and i64 %1, -16
+ %3 = inttoptr i64 %and.i.i.i1342 to ptr
+ store ptr %3, ptr %p, align 8
+ %4 = load ptr, ptr %3, align 16
+ %5 = getelementptr inbounds i8, ptr %4, i64 16
+ %bf.load.i1345 = load i24, ptr %5, align 16
+ %bf.clear.i1346 = and i24 %bf.load.i1345, 255
+ %cmp182 = icmp eq i24 %bf.clear.i1337, 24
+ %narrow = select i1 %cmp182, i24 23, i24 %bf.clear.i1337
+ %s = zext nneg i24 %narrow to i32
+ %cmp185 = icmp eq i24 %bf.clear.i1346, 24
+ %narrow1790 = select i1 %cmp185, i24 23, i24 %bf.clear.i1346
+ %s1139 = zext nneg i24 %narrow1790 to i32
+ %6 = and i32 %s, 254
+ %or.cond1132 = icmp eq i32 %6, 4
+ %s1142 = select i1 %or.cond1132, i32 2, i32 %s
+ %7 = and i32 %s1139, 254
+ %or.cond1133 = icmp eq i32 %7, 4
+ %s1140 = select i1 %or.cond1133, i32 2, i32 %s1139
+ %cmp198 = icmp eq i32 %s1142, 32
+ %s1134 = select i1 %cmp198, i32 31, i32 %s1142
+ %cmp201 = icmp eq i32 %s1140, 32
+ %s1143 = select i1 %cmp201, i32 31, i32 %s1140
+ %cmp204 = icmp eq i32 %s1134, 54
+ %s1135 = select i1 %cmp204, i32 53, i32 %s1134
+ store i32 %s1135, ptr %p1, align 4
+ %cmp207 = icmp eq i32 %s1143, 54
+ %s1141 = select i1 %cmp207, i32 53, i32 %s1143
+ %cmp210.not = icmp eq i32 %s1135, %s1141
+ ret i1 %cmp210.not
+}
+
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-multiuse-with-insertelement.ll b/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-multiuse-with-insertelement.ll
index 6e512fc..6051638 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-multiuse-with-insertelement.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-multiuse-with-insertelement.ll
@@ -6,18 +6,17 @@ define void @test(i8 %0) {
; CHECK-SAME: i8 [[TMP0:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i8> <i8 0, i8 poison>, i8 [[TMP0]], i32 1
-; CHECK-NEXT: [[TMP2:%.*]] = sext <2 x i8> [[TMP1]] to <2 x i16>
-; CHECK-NEXT: [[TMP3:%.*]] = sext <2 x i16> [[TMP2]] to <2 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = mul <2 x i16> [[TMP2]], zeroinitializer
-; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i16> [[TMP4]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = zext i16 [[TMP5]] to i32
-; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i16> [[TMP4]], i32 1
-; CHECK-NEXT: [[TMP8:%.*]] = zext i16 [[TMP7]] to i32
-; CHECK-NEXT: [[ADD:%.*]] = or i32 [[TMP6]], [[TMP8]]
+; CHECK-NEXT: [[TMP2:%.*]] = sext <2 x i8> [[TMP1]] to <2 x i32>
+; CHECK-NEXT: [[TMP3:%.*]] = mul <2 x i8> [[TMP1]], zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i8> [[TMP3]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = zext i8 [[TMP4]] to i32
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i8> [[TMP3]], i32 1
+; CHECK-NEXT: [[TMP7:%.*]] = zext i8 [[TMP6]] to i32
+; CHECK-NEXT: [[ADD:%.*]] = or i32 [[TMP5]], [[TMP7]]
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[ADD]], 1
; CHECK-NEXT: [[CONV9:%.*]] = trunc i32 [[SHR]] to i8
; CHECK-NEXT: store i8 [[CONV9]], ptr null, align 1
-; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x i32> [[TMP3]], <2 x i32> poison, <8 x i32> <i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> poison, <8 x i32> <i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-node-but-not-operands.ll b/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-node-but-not-operands.ll
new file mode 100644
index 0000000..fcccaf2
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-node-but-not-operands.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt --passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
+define void @test() {
+; CHECK-LABEL: define void @test() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: store <8 x i16> zeroinitializer, ptr null, align 2
+; CHECK-NEXT: ret void
+;
+entry:
+ %arrayidx8 = getelementptr i8, ptr null, i64 2
+ %shr10 = ashr i32 0, 0
+ %shr19 = lshr i32 0, 0
+ %sub20 = or i32 %shr19, %shr10
+ %xor21 = xor i32 %sub20, 0
+ %conv22 = trunc i32 %xor21 to i16
+ store i16 %conv22, ptr %arrayidx8, align 2
+ %arrayidx28 = getelementptr i8, ptr null, i64 4
+ %shr34 = lshr i32 0, 0
+ %sub35 = or i32 %shr34, %shr10
+ %xor36 = xor i32 %sub35, 0
+ %conv37 = trunc i32 %xor36 to i16
+ store i16 %conv37, ptr %arrayidx28, align 2
+ %arrayidx43 = getelementptr i8, ptr null, i64 6
+ %shr49 = lshr i32 0, 0
+ %sub50 = or i32 %shr49, %shr10
+ %xor51 = xor i32 %sub50, 0
+ %conv52 = trunc i32 %xor51 to i16
+ store i16 %conv52, ptr %arrayidx43, align 2
+ %arrayidx.1 = getelementptr i8, ptr null, i64 8
+ %shr.1 = lshr i32 0, 0
+ %xor2.1 = xor i32 %shr.1, %shr10
+ %sub3.1 = or i32 %xor2.1, 0
+ %conv4.1 = trunc i32 %sub3.1 to i16
+ store i16 %conv4.1, ptr %arrayidx.1, align 2
+ %arrayidx8.1 = getelementptr i8, ptr null, i64 10
+ %shr10.1 = ashr i32 0, 0
+ %shr19.1 = lshr i32 0, 0
+ %sub20.1 = or i32 %shr19.1, %shr10.1
+ %xor21.1 = xor i32 %sub20.1, 0
+ %conv22.1 = trunc i32 %xor21.1 to i16
+ store i16 %conv22.1, ptr %arrayidx8.1, align 2
+ %arrayidx28.1 = getelementptr i8, ptr null, i64 12
+ %shr34.1 = lshr i32 0, 0
+ %sub35.1 = or i32 %shr34.1, %shr10.1
+ %xor36.1 = xor i32 %sub35.1, 0
+ %conv37.1 = trunc i32 %xor36.1 to i16
+ store i16 %conv37.1, ptr %arrayidx28.1, align 2
+ %arrayidx43.1 = getelementptr i8, ptr null, i64 14
+ %shr49.1 = lshr i32 0, 0
+ %sub50.1 = or i32 %shr49.1, %shr10.1
+ %xor51.1 = xor i32 %sub50.1, 0
+ %conv52.1 = trunc i32 %xor51.1 to i16
+ store i16 %conv52.1, ptr %arrayidx43.1, align 2
+ %shr.2 = lshr i32 0, 0
+ %xor2.2 = xor i32 %shr.2, %shr10.1
+ %sub3.2 = or i32 %xor2.2, 0
+ %conv4.2 = trunc i32 %sub3.2 to i16
+ store i16 %conv4.2, ptr null, align 2
+ ret void
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-transformed-operand.ll b/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-transformed-operand.ll
index 2c83461..4acd630 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-transformed-operand.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-transformed-operand.ll
@@ -6,15 +6,20 @@ define void @test(i64 %d.promoted.i) {
; CHECK-SAME: i64 [[D_PROMOTED_I:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[AND_1_I:%.*]] = and i64 0, [[D_PROMOTED_I]]
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i64> <i64 0, i64 poison, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0>, i64 [[AND_1_I]], i32 1
+; CHECK-NEXT: [[TMP1:%.*]] = trunc <8 x i64> [[TMP0]] to <8 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = mul <8 x i1> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[AND_1_I_1:%.*]] = and i64 0, 0
-; CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i64> <i64 0, i64 poison, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 poison, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0>, i64 [[AND_1_I_1]], i32 1
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <16 x i64> [[TMP0]], i64 [[AND_1_I]], i32 9
-; CHECK-NEXT: [[TMP2:%.*]] = trunc <16 x i64> [[TMP1]] to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = mul <16 x i1> [[TMP2]], zeroinitializer
-; CHECK-NEXT: [[TMP4:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = zext i1 [[TMP4]] to i32
-; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[TMP5]], 0
-; CHECK-NEXT: store i32 [[TMP6]], ptr null, align 4
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i64> <i64 0, i64 poison, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0>, i64 [[AND_1_I_1]], i32 1
+; CHECK-NEXT: [[TMP4:%.*]] = trunc <8 x i64> [[TMP3]] to <8 x i1>
+; CHECK-NEXT: [[TMP5:%.*]] = mul <8 x i1> [[TMP4]], zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v8i1(<8 x i1> [[TMP5]])
+; CHECK-NEXT: [[TMP7:%.*]] = zext i1 [[TMP6]] to i32
+; CHECK-NEXT: [[TMP8:%.*]] = call i1 @llvm.vector.reduce.or.v8i1(<8 x i1> [[TMP2]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i1 [[TMP8]] to i32
+; CHECK-NEXT: [[OP_RDX:%.*]] = or i32 [[TMP7]], [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = and i32 [[OP_RDX]], 0
+; CHECK-NEXT: store i32 [[TMP10]], ptr null, align 4
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-user-not-min.ll b/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-user-not-min.ll
new file mode 100644
index 0000000..50b19d0
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-user-not-min.ll
@@ -0,0 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
+define void @test(ptr %block, ptr noalias %pixels, i1 %b) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr [[BLOCK:%.*]], ptr noalias [[PIXELS:%.*]], i1 [[B:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i1> <i1 true, i1 poison, i1 false, i1 false>, i1 [[B]], i32 1
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr [[BLOCK]], align 2
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ult <4 x i16> [[TMP2]], zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = trunc <4 x i16> [[TMP2]] to <4 x i8>
+; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i1> [[TMP0]] to <4 x i8>
+; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[TMP3]], <4 x i8> [[TMP4]], <4 x i8> [[TMP1]]
+; CHECK-NEXT: store <4 x i8> [[TMP5]], ptr [[PIXELS]], align 1
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = load i16, ptr %block, align 2
+ %tobool.not.i78 = icmp ult i16 %0, 0
+ %conv.i80 = sext i1 true to i8
+ %conv1.i81 = trunc i16 %0 to i8
+ %retval.0.i82 = select i1 %tobool.not.i78, i8 %conv1.i81, i8 %conv.i80
+ store i8 %retval.0.i82, ptr %pixels, align 1
+ %arrayidx2 = getelementptr i8, ptr %block, i64 2
+ %1 = load i16, ptr %arrayidx2, align 2
+ %tobool.not.i73 = icmp ult i16 %1, 0
+ %conv.i75 = sext i1 %b to i8
+ %conv1.i76 = trunc i16 %1 to i8
+ %retval.0.i77 = select i1 %tobool.not.i73, i8 %conv1.i76, i8 %conv.i75
+ %arrayidx5 = getelementptr i8, ptr %pixels, i64 1
+ store i8 %retval.0.i77, ptr %arrayidx5, align 1
+ %arrayidx6 = getelementptr i8, ptr %block, i64 4
+ %2 = load i16, ptr %arrayidx6, align 2
+ %tobool.not.i68 = icmp ult i16 %2, 0
+ %conv.i70 = sext i1 false to i8
+ %conv1.i71 = trunc i16 %2 to i8
+ %retval.0.i72 = select i1 %tobool.not.i68, i8 %conv1.i71, i8 %conv.i70
+ %arrayidx9 = getelementptr i8, ptr %pixels, i64 2
+ store i8 %retval.0.i72, ptr %arrayidx9, align 1
+ %arrayidx10 = getelementptr i8, ptr %block, i64 6
+ %3 = load i16, ptr %arrayidx10, align 2
+ %tobool.not.i63 = icmp ult i16 %3, 0
+ %conv.i65 = sext i1 false to i8
+ %conv1.i66 = trunc i16 %3 to i8
+ %retval.0.i67 = select i1 %tobool.not.i63, i8 %conv1.i66, i8 %conv.i65
+ %arrayidx13 = getelementptr i8, ptr %pixels, i64 3
+ store i8 %retval.0.i67, ptr %arrayidx13, align 1
+ ret void
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/minimum-sizes.ll b/llvm/test/Transforms/SLPVectorizer/X86/minimum-sizes.ll
index 651631d..a316415 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/minimum-sizes.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/minimum-sizes.ll
@@ -17,12 +17,15 @@ target triple = "x86_64-unknown-linux-gnu"
define i8 @PR31243_zext(i8 %v0, i8 %v1, i8 %v2, i8 %v3, ptr %ptr) {
; SSE-LABEL: @PR31243_zext(
; SSE-NEXT: entry:
-; SSE-NEXT: [[TMP0:%.*]] = or i8 [[V0:%.*]], 1
-; SSE-NEXT: [[TMP1:%.*]] = or i8 [[V1:%.*]], 1
-; SSE-NEXT: [[TMP2:%.*]] = zext i8 [[TMP0]] to i64
-; SSE-NEXT: [[T4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP2]]
-; SSE-NEXT: [[TMP3:%.*]] = zext i8 [[TMP1]] to i64
-; SSE-NEXT: [[T5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP3]]
+; SSE-NEXT: [[TMP0:%.*]] = insertelement <2 x i8> poison, i8 [[V0:%.*]], i64 0
+; SSE-NEXT: [[TMP1:%.*]] = insertelement <2 x i8> [[TMP0]], i8 [[V1:%.*]], i64 1
+; SSE-NEXT: [[TMP2:%.*]] = or <2 x i8> [[TMP1]], <i8 1, i8 1>
+; SSE-NEXT: [[TMP3:%.*]] = extractelement <2 x i8> [[TMP2]], i64 0
+; SSE-NEXT: [[TMP4:%.*]] = zext i8 [[TMP3]] to i64
+; SSE-NEXT: [[T4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP4]]
+; SSE-NEXT: [[TMP5:%.*]] = extractelement <2 x i8> [[TMP2]], i64 1
+; SSE-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i64
+; SSE-NEXT: [[T5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP6]]
; SSE-NEXT: [[T6:%.*]] = load i8, ptr [[T4]], align 1
; SSE-NEXT: [[T7:%.*]] = load i8, ptr [[T5]], align 1
; SSE-NEXT: [[T8:%.*]] = add i8 [[T6]], [[T7]]
@@ -73,12 +76,15 @@ entry:
define i8 @PR31243_sext(i8 %v0, i8 %v1, i8 %v2, i8 %v3, ptr %ptr) {
; SSE-LABEL: @PR31243_sext(
; SSE-NEXT: entry:
-; SSE-NEXT: [[TMP0:%.*]] = or i8 [[V0:%.*]], 1
-; SSE-NEXT: [[TMP1:%.*]] = or i8 [[V1:%.*]], 1
-; SSE-NEXT: [[TMP2:%.*]] = sext i8 [[TMP0]] to i64
-; SSE-NEXT: [[T4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP2]]
-; SSE-NEXT: [[TMP3:%.*]] = sext i8 [[TMP1]] to i64
-; SSE-NEXT: [[T5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP3]]
+; SSE-NEXT: [[TMP0:%.*]] = insertelement <2 x i8> poison, i8 [[V0:%.*]], i64 0
+; SSE-NEXT: [[TMP1:%.*]] = insertelement <2 x i8> [[TMP0]], i8 [[V1:%.*]], i64 1
+; SSE-NEXT: [[TMP2:%.*]] = or <2 x i8> [[TMP1]], <i8 1, i8 1>
+; SSE-NEXT: [[TMP3:%.*]] = extractelement <2 x i8> [[TMP2]], i64 0
+; SSE-NEXT: [[TMP4:%.*]] = sext i8 [[TMP3]] to i64
+; SSE-NEXT: [[T4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP4]]
+; SSE-NEXT: [[TMP5:%.*]] = extractelement <2 x i8> [[TMP2]], i64 1
+; SSE-NEXT: [[TMP6:%.*]] = sext i8 [[TMP5]] to i64
+; SSE-NEXT: [[T5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP6]]
; SSE-NEXT: [[T6:%.*]] = load i8, ptr [[T4]], align 1
; SSE-NEXT: [[T7:%.*]] = load i8, ptr [[T5]], align 1
; SSE-NEXT: [[T8:%.*]] = add i8 [[T6]], [[T7]]
@@ -89,13 +95,12 @@ define i8 @PR31243_sext(i8 %v0, i8 %v1, i8 %v2, i8 %v3, ptr %ptr) {
; AVX-NEXT: [[TMP0:%.*]] = insertelement <2 x i8> poison, i8 [[V0:%.*]], i64 0
; AVX-NEXT: [[TMP1:%.*]] = insertelement <2 x i8> [[TMP0]], i8 [[V1:%.*]], i64 1
; AVX-NEXT: [[TMP2:%.*]] = or <2 x i8> [[TMP1]], <i8 1, i8 1>
-; AVX-NEXT: [[TMP3:%.*]] = sext <2 x i8> [[TMP2]] to <2 x i16>
-; AVX-NEXT: [[TMP4:%.*]] = extractelement <2 x i16> [[TMP3]], i64 0
-; AVX-NEXT: [[TMP5:%.*]] = sext i16 [[TMP4]] to i64
-; AVX-NEXT: [[T4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP5]]
-; AVX-NEXT: [[TMP6:%.*]] = extractelement <2 x i16> [[TMP3]], i64 1
-; AVX-NEXT: [[TMP7:%.*]] = sext i16 [[TMP6]] to i64
-; AVX-NEXT: [[T5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP7]]
+; AVX-NEXT: [[TMP3:%.*]] = extractelement <2 x i8> [[TMP2]], i64 0
+; AVX-NEXT: [[TMP4:%.*]] = sext i8 [[TMP3]] to i64
+; AVX-NEXT: [[T4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP4]]
+; AVX-NEXT: [[TMP5:%.*]] = extractelement <2 x i8> [[TMP2]], i64 1
+; AVX-NEXT: [[TMP6:%.*]] = sext i8 [[TMP5]] to i64
+; AVX-NEXT: [[T5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP6]]
; AVX-NEXT: [[T6:%.*]] = load i8, ptr [[T4]], align 1
; AVX-NEXT: [[T7:%.*]] = load i8, ptr [[T5]], align 1
; AVX-NEXT: [[T8:%.*]] = add i8 [[T6]], [[T7]]
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/phi-node-bitwidt-op-not.ll b/llvm/test/Transforms/SLPVectorizer/X86/phi-node-bitwidt-op-not.ll
new file mode 100644
index 0000000..f376ca7
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/phi-node-bitwidt-op-not.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
+define i32 @test(ptr %b, ptr %c, i32 %0, ptr %a, i1 %tobool3.not) {
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: ptr [[B:%.*]], ptr [[C:%.*]], i32 [[TMP0:%.*]], ptr [[A:%.*]], i1 [[TOBOOL3_NOT:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[TOBOOL3_NOT]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = ashr <4 x i32> [[TMP2]], <i32 16, i32 16, i32 16, i32 16>
+; CHECK-NEXT: [[TMP4:%.*]] = icmp slt <4 x i32> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i1> [[TMP4]] to <4 x i16>
+; CHECK-NEXT: br label [[BB3:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt <4 x i32> [[TMP7]], zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = zext <4 x i1> [[TMP8]] to <4 x i32>
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i1> poison, i1 [[TOBOOL3_NOT]], i32 0
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i1> [[TMP10]], <4 x i1> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = select <4 x i1> [[TMP11]], <4 x i32> [[TMP7]], <4 x i32> [[TMP9]]
+; CHECK-NEXT: [[TMP13:%.*]] = shl <4 x i32> [[TMP12]], <i32 16, i32 16, i32 16, i32 16>
+; CHECK-NEXT: [[TMP14:%.*]] = ashr <4 x i32> [[TMP13]], <i32 16, i32 16, i32 16, i32 16>
+; CHECK-NEXT: [[TMP15:%.*]] = trunc <4 x i32> [[TMP14]] to <4 x i16>
+; CHECK-NEXT: br i1 true, label [[BB3]], label [[BB2]]
+; CHECK: bb3:
+; CHECK-NEXT: [[TMP16:%.*]] = phi <4 x i16> [ [[TMP5]], [[BB1]] ], [ [[TMP15]], [[BB2]] ]
+; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i16> [[TMP16]], i32 0
+; CHECK-NEXT: [[TMP18:%.*]] = sext i16 [[TMP17]] to i32
+; CHECK-NEXT: store i32 [[TMP18]], ptr [[B]], align 16
+; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i16> [[TMP16]], i32 1
+; CHECK-NEXT: [[TMP20:%.*]] = sext i16 [[TMP19]] to i32
+; CHECK-NEXT: store i32 [[TMP20]], ptr [[A]], align 8
+; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i16> [[TMP16]], i32 2
+; CHECK-NEXT: [[TMP22:%.*]] = sext i16 [[TMP21]] to i32
+; CHECK-NEXT: store i32 [[TMP22]], ptr [[C]], align 16
+; CHECK-NEXT: [[TMP23:%.*]] = extractelement <4 x i16> [[TMP16]], i32 3
+; CHECK-NEXT: [[TMP24:%.*]] = sext i16 [[TMP23]] to i32
+; CHECK-NEXT: store i32 [[TMP24]], ptr [[B]], align 8
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ br i1 %tobool3.not, label %bb1, label %bb2
+
+bb1:
+ %conv1.i.us = ashr i32 %0, 16
+ %cmp2.i.us = icmp slt i32 %conv1.i.us, %0
+ %sext26.us = zext i1 %cmp2.i.us to i32
+ %conv1.i.us.5 = ashr i32 %0, 16
+ %cmp2.i.us.5 = icmp slt i32 %conv1.i.us.5, %0
+ %sext26.us.5 = zext i1 %cmp2.i.us.5 to i32
+ %conv1.i.us.6 = ashr i32 %0, 16
+ %cmp2.i.us.6 = icmp slt i32 %conv1.i.us.6, %0
+ %sext26.us.6 = zext i1 %cmp2.i.us.6 to i32
+ %conv1.i.us.7 = ashr i32 %0, 16
+ %cmp2.i.us.7 = icmp slt i32 %conv1.i.us.7, %0
+ %sext26.us.7 = zext i1 %cmp2.i.us.7 to i32
+ br label %bb3
+
+bb2:
+ %cmp2.i = icmp sgt i32 %0, 0
+ %1 = zext i1 %cmp2.i to i32
+ %cond.i = select i1 %tobool3.not, i32 %0, i32 %1
+ %sext26 = shl i32 %cond.i, 16
+ %conv13 = ashr i32 %sext26, 16
+ %cmp2.i.5 = icmp sgt i32 %0, 0
+ %2 = zext i1 %cmp2.i.5 to i32
+ %cond.i.5 = select i1 %tobool3.not, i32 %0, i32 %2
+ %sext26.5 = shl i32 %cond.i.5, 16
+ %conv13.5 = ashr i32 %sext26.5, 16
+ %cmp2.i.6 = icmp sgt i32 %0, 0
+ %3 = zext i1 %cmp2.i.6 to i32
+ %cond.i.6 = select i1 %tobool3.not, i32 %0, i32 %3
+ %sext26.6 = shl i32 %cond.i.6, 16
+ %conv13.6 = ashr i32 %sext26.6, 16
+ %cmp2.i.7 = icmp sgt i32 %0, 0
+ %4 = zext i1 %cmp2.i.7 to i32
+ %cond.i.7 = select i1 %tobool3.not, i32 %0, i32 %4
+ %sext26.7 = shl i32 %cond.i.7, 16
+ %conv13.7 = ashr i32 %sext26.7, 16
+ br i1 true, label %bb3, label %bb2
+
+bb3:
+ %conv13p = phi i32 [ %sext26.us, %bb1 ], [ %conv13, %bb2 ]
+ %conv13.5p = phi i32 [ %sext26.us.5, %bb1 ], [ %conv13.5, %bb2 ]
+ %conv13.6p = phi i32 [ %sext26.us.6, %bb1 ], [ %conv13.6, %bb2 ]
+ %conv13.7p = phi i32 [ %sext26.us.7, %bb1 ], [ %conv13.7, %bb2 ]
+ store i32 %conv13p, ptr %b, align 16
+ store i32 %conv13.5p, ptr %a, align 8
+ store i32 %conv13.6p, ptr %c, align 16
+ store i32 %conv13.7p, ptr %b, align 8
+ ret i32 0
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/phi-undef-input.ll b/llvm/test/Transforms/SLPVectorizer/X86/phi-undef-input.ll
index 88f75c3..3cc32c1 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/phi-undef-input.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/phi-undef-input.ll
@@ -15,8 +15,8 @@ define i32 @phi3UndefInput(i1 %cond, i8 %arg0, i8 %arg1, i8 %arg2, i8 %arg3) {
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP4:%.*]] = phi <4 x i8> [ [[TMP3]], [[BB2]] ], [ <i8 0, i8 undef, i8 undef, i8 undef>, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP5]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> [[TMP4]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
@@ -52,8 +52,8 @@ define i32 @phi2UndefInput(i1 %cond, i8 %arg0, i8 %arg1, i8 %arg2, i8 %arg3) {
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP4:%.*]] = phi <4 x i8> [ [[TMP3]], [[BB2]] ], [ <i8 0, i8 0, i8 undef, i8 undef>, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP5]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> [[TMP4]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
@@ -89,8 +89,8 @@ define i32 @phi1UndefInput(i1 %cond, i8 %arg0, i8 %arg1, i8 %arg2, i8 %arg3) {
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP4:%.*]] = phi <4 x i8> [ [[TMP3]], [[BB2]] ], [ <i8 0, i8 0, i8 0, i8 undef>, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP5]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> [[TMP4]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
@@ -127,8 +127,8 @@ define i32 @phi1Undef1PoisonInput(i1 %cond, i8 %arg0, i8 %arg1, i8 %arg2, i8 %ar
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP4:%.*]] = phi <4 x i8> [ [[TMP3]], [[BB2]] ], [ <i8 0, i8 0, i8 poison, i8 undef>, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP5]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> [[TMP4]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
@@ -165,8 +165,8 @@ define i32 @phi1Undef2PoisonInputs(i1 %cond, i8 %arg0, i8 %arg1, i8 %arg2, i8 %a
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP4:%.*]] = phi <4 x i8> [ [[TMP3]], [[BB2]] ], [ <i8 0, i8 poison, i8 poison, i8 undef>, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP5]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> [[TMP4]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
@@ -202,8 +202,8 @@ define i32 @phi1Undef1PoisonGapInput(i1 %cond, i8 %arg0, i8 %arg1, i8 %arg2, i8
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP4:%.*]] = phi <4 x i8> [ [[TMP3]], [[BB2]] ], [ <i8 0, i8 0, i8 poison, i8 undef>, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP5]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> [[TMP4]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reorder-possible-strided-node.ll b/llvm/test/Transforms/SLPVectorizer/X86/reorder-possible-strided-node.ll
index 6f5d3d3..cfbbe14 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reorder-possible-strided-node.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reorder-possible-strided-node.ll
@@ -10,10 +10,8 @@ define void @test() {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[ARRAYIDX22]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i32> [[TMP2]], [[TMP0]]
-; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i32> [[TMP3]] to <4 x i64>
-; CHECK-NEXT: [[TMP5:%.*]] = ashr <4 x i64> [[TMP4]], zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = trunc <4 x i64> [[TMP5]] to <4 x i32>
-; CHECK-NEXT: store <4 x i32> [[TMP6]], ptr getelementptr inbounds ([4 x i32], ptr null, i64 8, i64 0), align 16
+; CHECK-NEXT: [[TMP4:%.*]] = ashr <4 x i32> [[TMP3]], zeroinitializer
+; CHECK-NEXT: store <4 x i32> [[TMP4]], ptr getelementptr inbounds ([4 x i32], ptr null, i64 8, i64 0), align 16
; CHECK-NEXT: ret void
;
entry:
@@ -108,3 +106,107 @@ entry:
store i32 %conv27, ptr getelementptr inbounds ([4 x i32], ptr null, i64 8, i64 3), align 4
ret void
}
+
+define void @test_div() {
+; CHECK-LABEL: define void @test_div(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARRAYIDX22:%.*]] = getelementptr i32, ptr null, i64 60
+; CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> getelementptr (i32, <4 x ptr> zeroinitializer, <4 x i64> <i64 1, i64 33, i64 7, i64 0>), i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[ARRAYIDX22]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i32> [[TMP2]], [[TMP0]]
+; CHECK-NEXT: [[TMP6:%.*]] = udiv <4 x i32> [[TMP3]], <i32 1, i32 2, i32 1, i32 2>
+; CHECK-NEXT: store <4 x i32> [[TMP6]], ptr getelementptr inbounds ([4 x i32], ptr null, i64 8, i64 0), align 16
+; CHECK-NEXT: ret void
+;
+entry:
+ %arrayidx1 = getelementptr i32, ptr null, i64 1
+ %0 = load i32, ptr %arrayidx1, align 4
+ %arrayidx2 = getelementptr i32, ptr null, i64 63
+ %1 = load i32, ptr %arrayidx2, align 4
+ %mul = mul i32 %1, %0
+ %conv = zext i32 %mul to i64
+ %shr = udiv i64 %conv, 1
+ %conv3 = trunc i64 %shr to i32
+ store i32 %conv3, ptr getelementptr inbounds ([4 x i32], ptr null, i64 8, i64 0), align 16
+ %arrayidx5 = getelementptr i32, ptr null, i64 33
+ %2 = load i32, ptr %arrayidx5, align 4
+ %arrayidx6 = getelementptr i32, ptr null, i64 62
+ %3 = load i32, ptr %arrayidx6, align 4
+ %mul7 = mul i32 %3, %2
+ %conv8 = zext i32 %mul7 to i64
+ %shr10 = udiv i64 %conv8, 2
+ %conv11 = trunc i64 %shr10 to i32
+ store i32 %conv11, ptr getelementptr inbounds ([4 x i32], ptr null, i64 8, i64 1), align 4
+ %arrayidx13 = getelementptr i32, ptr null, i64 7
+ %4 = load i32, ptr %arrayidx13, align 4
+ %arrayidx14 = getelementptr i32, ptr null, i64 61
+ %5 = load i32, ptr %arrayidx14, align 4
+ %mul15 = mul i32 %5, %4
+ %conv16 = zext i32 %mul15 to i64
+ %shr18 = udiv i64 %conv16, 1
+ %conv19 = trunc i64 %shr18 to i32
+ store i32 %conv19, ptr getelementptr inbounds ([4 x i32], ptr null, i64 8, i64 2), align 8
+ %6 = load i32, ptr null, align 4
+ %arrayidx22 = getelementptr i32, ptr null, i64 60
+ %7 = load i32, ptr %arrayidx22, align 4
+ %mul23 = mul i32 %7, %6
+ %conv24 = zext i32 %mul23 to i64
+ %shr26 = udiv i64 %conv24, 2
+ %conv27 = trunc i64 %shr26 to i32
+ store i32 %conv27, ptr getelementptr inbounds ([4 x i32], ptr null, i64 8, i64 3), align 4
+ ret void
+}
+
+define void @test_rem() {
+; CHECK-LABEL: define void @test_rem(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARRAYIDX22:%.*]] = getelementptr i32, ptr null, i64 60
+; CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> getelementptr (i32, <4 x ptr> zeroinitializer, <4 x i64> <i64 1, i64 33, i64 7, i64 0>), i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[ARRAYIDX22]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i32> [[TMP2]], [[TMP0]]
+; CHECK-NEXT: [[TMP6:%.*]] = urem <4 x i32> [[TMP3]], <i32 1, i32 2, i32 1, i32 1>
+; CHECK-NEXT: store <4 x i32> [[TMP6]], ptr getelementptr inbounds ([4 x i32], ptr null, i64 8, i64 0), align 16
+; CHECK-NEXT: ret void
+;
+entry:
+ %arrayidx1 = getelementptr i32, ptr null, i64 1
+ %0 = load i32, ptr %arrayidx1, align 4
+ %arrayidx2 = getelementptr i32, ptr null, i64 63
+ %1 = load i32, ptr %arrayidx2, align 4
+ %mul = mul i32 %1, %0
+ %conv = zext i32 %mul to i64
+ %shr = urem i64 %conv, 1
+ %conv3 = trunc i64 %shr to i32
+ store i32 %conv3, ptr getelementptr inbounds ([4 x i32], ptr null, i64 8, i64 0), align 16
+ %arrayidx5 = getelementptr i32, ptr null, i64 33
+ %2 = load i32, ptr %arrayidx5, align 4
+ %arrayidx6 = getelementptr i32, ptr null, i64 62
+ %3 = load i32, ptr %arrayidx6, align 4
+ %mul7 = mul i32 %3, %2
+ %conv8 = zext i32 %mul7 to i64
+ %shr10 = urem i64 %conv8, 2
+ %conv11 = trunc i64 %shr10 to i32
+ store i32 %conv11, ptr getelementptr inbounds ([4 x i32], ptr null, i64 8, i64 1), align 4
+ %arrayidx13 = getelementptr i32, ptr null, i64 7
+ %4 = load i32, ptr %arrayidx13, align 4
+ %arrayidx14 = getelementptr i32, ptr null, i64 61
+ %5 = load i32, ptr %arrayidx14, align 4
+ %mul15 = mul i32 %5, %4
+ %conv16 = zext i32 %mul15 to i64
+ %shr18 = urem i64 %conv16, 1
+ %conv19 = trunc i64 %shr18 to i32
+ store i32 %conv19, ptr getelementptr inbounds ([4 x i32], ptr null, i64 8, i64 2), align 8
+ %6 = load i32, ptr null, align 4
+ %arrayidx22 = getelementptr i32, ptr null, i64 60
+ %7 = load i32, ptr %arrayidx22, align 4
+ %mul23 = mul i32 %7, %6
+ %conv24 = zext i32 %mul23 to i64
+ %shr26 = urem i64 %conv24, 1
+ %conv27 = trunc i64 %shr26 to i32
+ store i32 %conv27, ptr getelementptr inbounds ([4 x i32], ptr null, i64 8, i64 3), align 4
+ ret void
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll b/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll
index 86b1e1a..9682567 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll
@@ -5,18 +5,19 @@ define void @test() {
; CHECK-LABEL: @test(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr undef, i64 4
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x [4 x i32]], ptr undef, i64 0, i64 1, i64 0
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1
-; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
-; CHECK-NEXT: [[TMP6:%.*]] = sub nsw <4 x i32> zeroinitializer, [[TMP5]]
-; CHECK-NEXT: [[TMP7:%.*]] = shl nsw <4 x i32> [[TMP6]], zeroinitializer
-; CHECK-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[TMP7]], zeroinitializer
-; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x i32> [[TMP8]], <4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-; CHECK-NEXT: [[TMP10:%.*]] = add nsw <4 x i32> [[TMP8]], [[TMP9]]
-; CHECK-NEXT: [[TMP11:%.*]] = sub nsw <4 x i32> [[TMP8]], [[TMP9]]
-; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <4 x i32> [[TMP10]], <4 x i32> [[TMP11]], <4 x i32> <i32 1, i32 4, i32 3, i32 6>
-; CHECK-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> zeroinitializer, [[TMP12]]
-; CHECK-NEXT: [[TMP14:%.*]] = sub nsw <4 x i32> zeroinitializer, [[TMP12]]
-; CHECK-NEXT: [[TMP15:%.*]] = shufflevector <4 x i32> [[TMP13]], <4 x i32> [[TMP14]], <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1
+; CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i8> [[TMP3]] to <4 x i16>
+; CHECK-NEXT: [[TMP5:%.*]] = sub <4 x i16> zeroinitializer, [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = shl <4 x i16> [[TMP5]], zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = add <4 x i16> [[TMP6]], zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i16> [[TMP7]], <4 x i16> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+; CHECK-NEXT: [[TMP9:%.*]] = add <4 x i16> [[TMP7]], [[TMP8]]
+; CHECK-NEXT: [[TMP10:%.*]] = sub <4 x i16> [[TMP7]], [[TMP8]]
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i16> [[TMP9]], <4 x i16> [[TMP10]], <4 x i32> <i32 1, i32 4, i32 3, i32 6>
+; CHECK-NEXT: [[TMP12:%.*]] = add <4 x i16> zeroinitializer, [[TMP11]]
+; CHECK-NEXT: [[TMP13:%.*]] = sub <4 x i16> zeroinitializer, [[TMP11]]
+; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <4 x i16> [[TMP12]], <4 x i16> [[TMP13]], <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+; CHECK-NEXT: [[TMP15:%.*]] = sext <4 x i16> [[TMP14]] to <4 x i32>
; CHECK-NEXT: store <4 x i32> [[TMP15]], ptr [[TMP2]], align 16
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reorder_phi.ll b/llvm/test/Transforms/SLPVectorizer/X86/reorder_phi.ll
index 65229d8..f0e734d 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reorder_phi.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reorder_phi.ll
@@ -10,29 +10,29 @@ define void @foo (ptr %A, ptr %B, ptr %Result) {
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[TMP1:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[TMP18:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = phi <2 x float> [ zeroinitializer, [[ENTRY]] ], [ [[TMP17:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = phi <2 x float> [ zeroinitializer, [[ENTRY]] ], [ [[TMP20:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_COMPLEX:%.*]], ptr [[A:%.*]], i64 [[TMP1]], i32 0
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_COMPLEX]], ptr [[B:%.*]], i64 [[TMP1]], i32 0
; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_COMPLEX]], ptr [[B]], i64 [[TMP1]], i32 1
; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP9:%.*]] = load <2 x float>, ptr [[TMP3]], align 4
-; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x float> poison, float [[TMP5]], i32 0
-; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <2 x float> [[TMP10]], <2 x float> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP11:%.*]] = fmul <2 x float> [[TMP9]], [[SHUFFLE]]
+; CHECK-NEXT: [[TMP8:%.*]] = load <2 x float>, ptr [[TMP3]], align 4
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x float> poison, float [[TMP5]], i32 0
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x float> [[TMP9]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = fmul <2 x float> [[TMP8]], [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x float> poison, float [[TMP7]], i32 0
-; CHECK-NEXT: [[SHUFFLE1:%.*]] = shufflevector <2 x float> [[TMP12]], <2 x float> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP13:%.*]] = fmul <2 x float> [[TMP9]], [[SHUFFLE1]]
-; CHECK-NEXT: [[SHUFFLE2:%.*]] = shufflevector <2 x float> [[TMP13]], <2 x float> poison, <2 x i32> <i32 1, i32 0>
-; CHECK-NEXT: [[TMP14:%.*]] = fsub <2 x float> [[TMP11]], [[SHUFFLE2]]
-; CHECK-NEXT: [[TMP15:%.*]] = fadd <2 x float> [[TMP11]], [[SHUFFLE2]]
-; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <2 x float> [[TMP14]], <2 x float> [[TMP15]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP17]] = fadd <2 x float> [[TMP2]], [[TMP16]]
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <2 x float> [[TMP12]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = fmul <2 x float> [[TMP8]], [[TMP13]]
+; CHECK-NEXT: [[TMP15:%.*]] = shufflevector <2 x float> [[TMP14]], <2 x float> poison, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT: [[TMP16:%.*]] = fsub <2 x float> [[TMP11]], [[TMP15]]
+; CHECK-NEXT: [[TMP17:%.*]] = fadd <2 x float> [[TMP11]], [[TMP15]]
+; CHECK-NEXT: [[TMP21:%.*]] = shufflevector <2 x float> [[TMP16]], <2 x float> [[TMP17]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP20]] = fadd <2 x float> [[TMP2]], [[TMP21]]
; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[TMP0]]
; CHECK-NEXT: br i1 [[TMP19]], label [[EXIT:%.*]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: store <2 x float> [[TMP17]], ptr [[RESULT:%.*]], align 4
+; CHECK-NEXT: store <2 x float> [[TMP20]], ptr [[RESULT:%.*]], align 4
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/resched.ll b/llvm/test/Transforms/SLPVectorizer/X86/resched.ll
index 78c6d95..b7237cb 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/resched.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/resched.ll
@@ -11,26 +11,26 @@ define fastcc void @_ZN12_GLOBAL__N_127PolynomialMultiplyRecognize9recognizeEv()
; CHECK: if.then22.i:
; CHECK-NEXT: [[SUB_I:%.*]] = add nsw i32 undef, -1
; CHECK-NEXT: [[CONV31_I:%.*]] = and i32 undef, [[SUB_I]]
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> poison, i32 [[CONV31_I]], i32 0
-; CHECK-NEXT: [[SHUFFLE1:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = lshr <4 x i32> [[SHUFFLE1]], <i32 1, i32 2, i32 3, i32 4>
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 [[CONV31_I]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = lshr <4 x i32> [[TMP1]], <i32 1, i32 2, i32 3, i32 4>
; CHECK-NEXT: [[SHR_4_I_I:%.*]] = lshr i32 [[CONV31_I]], 5
; CHECK-NEXT: [[SHR_5_I_I:%.*]] = lshr i32 [[CONV31_I]], 6
; CHECK-NEXT: [[SHR_6_I_I:%.*]] = lshr i32 [[CONV31_I]], 7
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i32> poison, i32 [[CONV31_I]], i32 0
-; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <8 x i32> [[TMP3]], <8 x i32> poison, <8 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP4:%.*]] = lshr <8 x i32> [[SHUFFLE]], <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x i32> poison, i32 [[SUB_I]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <16 x i32> [[TMP5]], <16 x i32> [[TMP6]], <16 x i32> <i32 0, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <16 x i32> [[TMP7]], i32 [[SHR_4_I_I]], i32 5
-; CHECK-NEXT: [[TMP9:%.*]] = insertelement <16 x i32> [[TMP8]], i32 [[SHR_5_I_I]], i32 6
-; CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x i32> [[TMP9]], i32 [[SHR_6_I_I]], i32 7
-; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <8 x i32> [[TMP4]], <8 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <16 x i32> [[TMP10]], <16 x i32> [[TMP11]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
-; CHECK-NEXT: [[TMP13:%.*]] = trunc <16 x i32> [[TMP12]] to <16 x i8>
-; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i8> [[TMP13]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-; CHECK-NEXT: store <16 x i8> [[TMP14]], ptr undef, align 1
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> [[TMP3]], <8 x i32> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = lshr <8 x i32> [[TMP4]], <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <16 x i32> poison, i32 [[SUB_I]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <16 x i32> [[TMP6]], <16 x i32> [[TMP7]], <16 x i32> <i32 0, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <16 x i32> [[TMP8]], i32 [[SHR_4_I_I]], i32 5
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x i32> [[TMP9]], i32 [[SHR_5_I_I]], i32 6
+; CHECK-NEXT: [[TMP11:%.*]] = insertelement <16 x i32> [[TMP10]], i32 [[SHR_6_I_I]], i32 7
+; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <8 x i32> [[TMP5]], <8 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <16 x i32> [[TMP11]], <16 x i32> [[TMP12]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; CHECK-NEXT: [[TMP14:%.*]] = trunc <16 x i32> [[TMP13]] to <16 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i8> [[TMP14]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+; CHECK-NEXT: store <16 x i8> [[TMP15]], ptr undef, align 1
; CHECK-NEXT: unreachable
; CHECK: if.end50.i:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reused-reductions-with-minbitwidth.ll b/llvm/test/Transforms/SLPVectorizer/X86/reused-reductions-with-minbitwidth.ll
index 5d22b5a..1d1fcec 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reused-reductions-with-minbitwidth.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reused-reductions-with-minbitwidth.ll
@@ -7,12 +7,10 @@ define i1 @test(i1 %cmp5.not.31) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i1> <i1 poison, i1 false, i1 false, i1 false>, i1 [[CMP5_NOT_31]], i32 0
; CHECK-NEXT: [[TMP1:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = trunc <4 x i32> [[TMP1]] to <4 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i1> [[TMP2]] to <4 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = mul <4 x i32> [[TMP3]], <i32 2, i32 1, i32 1, i32 1>
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
-; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[TMP5]], 0
-; CHECK-NEXT: [[CMP_NOT_I_I:%.*]] = icmp eq i32 [[TMP6]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i32> [[TMP1]], <i32 2, i32 1, i32 1, i32 1>
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = and i32 [[TMP3]], 0
+; CHECK-NEXT: [[CMP_NOT_I_I:%.*]] = icmp eq i32 [[TMP4]], 0
; CHECK-NEXT: ret i1 [[CMP_NOT_I_I]]
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/same-scalar-in-same-phi-extract.ll b/llvm/test/Transforms/SLPVectorizer/X86/same-scalar-in-same-phi-extract.ll
new file mode 100644
index 0000000..f1be11d
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/same-scalar-in-same-phi-extract.ll
@@ -0,0 +1,76 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -slp-threshold=-99999 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
+define void @test(i32 %arg) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: i32 [[ARG:%.*]]) {
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[ARG]], i32 0
+; CHECK-NEXT: br label [[BB2:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: switch i32 0, label [[BB10:%.*]] [
+; CHECK-NEXT: i32 0, label [[BB9:%.*]]
+; CHECK-NEXT: i32 11, label [[BB9]]
+; CHECK-NEXT: i32 1, label [[BB4:%.*]]
+; CHECK-NEXT: ]
+; CHECK: bb3:
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i32> [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT: switch i32 0, label [[BB10]] [
+; CHECK-NEXT: i32 18, label [[BB7:%.*]]
+; CHECK-NEXT: i32 1, label [[BB7]]
+; CHECK-NEXT: i32 0, label [[BB10]]
+; CHECK-NEXT: ]
+; CHECK: bb4:
+; CHECK-NEXT: [[TMP3:%.*]] = phi <2 x i32> [ [[TMP0]], [[BB2]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i32> [[TMP3]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
+; CHECK-NEXT: [[GETELEMENTPTR:%.*]] = getelementptr i32, ptr null, i64 [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i32> [[TMP3]], i32 1
+; CHECK-NEXT: [[TMP6:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[GETELEMENTPTR6:%.*]] = getelementptr i32, ptr null, i64 [[TMP6]]
+; CHECK-NEXT: ret void
+; CHECK: bb7:
+; CHECK-NEXT: [[PHI8:%.*]] = phi i64 [ [[TMP2]], [[BB3:%.*]] ], [ [[TMP2]], [[BB3]] ]
+; CHECK-NEXT: br label [[BB9]]
+; CHECK: bb9:
+; CHECK-NEXT: ret void
+; CHECK: bb10:
+; CHECK-NEXT: ret void
+;
+bb:
+ %zext = zext i32 %arg to i64
+ %zext1 = zext i32 0 to i64
+ br label %bb2
+
+bb2:
+ switch i32 0, label %bb10 [
+ i32 0, label %bb9
+ i32 11, label %bb9
+ i32 1, label %bb4
+ ]
+
+bb3:
+ switch i32 0, label %bb10 [
+ i32 18, label %bb7
+ i32 1, label %bb7
+ i32 0, label %bb10
+ ]
+
+bb4:
+ %phi = phi i64 [ %zext, %bb2 ]
+ %phi5 = phi i64 [ %zext1, %bb2 ]
+ %getelementptr = getelementptr i32, ptr null, i64 %phi
+ %getelementptr6 = getelementptr i32, ptr null, i64 %phi5
+ ret void
+
+bb7:
+ %phi8 = phi i64 [ %zext, %bb3 ], [ %zext, %bb3 ]
+ br label %bb9
+
+bb9:
+ ret void
+
+bb10:
+ ret void
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/sext-inseltpoison.ll b/llvm/test/Transforms/SLPVectorizer/X86/sext-inseltpoison.ll
index 5ae0ad9..b64743a 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/sext-inseltpoison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/sext-inseltpoison.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE,SSE2
-; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE,SLM
+; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX
@@ -11,20 +11,10 @@
;
define <2 x i64> @loadext_2i8_to_2i64(ptr %p0) {
-; SSE2-LABEL: @loadext_2i8_to_2i64(
-; SSE2-NEXT: [[P1:%.*]] = getelementptr inbounds i8, ptr [[P0:%.*]], i64 1
-; SSE2-NEXT: [[I0:%.*]] = load i8, ptr [[P0]], align 1
-; SSE2-NEXT: [[I1:%.*]] = load i8, ptr [[P1]], align 1
-; SSE2-NEXT: [[X0:%.*]] = sext i8 [[I0]] to i64
-; SSE2-NEXT: [[X1:%.*]] = sext i8 [[I1]] to i64
-; SSE2-NEXT: [[V0:%.*]] = insertelement <2 x i64> poison, i64 [[X0]], i32 0
-; SSE2-NEXT: [[V1:%.*]] = insertelement <2 x i64> [[V0]], i64 [[X1]], i32 1
-; SSE2-NEXT: ret <2 x i64> [[V1]]
-;
-; SLM-LABEL: @loadext_2i8_to_2i64(
-; SLM-NEXT: [[TMP2:%.*]] = load <2 x i8>, ptr [[P0:%.*]], align 1
-; SLM-NEXT: [[TMP3:%.*]] = sext <2 x i8> [[TMP2]] to <2 x i64>
-; SLM-NEXT: ret <2 x i64> [[TMP3]]
+; SSE-LABEL: @loadext_2i8_to_2i64(
+; SSE-NEXT: [[TMP1:%.*]] = load <2 x i8>, ptr [[P0:%.*]], align 1
+; SSE-NEXT: [[TMP2:%.*]] = sext <2 x i8> [[TMP1]] to <2 x i64>
+; SSE-NEXT: ret <2 x i64> [[TMP2]]
;
; AVX-LABEL: @loadext_2i8_to_2i64(
; AVX-NEXT: [[TMP2:%.*]] = load <2 x i8>, ptr [[P0:%.*]], align 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/sext.ll b/llvm/test/Transforms/SLPVectorizer/X86/sext.ll
index 7d38aeb..744a509 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/sext.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/sext.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE,SSE2
-; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE,SLM
+; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX
@@ -11,20 +11,10 @@
;
define <2 x i64> @loadext_2i8_to_2i64(ptr %p0) {
-; SSE2-LABEL: @loadext_2i8_to_2i64(
-; SSE2-NEXT: [[P1:%.*]] = getelementptr inbounds i8, ptr [[P0:%.*]], i64 1
-; SSE2-NEXT: [[I0:%.*]] = load i8, ptr [[P0]], align 1
-; SSE2-NEXT: [[I1:%.*]] = load i8, ptr [[P1]], align 1
-; SSE2-NEXT: [[X0:%.*]] = sext i8 [[I0]] to i64
-; SSE2-NEXT: [[X1:%.*]] = sext i8 [[I1]] to i64
-; SSE2-NEXT: [[V0:%.*]] = insertelement <2 x i64> undef, i64 [[X0]], i32 0
-; SSE2-NEXT: [[V1:%.*]] = insertelement <2 x i64> [[V0]], i64 [[X1]], i32 1
-; SSE2-NEXT: ret <2 x i64> [[V1]]
-;
-; SLM-LABEL: @loadext_2i8_to_2i64(
-; SLM-NEXT: [[TMP2:%.*]] = load <2 x i8>, ptr [[P0:%.*]], align 1
-; SLM-NEXT: [[TMP3:%.*]] = sext <2 x i8> [[TMP2]] to <2 x i64>
-; SLM-NEXT: ret <2 x i64> [[TMP3]]
+; SSE-LABEL: @loadext_2i8_to_2i64(
+; SSE-NEXT: [[TMP1:%.*]] = load <2 x i8>, ptr [[P0:%.*]], align 1
+; SSE-NEXT: [[TMP2:%.*]] = sext <2 x i8> [[TMP1]] to <2 x i64>
+; SSE-NEXT: ret <2 x i64> [[TMP2]]
;
; AVX-LABEL: @loadext_2i8_to_2i64(
; AVX-NEXT: [[TMP2:%.*]] = load <2 x i8>, ptr [[P0:%.*]], align 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/store-abs-minbitwidth.ll b/llvm/test/Transforms/SLPVectorizer/X86/store-abs-minbitwidth.ll
new file mode 100644
index 0000000..e8b854b
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/store-abs-minbitwidth.ll
@@ -0,0 +1,70 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -S -mtriple=x86_64-unknown -mattr=+avx512vl -passes=slp-vectorizer -slp-threshold=-3 | FileCheck %s
+
+
+define i32 @test(ptr noalias %in, ptr noalias %inn, ptr %out) {
+; CHECK-LABEL: @test(
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i8>, ptr [[IN:%.*]], align 1
+; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr inbounds i8, ptr [[IN]], i64 2
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i8>, ptr [[GEP_2]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i8>, ptr [[INN:%.*]], align 1
+; CHECK-NEXT: [[GEP_5:%.*]] = getelementptr inbounds i8, ptr [[INN]], i64 2
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i8>, ptr [[GEP_5]], align 1
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x i8> [[TMP3]], <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x i8> [[TMP2]], <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i8> [[TMP5]], <4 x i8> [[TMP6]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT: [[TMP8:%.*]] = sext <4 x i8> [[TMP7]] to <4 x i32>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x i8> [[TMP1]], <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i8> [[TMP4]], <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i8> [[TMP9]], <4 x i8> [[TMP10]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT: [[TMP12:%.*]] = sext <4 x i8> [[TMP11]] to <4 x i32>
+; CHECK-NEXT: [[TMP13:%.*]] = sub <4 x i32> [[TMP12]], [[TMP8]]
+; CHECK-NEXT: [[TMP14:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[TMP13]], i1 true)
+; CHECK-NEXT: [[TMP15:%.*]] = trunc <4 x i32> [[TMP14]] to <4 x i16>
+; CHECK-NEXT: store <4 x i16> [[TMP15]], ptr [[OUT:%.*]], align 2
+; CHECK-NEXT: ret i32 undef
+;
+ %load.1 = load i8, ptr %in, align 1
+ %gep.1 = getelementptr inbounds i8, ptr %in, i64 1
+ %load.2 = load i8, ptr %gep.1, align 1
+ %gep.2 = getelementptr inbounds i8, ptr %in, i64 2
+ %load.3 = load i8, ptr %gep.2, align 1
+ %gep.3 = getelementptr inbounds i8, ptr %in, i64 3
+ %load.4 = load i8, ptr %gep.3, align 1
+ %load.5 = load i8, ptr %inn, align 1
+ %gep.4 = getelementptr inbounds i8, ptr %inn, i64 1
+ %load.6 = load i8, ptr %gep.4, align 1
+ %gep.5 = getelementptr inbounds i8, ptr %inn, i64 2
+ %load.7 = load i8, ptr %gep.5, align 1
+ %gep.6 = getelementptr inbounds i8, ptr %inn, i64 3
+ %load.8 = load i8, ptr %gep.6, align 1
+ %sext1 = sext i8 %load.1 to i32
+ %sext2 = sext i8 %load.2 to i32
+ %sext3 = sext i8 %load.3 to i32
+ %sext4 = sext i8 %load.4 to i32
+ %sext5 = sext i8 %load.5 to i32
+ %sext6 = sext i8 %load.6 to i32
+ %sext7 = sext i8 %load.7 to i32
+ %sext8 = sext i8 %load.8 to i32
+ %sub1 = sub i32 %sext1, %sext5
+ %sub2 = sub i32 %sext2, %sext6
+ %sub3 = sub i32 %sext7, %sext3
+ %sub4 = sub i32 %sext8, %sext4
+ %call1 = call i32 @llvm.abs(i32 %sub1, i1 true)
+ %call2 = call i32 @llvm.abs(i32 %sub2, i1 true)
+ %call3 = call i32 @llvm.abs(i32 %sub3, i1 true)
+ %call4 = call i32 @llvm.abs(i32 %sub4, i1 true)
+ %t1 = trunc i32 %call1 to i16
+ %t2 = trunc i32 %call2 to i16
+ %t3 = trunc i32 %call3 to i16
+ %t4 = trunc i32 %call4 to i16
+ %gep.8 = getelementptr inbounds i16, ptr %out, i64 1
+ %gep.9 = getelementptr inbounds i16, ptr %out, i64 2
+ %gep.10 = getelementptr inbounds i16, ptr %out, i64 3
+ store i16 %t1, ptr %out, align 2
+ store i16 %t2, ptr %gep.8, align 2
+ store i16 %t3, ptr %gep.9, align 2
+ store i16 %t4, ptr %gep.10, align 2
+
+ ret i32 undef
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/store-insertelement-minbitwidth.ll b/llvm/test/Transforms/SLPVectorizer/X86/store-insertelement-minbitwidth.ll
index c1dd90d..2f6868d 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/store-insertelement-minbitwidth.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/store-insertelement-minbitwidth.ll
@@ -8,17 +8,18 @@
; YAML-NEXT: Function: stores
; YAML-NEXT: Args:
; YAML-NEXT: - String: 'Stores SLP vectorized with cost '
-; YAML-NEXT: - Cost: '-3'
+; YAML-NEXT: - Cost: '-7'
; YAML-NEXT: - String: ' and with tree size '
; YAML-NEXT: - TreeSize: '6'
define void @stores(ptr noalias %in, ptr noalias %inn, ptr noalias %out) {
; CHECK-LABEL: @stores(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[IN:%.*]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[INN:%.*]], align 1
-; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i64>
-; CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i64>
-; CHECK-NEXT: [[TMP5:%.*]] = add <4 x i64> [[TMP3]], [[TMP4]]
-; CHECK-NEXT: store <4 x i64> [[TMP5]], ptr [[OUT:%.*]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i16>
+; CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i16>
+; CHECK-NEXT: [[TMP5:%.*]] = add <4 x i16> [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = zext <4 x i16> [[TMP5]] to <4 x i64>
+; CHECK-NEXT: store <4 x i64> [[TMP6]], ptr [[OUT:%.*]], align 4
; CHECK-NEXT: ret void
;
%load.1 = load i8, ptr %in, align 1
@@ -63,17 +64,18 @@ define void @stores(ptr noalias %in, ptr noalias %inn, ptr noalias %out) {
; YAML-NEXT: Function: insertelems
; YAML-NEXT: Args:
; YAML-NEXT: - String: 'SLP vectorized with cost '
-; YAML-NEXT: - Cost: '-5'
+; YAML-NEXT: - Cost: '-9'
; YAML-NEXT: - String: ' and with tree size '
; YAML-NEXT: - TreeSize: '6'
define <4 x i64> @insertelems(ptr noalias %in, ptr noalias %inn) {
; CHECK-LABEL: @insertelems(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[IN:%.*]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[INN:%.*]], align 1
-; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i64>
-; CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i64>
-; CHECK-NEXT: [[TMP5:%.*]] = add <4 x i64> [[TMP3]], [[TMP4]]
-; CHECK-NEXT: ret <4 x i64> [[TMP5]]
+; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i16>
+; CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i16>
+; CHECK-NEXT: [[TMP5:%.*]] = add <4 x i16> [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = zext <4 x i16> [[TMP5]] to <4 x i64>
+; CHECK-NEXT: ret <4 x i64> [[TMP6]]
;
%load.1 = load i8, ptr %in, align 1
%gep.1 = getelementptr inbounds i8, ptr %in, i64 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll b/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
index 22cd408..e8395fe 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
@@ -179,11 +179,11 @@ define void @addsub0(ptr noalias %dst, ptr noalias %src) {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[INCDEC_PTR]], align 4
; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 2
; CHECK-NEXT: store i32 [[TMP1]], ptr [[INCDEC_PTR1]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr [[INCDEC_PTR2]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = add nsw <2 x i32> [[TMP3]], <i32 -2, i32 -3>
-; CHECK-NEXT: [[TMP5:%.*]] = sub nsw <2 x i32> [[TMP3]], <i32 -2, i32 -3>
-; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x i32> [[TMP4]], <2 x i32> [[TMP5]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: store <2 x i32> [[TMP6]], ptr [[INCDEC_PTR3]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = add nsw <2 x i32> [[TMP2]], <i32 -2, i32 -3>
+; CHECK-NEXT: [[TMP4:%.*]] = sub nsw <2 x i32> [[TMP2]], <i32 -2, i32 -3>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x i32> [[TMP3]], <2 x i32> [[TMP4]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: store <2 x i32> [[TMP5]], ptr [[INCDEC_PTR3]], align 4
; CHECK-NEXT: ret void
;
entry:
@@ -212,11 +212,11 @@ define void @addsub1(ptr noalias %dst, ptr noalias %src) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 2
; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 2
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr [[SRC]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw <2 x i32> [[TMP1]], <i32 -1, i32 -1>
-; CHECK-NEXT: [[TMP3:%.*]] = sub nsw <2 x i32> [[TMP1]], <i32 -1, i32 -1>
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> [[TMP3]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: store <2 x i32> [[TMP4]], ptr [[DST]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr [[SRC]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = add nsw <2 x i32> [[TMP0]], <i32 -1, i32 -1>
+; CHECK-NEXT: [[TMP2:%.*]] = sub nsw <2 x i32> [[TMP0]], <i32 -1, i32 -1>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> [[TMP2]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: store <2 x i32> [[TMP3]], ptr [[DST]], align 4
; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 3
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[INCDEC_PTR2]], align 4
; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 3
@@ -531,11 +531,11 @@ define void @addsub0f(ptr noalias %dst, ptr noalias %src) {
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[INCDEC_PTR]], align 4
; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 2
; CHECK-NEXT: store float [[TMP1]], ptr [[INCDEC_PTR1]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x float>, ptr [[INCDEC_PTR2]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = fadd fast <2 x float> [[TMP3]], <float -2.000000e+00, float -3.000000e+00>
-; CHECK-NEXT: [[TMP5:%.*]] = fsub fast <2 x float> [[TMP3]], <float -2.000000e+00, float -3.000000e+00>
-; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> [[TMP5]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: store <2 x float> [[TMP6]], ptr [[INCDEC_PTR3]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x float>, ptr [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = fadd fast <2 x float> [[TMP2]], <float -2.000000e+00, float -3.000000e+00>
+; CHECK-NEXT: [[TMP4:%.*]] = fsub fast <2 x float> [[TMP2]], <float -2.000000e+00, float -3.000000e+00>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> [[TMP4]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: store <2 x float> [[TMP5]], ptr [[INCDEC_PTR3]], align 4
; CHECK-NEXT: ret void
;
entry:
@@ -564,11 +564,11 @@ define void @addsub1f(ptr noalias %dst, ptr noalias %src) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i64 2
; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds float, ptr [[DST:%.*]], i64 2
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[SRC]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <2 x float> [[TMP1]], <float -1.000000e+00, float -1.000000e+00>
-; CHECK-NEXT: [[TMP3:%.*]] = fsub fast <2 x float> [[TMP1]], <float -1.000000e+00, float -1.000000e+00>
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> [[TMP3]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: store <2 x float> [[TMP4]], ptr [[DST]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x float>, ptr [[SRC]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = fadd fast <2 x float> [[TMP0]], <float -1.000000e+00, float -1.000000e+00>
+; CHECK-NEXT: [[TMP2:%.*]] = fsub fast <2 x float> [[TMP0]], <float -1.000000e+00, float -1.000000e+00>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> [[TMP2]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: store <2 x float> [[TMP3]], ptr [[DST]], align 4
; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, ptr [[SRC]], i64 3
; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[INCDEC_PTR2]], align 4
; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 3
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/zext-inseltpoison.ll b/llvm/test/Transforms/SLPVectorizer/X86/zext-inseltpoison.ll
index d1f6c41..27996a7 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/zext-inseltpoison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/zext-inseltpoison.ll
@@ -12,13 +12,8 @@
define <2 x i64> @loadext_2i8_to_2i64(ptr %p0) {
; SSE2-LABEL: @loadext_2i8_to_2i64(
-; SSE2-NEXT: [[P1:%.*]] = getelementptr inbounds i8, ptr [[P0:%.*]], i64 1
-; SSE2-NEXT: [[I0:%.*]] = load i8, ptr [[P0]], align 1
-; SSE2-NEXT: [[I1:%.*]] = load i8, ptr [[P1]], align 1
-; SSE2-NEXT: [[X0:%.*]] = zext i8 [[I0]] to i64
-; SSE2-NEXT: [[X1:%.*]] = zext i8 [[I1]] to i64
-; SSE2-NEXT: [[V0:%.*]] = insertelement <2 x i64> poison, i64 [[X0]], i32 0
-; SSE2-NEXT: [[V1:%.*]] = insertelement <2 x i64> [[V0]], i64 [[X1]], i32 1
+; SSE2-NEXT: [[TMP1:%.*]] = load <2 x i8>, ptr [[P0:%.*]], align 1
+; SSE2-NEXT: [[V1:%.*]] = zext <2 x i8> [[TMP1]] to <2 x i64>
; SSE2-NEXT: ret <2 x i64> [[V1]]
;
; SLM-LABEL: @loadext_2i8_to_2i64(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/zext.ll b/llvm/test/Transforms/SLPVectorizer/X86/zext.ll
index 829e4ba..9487042 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/zext.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/zext.ll
@@ -12,13 +12,8 @@
define <2 x i64> @loadext_2i8_to_2i64(ptr %p0) {
; SSE2-LABEL: @loadext_2i8_to_2i64(
-; SSE2-NEXT: [[P1:%.*]] = getelementptr inbounds i8, ptr [[P0:%.*]], i64 1
-; SSE2-NEXT: [[I0:%.*]] = load i8, ptr [[P0]], align 1
-; SSE2-NEXT: [[I1:%.*]] = load i8, ptr [[P1]], align 1
-; SSE2-NEXT: [[X0:%.*]] = zext i8 [[I0]] to i64
-; SSE2-NEXT: [[X1:%.*]] = zext i8 [[I1]] to i64
-; SSE2-NEXT: [[V0:%.*]] = insertelement <2 x i64> undef, i64 [[X0]], i32 0
-; SSE2-NEXT: [[V1:%.*]] = insertelement <2 x i64> [[V0]], i64 [[X1]], i32 1
+; SSE2-NEXT: [[TMP1:%.*]] = load <2 x i8>, ptr [[P0:%.*]], align 1
+; SSE2-NEXT: [[V1:%.*]] = zext <2 x i8> [[TMP1]] to <2 x i64>
; SSE2-NEXT: ret <2 x i64> [[V1]]
;
; SLM-LABEL: @loadext_2i8_to_2i64(
diff --git a/llvm/test/Transforms/SLPVectorizer/alt-cmp-vectorize.ll b/llvm/test/Transforms/SLPVectorizer/alt-cmp-vectorize.ll
index 061fbdb..ff6f0bdd 100644
--- a/llvm/test/Transforms/SLPVectorizer/alt-cmp-vectorize.ll
+++ b/llvm/test/Transforms/SLPVectorizer/alt-cmp-vectorize.ll
@@ -10,8 +10,8 @@ define i32 @alt_cmp(i16 %call46) {
; CHECK-NEXT: [[TMP2:%.*]] = icmp ult <4 x i16> [[TMP0]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt <4 x i16> [[TMP0]], [[TMP1]]
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i1> [[TMP2]], <4 x i1> [[TMP3]], <4 x i32> <i32 0, i32 5, i32 2, i32 3>
-; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i1> [[TMP4]] to <4 x i16>
-; CHECK-NEXT: [[TMP6:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP5]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP4]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i16
; CHECK-NEXT: [[OP_RDX:%.*]] = or i16 [[TMP6]], 0
; CHECK-NEXT: [[EXT:%.*]] = zext i16 [[OP_RDX]] to i32
; CHECK-NEXT: ret i32 [[EXT]]
diff --git a/llvm/test/Transforms/SLPVectorizer/orig-btiwidth-les-projected.ll b/llvm/test/Transforms/SLPVectorizer/orig-btiwidth-les-projected.ll
new file mode 100644
index 0000000..531e964
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/orig-btiwidth-les-projected.ll
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer < %s | FileCheck %s
+
+define i32 @test(i4 %0) {
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: i4 [[TMP0:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i8 0 to i4
+; CHECK-NEXT: [[TMP2:%.*]] = trunc i8 0 to i4
+; CHECK-NEXT: [[ADD_R:%.*]] = or i4 [[TMP1]], [[TMP0]]
+; CHECK-NEXT: [[ADD_R14:%.*]] = or i4 0, [[TMP2]]
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i4 [[ADD_R]], [[ADD_R14]]
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %1 = trunc i8 0 to i4
+ %2 = trunc i8 0 to i4
+ %add.r = or i4 %1, %0
+ %add.r14 = or i4 0, %2
+ %cmp.not = icmp eq i4 %add.r, %add.r14
+ ret i32 0
+}
diff --git a/llvm/test/Transforms/SampleProfile/Inputs/csspgo-import-list-preinliner.prof b/llvm/test/Transforms/SampleProfile/Inputs/csspgo-import-list-preinliner.prof
new file mode 100644
index 0000000..7388d57
--- /dev/null
+++ b/llvm/test/Transforms/SampleProfile/Inputs/csspgo-import-list-preinliner.prof
@@ -0,0 +1,14 @@
+main:8001:0
+ 1: 0
+ 2: 2000
+ 3: 2000
+ 4: 0
+ 5: 2000
+ 6: 2000
+ 7: 0
+ 8: 0
+ 9: bar:1
+ 1: 1
+ !CFGChecksum: 4294967295
+ !Attributes: 2
+ !CFGChecksum: 563088156202820
diff --git a/llvm/test/Transforms/SampleProfile/Inputs/pseudo-probe-callee-profile-mismatch.prof b/llvm/test/Transforms/SampleProfile/Inputs/pseudo-probe-callee-profile-mismatch.prof
new file mode 100644
index 0000000..76a8fc9
--- /dev/null
+++ b/llvm/test/Transforms/SampleProfile/Inputs/pseudo-probe-callee-profile-mismatch.prof
@@ -0,0 +1,16 @@
+main:252:0
+ 1: 0
+ 2: 50
+ 5: 50
+ 7: bar:102
+ 1: 51
+ 2: baz:51
+ 1: 51
+ !CFGChecksum: 4294967295
+ !Attributes: 3
+ !CFGChecksum: 281479271677951
+ !Attributes: 2
+ !CFGChecksum: 281582081721716
+bar:1:1
+ 1: 1
+ !CFGChecksum: 281479271677951
diff --git a/llvm/test/Transforms/SampleProfile/csspgo-import-list-preinliner.ll b/llvm/test/Transforms/SampleProfile/csspgo-import-list-preinliner.ll
new file mode 100644
index 0000000..9e342f2
--- /dev/null
+++ b/llvm/test/Transforms/SampleProfile/csspgo-import-list-preinliner.ll
@@ -0,0 +1,50 @@
+; RUN: opt < %s -passes='thinlto-pre-link<O2>' -pgo-kind=pgo-sample-use-pipeline -sample-profile-file=%S/Inputs/csspgo-import-list-preinliner.prof -S -profile-summary-cutoff-hot=100000 -sample-profile-use-preinliner=0 | FileCheck %s --check-prefix=DISABLE-PREINLINE
+; RUN: opt < %s -passes='thinlto-pre-link<O2>' -pgo-kind=pgo-sample-use-pipeline -sample-profile-file=%S/Inputs/csspgo-import-list-preinliner.prof -S -profile-summary-cutoff-hot=100000 | FileCheck %s
+
+; The GUID of bar is -2012135647395072713
+
+; DISABLE-PREINLINE-NOT: -2012135647395072713
+; CHECK: [[#]] = !{!"function_entry_count", i64 1, i64 -2012135647395072713}
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @main() #0 {
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.cond, %entry
+ call void @llvm.pseudoprobe(i64 0, i64 0, i32 0, i64 0)
+ %call2 = call i32 @bar(), !dbg !9
+ br label %for.cond
+}
+
+declare i32 @bar()
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite)
+declare void @llvm.pseudoprobe(i64, i64, i32, i64) #1
+
+attributes #0 = { "use-sample-profile" }
+attributes #1 = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7}
+!llvm.pseudo_probe_desc = !{!8}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang version 19.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, globals: !2, splitDebugInlining: false, nameTableKind: None)
+!1 = !DIFile(filename: "test.c", directory: "/home/", checksumkind: CSK_MD5, checksum: "1bff37d8b3f7858b0bc29ab4efdf9422")
+!2 = !{!3}
+!3 = !DIGlobalVariableExpression(var: !4, expr: !DIExpression())
+!4 = distinct !DIGlobalVariable(name: "x", scope: !0, file: !1, line: 2, type: !5, isLocal: false, isDefinition: true)
+!5 = !DIDerivedType(tag: DW_TAG_volatile_type, baseType: !6)
+!6 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!7 = !{i32 2, !"Debug Info Version", i32 3}
+!8 = !{i64 -2624081020897602054, i64 563108639284859, !"main"}
+!9 = !DILocation(line: 11, column: 10, scope: !10)
+!10 = !DILexicalBlockFile(scope: !11, file: !1, discriminator: 186646615)
+!11 = distinct !DILexicalBlock(scope: !12, file: !1, line: 8, column: 40)
+!12 = distinct !DILexicalBlock(scope: !13, file: !1, line: 8, column: 3)
+!13 = distinct !DILexicalBlock(scope: !14, file: !1, line: 8, column: 3)
+!14 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 6, type: !15, scopeLine: 7, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !16)
+!15 = distinct !DISubroutineType(types: !16)
+!16 = !{}
diff --git a/llvm/test/Transforms/SampleProfile/csspgo-profile-checksum-mismatch-attr.ll b/llvm/test/Transforms/SampleProfile/csspgo-profile-checksum-mismatch-attr.ll
new file mode 100644
index 0000000..df56b55d
--- /dev/null
+++ b/llvm/test/Transforms/SampleProfile/csspgo-profile-checksum-mismatch-attr.ll
@@ -0,0 +1,67 @@
+; REQUIRES: x86_64-linux
+; REQUIRES: asserts
+; RUN: opt < %s -passes='thinlto-pre-link<O2>' -pgo-kind=pgo-sample-use-pipeline -sample-profile-file=%S/Inputs/pseudo-probe-callee-profile-mismatch.prof -pass-remarks=inline -S -o %t 2>&1 | FileCheck %s --check-prefix=INLINE
+; RUN: FileCheck %s < %t
+; RUN: FileCheck %s < %t --check-prefix=MERGE
+
+
+; Make sure bar is inlined into main for attr merging verification.
+; INLINE: 'bar' inlined into 'main'
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @baz() #0 {
+entry:
+ ret i32 0
+}
+
+define i32 @bar() #0 !dbg !11 {
+; CHECK: define {{.*}} @bar() {{.*}} #[[#BAR_ATTR:]] !
+entry:
+ %call = call i32 @baz()
+ ret i32 0
+}
+
+define i32 @main() #0 {
+; MERGE: define {{.*}} @main() {{.*}} #[[#MAIN_ATTR:]] !
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.cond, %entry
+ %call = call i32 @bar(), !dbg !14
+ br label %for.cond
+}
+
+; CHECK: attributes #[[#BAR_ATTR]] = {{{.*}} "profile-checksum-mismatch" {{.*}}}
+
+; Verify the attribute is not merged into the caller.
+; MERGE-NOT: attributes #[[#MAIN_ATTR]] = {{{.*}} "profile-checksum-mismatch" {{.*}}}
+
+attributes #0 = { "use-sample-profile" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7}
+!llvm.pseudo_probe_desc = !{!8, !9, !10}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang version 19.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, globals: !2, splitDebugInlining: false, nameTableKind: None)
+!1 = !DIFile(filename: "test.c", directory: "/home", checksumkind: CSK_MD5, checksum: "0df0c950a93a603a7d13f0a9d4623642")
+!2 = !{!3}
+!3 = !DIGlobalVariableExpression(var: !4, expr: !DIExpression())
+!4 = distinct !DIGlobalVariable(name: "x", scope: !0, file: !1, line: 2, type: !5, isLocal: false, isDefinition: true)
+!5 = !DIDerivedType(tag: DW_TAG_volatile_type, baseType: !6)
+!6 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!7 = !{i32 2, !"Debug Info Version", i32 3}
+!8 = !{i64 7546896869197086323, i64 4294967295, !"baz"}
+!9 = !{i64 -2012135647395072713, i64 281530612780802, !"bar"}
+!10 = !{i64 -2624081020897602054, i64 281582081721716, !"main"}
+!11 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 5, type: !12, scopeLine: 5, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !13)
+!12 = distinct !DISubroutineType(types: !13)
+!13 = !{}
+!14 = !DILocation(line: 15, column: 10, scope: !15)
+!15 = !DILexicalBlockFile(scope: !16, file: !1, discriminator: 186646591)
+!16 = distinct !DILexicalBlock(scope: !17, file: !1, line: 14, column: 40)
+!17 = distinct !DILexicalBlock(scope: !18, file: !1, line: 14, column: 3)
+!18 = distinct !DILexicalBlock(scope: !19, file: !1, line: 14, column: 3)
+!19 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 12, type: !20, scopeLine: 13, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !13)
+!20 = !DISubroutineType(types: !13)
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-callee-profile-mismatch.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-callee-profile-mismatch.ll
new file mode 100644
index 0000000..4881937
--- /dev/null
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-callee-profile-mismatch.ll
@@ -0,0 +1,63 @@
+; REQUIRES: x86_64-linux
+; REQUIRES: asserts
+; RUN: opt < %s -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-callee-profile-mismatch.prof --salvage-stale-profile -S --debug-only=sample-profile,sample-profile-matcher,sample-profile-impl -pass-remarks=inline 2>&1 | FileCheck %s
+
+
+; CHECK: Run stale profile matching for bar
+; CHECK: Callsite with callee:baz is matched from 4 to 2
+; CHECK: 'baz' inlined into 'main' to match profiling context with (cost=always): preinliner at callsite bar:3:8.4 @ main:3:10.7
+
+; CHECK: Probe descriptor missing for Function bar
+; CHECK: Profile is invalid due to CFG mismatch for Function bar
+
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @main() #0 {
+ %1 = call i32 @bar(), !dbg !13
+ ret i32 0
+}
+
+define available_externally i32 @bar() #1 !dbg !21 {
+ %1 = call i32 @baz(), !dbg !23
+ ret i32 0
+}
+
+define available_externally i32 @baz() #0 !dbg !25 {
+ ret i32 0
+}
+
+attributes #0 = { "use-sample-profile" }
+attributes #1 = { "profile-checksum-mismatch" "use-sample-profile" }
+
+!llvm.dbg.cu = !{!0, !7, !9}
+!llvm.module.flags = !{!11}
+!llvm.pseudo_probe_desc = !{!12}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang version 19.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, globals: !2, splitDebugInlining: false, nameTableKind: None)
+!1 = !DIFile(filename: "test.c", directory: "/home/test", checksumkind: CSK_MD5, checksum: "7220f1a2d70ff869f1a6ab7958e3c393")
+!2 = !{!3}
+!3 = !DIGlobalVariableExpression(var: !4, expr: !DIExpression())
+!4 = distinct !DIGlobalVariable(name: "x", scope: !0, file: !1, line: 2, type: !5, isLocal: false, isDefinition: true)
+!5 = !DIDerivedType(tag: DW_TAG_volatile_type, baseType: !6)
+!6 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!7 = distinct !DICompileUnit(language: DW_LANG_C11, file: !8, producer: "clang version 19.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+!8 = !DIFile(filename: "test1.v1.c", directory: "/home/test", checksumkind: CSK_MD5, checksum: "76696bd6bfe16a9f227fe03cfdb6a82c")
+!9 = distinct !DICompileUnit(language: DW_LANG_C11, file: !10, producer: "clang version 19.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+!10 = !DIFile(filename: "test2.c", directory: "/home/test", checksumkind: CSK_MD5, checksum: "553093afc026f9c73562eb3b0c5b7532")
+!11 = !{i32 2, !"Debug Info Version", i32 3}
+!12 = !{i64 -2624081020897602054, i64 281582081721716, !"main"}
+!13 = !DILocation(line: 8, column: 10, scope: !14)
+!14 = !DILexicalBlockFile(scope: !15, file: !1, discriminator: 186646591)
+!15 = distinct !DILexicalBlock(scope: !16, file: !1, line: 7, column: 40)
+!16 = distinct !DILexicalBlock(scope: !17, file: !1, line: 7, column: 3)
+!17 = distinct !DILexicalBlock(scope: !18, file: !1, line: 7, column: 3)
+!18 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 5, type: !19, scopeLine: 6, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !20)
+!19 = distinct !DISubroutineType(types: !20)
+!20 = !{}
+!21 = distinct !DISubprogram(name: "bar", scope: !8, file: !8, line: 3, type: !22, scopeLine: 3, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !7, retainedNodes: !20)
+!22 = !DISubroutineType(types: !20)
+!23 = !DILocation(line: 6, column: 8, scope: !24)
+!24 = !DILexicalBlockFile(scope: !21, file: !8, discriminator: 186646567)
+!25 = distinct !DISubprogram(name: "baz", scope: !10, file: !10, line: 1, type: !22, scopeLine: 1, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !9, retainedNodes: !20)
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-eh.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-eh.ll
index 697ef44..9954914 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-eh.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-eh.ll
@@ -18,7 +18,7 @@ entry:
to label %ret unwind label %lpad
ret:
-; CHECK: call void @llvm.pseudoprobe
+; CHECK-NOT: call void @llvm.pseudoprobe
ret void
lpad: ; preds = %entry
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-invoke.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-invoke.ll
new file mode 100644
index 0000000..822ab40
--- /dev/null
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-invoke.ll
@@ -0,0 +1,155 @@
+; REQUIRES: x86_64-linux
+; RUN: opt < %s -passes=pseudo-probe -S -o - | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+$__clang_call_terminate = comdat any
+
+@x = dso_local global i32 0, align 4, !dbg !0
+
+; Function Attrs: mustprogress noinline nounwind uwtable
+define dso_local void @_Z3barv() #0 personality ptr @__gxx_personality_v0 !dbg !14 {
+entry:
+; CHECK: call void @llvm.pseudoprobe(i64 -1069303473483922844, i64 1
+ %0 = load volatile i32, ptr @x, align 4, !dbg !17, !tbaa !19
+ %tobool = icmp ne i32 %0, 0, !dbg !17
+ br i1 %tobool, label %if.then, label %if.else, !dbg !23
+
+if.then: ; preds = %entry
+; CHECK: call void @llvm.pseudoprobe(i64 -1069303473483922844, i64 2
+ invoke void @_Z3foov()
+ to label %invoke.cont unwind label %terminate.lpad, !dbg !24
+
+invoke.cont: ; preds = %if.then
+; CHECK-NOT: call void @llvm.pseudoprobe(i64 -1069303473483922844,
+ invoke void @_Z3bazv()
+ to label %invoke.cont1 unwind label %terminate.lpad, !dbg !26
+
+invoke.cont1: ; preds = %invoke.cont
+; CHECK-NOT: call void @llvm.pseudoprobe(i64 -1069303473483922844,
+ br label %if.end, !dbg !27
+
+if.else: ; preds = %entry
+; CHECK: call void @llvm.pseudoprobe(i64 -1069303473483922844, i64 3
+ invoke void @_Z3foov()
+ to label %invoke.cont2 unwind label %terminate.lpad, !dbg !28
+
+invoke.cont2: ; preds = %if.else
+; CHECK-NOT: call void @llvm.pseudoprobe(i64 -1069303473483922844,
+ br label %if.end
+
+if.end: ; preds = %invoke.cont2, %invoke.cont1
+; CHECK: call void @llvm.pseudoprobe(i64 -1069303473483922844, i64 4
+ invoke void @_Z3foov()
+ to label %invoke.cont3 unwind label %terminate.lpad, !dbg !29
+
+invoke.cont3: ; preds = %if.end
+; CHECK-NOT: call void @llvm.pseudoprobe(i64 -1069303473483922844,
+ %1 = load volatile i32, ptr @x, align 4, !dbg !30, !tbaa !19
+ %tobool4 = icmp ne i32 %1, 0, !dbg !30
+ br i1 %tobool4, label %if.then5, label %if.end6, !dbg !32
+
+if.then5: ; preds = %invoke.cont3
+; CHECK: call void @llvm.pseudoprobe(i64 -1069303473483922844, i64 5
+ %2 = load volatile i32, ptr @x, align 4, !dbg !33, !tbaa !19
+ %inc = add nsw i32 %2, 1, !dbg !33
+ store volatile i32 %inc, ptr @x, align 4, !dbg !33, !tbaa !19
+ br label %if.end6, !dbg !35
+
+if.end6: ; preds = %if.then5, %invoke.cont3
+; CHECK: call void @llvm.pseudoprobe(i64 -1069303473483922844, i64 6
+ ret void, !dbg !36
+
+terminate.lpad: ; preds = %if.end, %if.else, %invoke.cont, %if.then
+; CHECK-NOT: call void @llvm.pseudoprobe(i64 -1069303473483922844,
+ %3 = landingpad { ptr, i32 }
+ catch ptr null, !dbg !24
+ %4 = extractvalue { ptr, i32 } %3, 0, !dbg !24
+ call void @__clang_call_terminate(ptr %4) #3, !dbg !24
+ unreachable, !dbg !24
+}
+
+; Function Attrs: mustprogress noinline nounwind uwtable
+define dso_local void @_Z3foov() #0 !dbg !37 {
+entry:
+ ret void, !dbg !38
+}
+
+declare i32 @__gxx_personality_v0(...)
+
+; Function Attrs: noinline noreturn nounwind uwtable
+define linkonce_odr hidden void @__clang_call_terminate(ptr noundef %0) #1 comdat {
+ %2 = call ptr @__cxa_begin_catch(ptr %0) #4
+ call void @_ZSt9terminatev() #3
+ unreachable
+}
+
+declare ptr @__cxa_begin_catch(ptr)
+
+declare void @_ZSt9terminatev()
+
+; Function Attrs: mustprogress noinline nounwind uwtable
+define dso_local void @_Z3bazv() #0 !dbg !39 {
+entry:
+ ret void, !dbg !40
+}
+
+; CHECK: ![[#]] = !{i64 -3270123626113159616, i64 4294967295, !"_Z3bazv"}
+
+attributes #0 = { mustprogress noinline nounwind uwtable "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" }
+attributes #1 = { noinline noreturn nounwind uwtable "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" }
+attributes #2 = { mustprogress noinline norecurse nounwind uwtable "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" }
+attributes #3 = { noreturn nounwind }
+attributes #4 = { nounwind }
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!7, !8, !9, !10, !11, !12}
+!llvm.ident = !{!13}
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "x", scope: !2, file: !3, line: 1, type: !5, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !3, producer: "clang version 19.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, globals: !4, splitDebugInlining: false, nameTableKind: None)
+!3 = !DIFile(filename: "test.cpp", directory: "/home", checksumkind: CSK_MD5, checksum: "a4c7b0392f3fd9c8ebb85065159dbb02")
+!4 = !{!0}
+!5 = !DIDerivedType(tag: DW_TAG_volatile_type, baseType: !6)
+!6 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!7 = !{i32 7, !"Dwarf Version", i32 5}
+!8 = !{i32 2, !"Debug Info Version", i32 3}
+!9 = !{i32 1, !"wchar_size", i32 4}
+!10 = !{i32 8, !"PIC Level", i32 2}
+!11 = !{i32 7, !"PIE Level", i32 2}
+!12 = !{i32 7, !"uwtable", i32 2}
+!13 = !{!"clang version 19.0.0"}
+!14 = distinct !DISubprogram(name: "bar", linkageName: "_Z3barv", scope: !3, file: !3, line: 4, type: !15, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !2)
+!15 = !DISubroutineType(types: !16)
+!16 = !{null}
+!17 = !DILocation(line: 5, column: 6, scope: !18)
+!18 = distinct !DILexicalBlock(scope: !14, file: !3, line: 5, column: 6)
+!19 = !{!20, !20, i64 0}
+!20 = !{!"int", !21, i64 0}
+!21 = !{!"omnipotent char", !22, i64 0}
+!22 = !{!"Simple C++ TBAA"}
+!23 = !DILocation(line: 5, column: 6, scope: !14)
+!24 = !DILocation(line: 6, column: 5, scope: !25)
+!25 = distinct !DILexicalBlock(scope: !18, file: !3, line: 5, column: 9)
+!26 = !DILocation(line: 7, column: 5, scope: !25)
+!27 = !DILocation(line: 8, column: 3, scope: !25)
+!28 = !DILocation(line: 9, column: 5, scope: !18)
+!29 = !DILocation(line: 11, column: 3, scope: !14)
+!30 = !DILocation(line: 12, column: 6, scope: !31)
+!31 = distinct !DILexicalBlock(scope: !14, file: !3, line: 12, column: 6)
+!32 = !DILocation(line: 12, column: 6, scope: !14)
+!33 = !DILocation(line: 13, column: 5, scope: !34)
+!34 = distinct !DILexicalBlock(scope: !31, file: !3, line: 12, column: 9)
+!35 = !DILocation(line: 14, column: 5, scope: !34)
+!36 = !DILocation(line: 17, column: 1, scope: !14)
+!37 = distinct !DISubprogram(name: "foo", linkageName: "_Z3foov", scope: !3, file: !3, line: 19, type: !15, scopeLine: 19, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !2)
+!38 = !DILocation(line: 19, column: 13, scope: !37)
+!39 = distinct !DISubprogram(name: "baz", linkageName: "_Z3bazv", scope: !3, file: !3, line: 18, type: !15, scopeLine: 18, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !2)
+!40 = !DILocation(line: 18, column: 13, scope: !39)
+!41 = distinct !DISubprogram(name: "main", scope: !3, file: !3, line: 22, type: !42, scopeLine: 22, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !2)
+!42 = !DISubroutineType(types: !43)
+!43 = !{!6}
+!44 = !DILocation(line: 23, column: 3, scope: !41)
+!45 = !DILocation(line: 24, column: 1, scope: !41)
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch-error.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch-error.ll
new file mode 100644
index 0000000..2bb8f67
--- /dev/null
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch-error.ll
@@ -0,0 +1,7 @@
+; REQUIRES: x86_64-linux
+; RUN: not opt < %S/pseudo-probe-profile-mismatch.ll -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-profile-mismatch.prof -min-functions-for-staleness-error=1 -precent-mismatch-for-staleness-error=1 -S 2>&1 | FileCheck %s
+; RUN: opt < %S/pseudo-probe-profile-mismatch.ll -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-profile-mismatch.prof -min-functions-for-staleness-error=3 -precent-mismatch-for-staleness-error=70 -S 2>&1
+; RUN: opt < %S/pseudo-probe-profile-mismatch.ll -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-profile-mismatch.prof -min-functions-for-staleness-error=4 -precent-mismatch-for-staleness-error=1 -S 2>&1
+
+
+; CHECK: error: {{.*}}: The input profile significantly mismatches current source code. Please recollect profile to avoid performance regression.
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-lto.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-lto.ll
index 55225b4..7aabeeca 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-lto.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-lto.ll
@@ -1,6 +1,6 @@
; REQUIRES: x86_64-linux
; REQUIRES: asserts
-; RUN: opt < %s -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-stale-profile-matching-lto.prof --salvage-stale-profile -S --debug-only=sample-profile,sample-profile-impl 2>&1 | FileCheck %s
+; RUN: opt < %s -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-stale-profile-matching-lto.prof --salvage-stale-profile -S --debug-only=sample-profile,sample-profile-matcher,sample-profile-impl 2>&1 | FileCheck %s
; CHECK: Run stale profile matching for main
@@ -106,7 +106,7 @@ define available_externally dso_local i32 @bar(i32 noundef %0) local_unnamed_add
ret i32 %2, !dbg !132
}
-attributes #0 = { nounwind uwtable "disable-tail-calls"="true" "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "use-sample-profile" }
+attributes #0 = { nounwind uwtable "disable-tail-calls"="true" "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "use-sample-profile" "profile-checksum-mismatch"}
attributes #1 = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) }
attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
attributes #3 = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) uwtable "disable-tail-calls"="true" "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "use-sample-profile" }
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll
index 89477ea5..0d471e4 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll
@@ -1,6 +1,6 @@
; REQUIRES: x86_64-linux
; REQUIRES: asserts
-; RUN: opt < %s -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-stale-profile-matching.prof --salvage-stale-profile -S --debug-only=sample-profile,sample-profile-impl 2>&1 | FileCheck %s
+; RUN: opt < %s -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-stale-profile-matching.prof --salvage-stale-profile -S --debug-only=sample-profile,sample-profile-matcher,sample-profile-impl 2>&1 | FileCheck %s
; The profiled source code:
@@ -48,6 +48,8 @@
; }
; }
+; Verify not running profile matching for checksum matched function.
+; CHECK-NOT: Run stale profile matching for bar
; CHECK: Run stale profile matching for main
diff --git a/llvm/test/Transforms/SampleProfile/remarks-hotness.ll b/llvm/test/Transforms/SampleProfile/remarks-hotness.ll
index b90b21e..36fb3c5 100644
--- a/llvm/test/Transforms/SampleProfile/remarks-hotness.ll
+++ b/llvm/test/Transforms/SampleProfile/remarks-hotness.ll
@@ -24,7 +24,7 @@
; YAML-PASS: --- !Passed
; YAML-PASS-NEXT: Pass: inline
-; YAML-PASS-NEXT: Name: AlwaysInline
+; YAML-PASS-NEXT: Name: Inlined
; YAML-PASS-NEXT: DebugLoc: { File: remarks-hotness.cpp, Line: 10, Column: 10 }
; YAML-PASS-NEXT: Function: _Z7caller1v
; YAML-PASS-NEXT: Hotness: 401
@@ -36,7 +36,7 @@
; YAML-MISS-NEXT: Function: _Z7caller2v
; YAML-MISS-NEXT: Hotness: 2
-; CHECK-RPASS: '_Z7callee1v' inlined into '_Z7caller1v' with (cost=always): benefit over cost at callsite _Z7caller1v:1:10; (hotness: 401)
+; CHECK-RPASS: '_Z7callee1v' inlined into '_Z7caller1v' with (cost=-30, threshold=4500) at callsite _Z7caller1v:1:10; (hotness: 401)
; CHECK-RPASS-NOT: '_Z7callee2v' not inlined into '_Z7caller2v' because it should never be inlined (cost=never): noinline function attribute (hotness: 2)
; ModuleID = 'remarks-hotness.cpp'
diff --git a/llvm/test/Transforms/SimplifyCFG/HoistCode.ll b/llvm/test/Transforms/SimplifyCFG/HoistCode.ll
index a081edd..4a4c940 100644
--- a/llvm/test/Transforms/SimplifyCFG/HoistCode.ll
+++ b/llvm/test/Transforms/SimplifyCFG/HoistCode.ll
@@ -64,8 +64,8 @@ define float @PR39535min_switch(i64 %i, float %x) {
; CHECK-LABEL: @PR39535min_switch(
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i64 [[I:%.*]], label [[END:%.*]] [
-; CHECK-NEXT: i64 1, label [[BB1:%.*]]
-; CHECK-NEXT: i64 2, label [[BB2:%.*]]
+; CHECK-NEXT: i64 1, label [[BB1:%.*]]
+; CHECK-NEXT: i64 2, label [[BB2:%.*]]
; CHECK-NEXT: ]
; CHECK: bb1:
; CHECK-NEXT: br label [[END]]
@@ -154,3 +154,33 @@ F:
%z2 = or disjoint i32 %x, %y
ret i32 %z2
}
+
+define i16 @hoist_trunc_flags_preserve(i1 %C, i32 %x) {
+; CHECK-LABEL: @hoist_trunc_flags_preserve(
+; CHECK-NEXT: common.ret:
+; CHECK-NEXT: [[Z1:%.*]] = trunc nuw nsw i32 [[X:%.*]] to i16
+; CHECK-NEXT: ret i16 [[Z1]]
+;
+ br i1 %C, label %T, label %F
+T:
+ %z1 = trunc nsw nuw i32 %x to i16
+ ret i16 %z1
+F:
+ %z2 = trunc nsw nuw i32 %x to i16
+ ret i16 %z2
+}
+
+define i16 @hoist_trunc_flags_drop(i1 %C, i32 %x) {
+; CHECK-LABEL: @hoist_trunc_flags_drop(
+; CHECK-NEXT: common.ret:
+; CHECK-NEXT: [[Z1:%.*]] = trunc i32 [[X:%.*]] to i16
+; CHECK-NEXT: ret i16 [[Z1]]
+;
+ br i1 %C, label %T, label %F
+T:
+ %z1 = trunc i32 %x to i16
+ ret i16 %z1
+F:
+ %z2 = trunc nsw nuw i32 %x to i16
+ ret i16 %z2
+}
diff --git a/llvm/test/Transforms/SpeculativeExecution/PR46267.ll b/llvm/test/Transforms/SpeculativeExecution/PR46267.ll
index d940ee6..69dac22 100644
--- a/llvm/test/Transforms/SpeculativeExecution/PR46267.ll
+++ b/llvm/test/Transforms/SpeculativeExecution/PR46267.ll
@@ -31,7 +31,6 @@ define void @f(i32 %i) {
entry:
; CHECK-LABEL: @f(
; CHECK: %a2 = add i32 %i, 0
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 %a2
br i1 undef, label %land.rhs, label %land.end
land.rhs: ; preds = %entry
@@ -42,6 +41,7 @@ land.rhs: ; preds = %entry
; CHECK-NEXT: %a0 = load i32, ptr undef, align 1
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 %a0
; CHECK-NEXT: call void @llvm.dbg.label
+; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 %a2
call void @llvm.dbg.label(metadata !11), !dbg !10
%y = alloca i32, align 4
call void @llvm.dbg.declare(metadata ptr %y, metadata !14, metadata !DIExpression()), !dbg !10
diff --git a/llvm/test/Transforms/TailCallElim/debugloc.ll b/llvm/test/Transforms/TailCallElim/debugloc.ll
index 3abbd65..4995769 100644
--- a/llvm/test/Transforms/TailCallElim/debugloc.ll
+++ b/llvm/test/Transforms/TailCallElim/debugloc.ll
@@ -4,13 +4,13 @@
define void @foo() {
entry:
; CHECK-LABEL: entry:
-; CHECK: br label %tailrecurse, !dbg ![[DbgLoc:[0-9]+]]
+; CHECK: br label %tailrecurse{{$}}
call void @foo() ;; line 1
ret void
; CHECK-LABEL: tailrecurse:
-; CHECK: br label %tailrecurse, !dbg ![[DbgLoc]]
+; CHECK: br label %tailrecurse, !dbg ![[DbgLoc:[0-9]+]]
}
;; Make sure tailrecurse has the call instruction's DL
diff --git a/llvm/test/Transforms/ThinLTOBitcodeWriter/pr33536.ll b/llvm/test/Transforms/ThinLTOBitcodeWriter/pr33536.ll
index 8e78921..a51c1d2 100644
--- a/llvm/test/Transforms/ThinLTOBitcodeWriter/pr33536.ll
+++ b/llvm/test/Transforms/ThinLTOBitcodeWriter/pr33536.ll
@@ -12,7 +12,7 @@ target triple = "x86_64-unknown-linux-gnu"
; M0: @global = local_unnamed_addr global
; M1-NOT: @global
-@global = local_unnamed_addr global %struct.hoge { %struct.widget { ptr getelementptr inbounds ({ [3 x ptr] }, ptr @global.1, i32 0, inrange i32 0, i32 2) } }, align 8
+@global = local_unnamed_addr global %struct.hoge { %struct.widget { ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @global.1, i32 0, i32 0, i32 2) } }, align 8
; M0: @global.1 = external unnamed_addr constant
; M1: @global.1 = linkonce_odr unnamed_addr constant
diff --git a/llvm/test/Transforms/VectorCombine/X86/shuffle-inseltpoison.ll b/llvm/test/Transforms/VectorCombine/X86/shuffle-inseltpoison.ll
index 8c5c665..74a58c8 100644
--- a/llvm/test/Transforms/VectorCombine/X86/shuffle-inseltpoison.ll
+++ b/llvm/test/Transforms/VectorCombine/X86/shuffle-inseltpoison.ll
@@ -133,8 +133,7 @@ define <2 x i64> @PR35454_1(<2 x i64> %v) {
; SSE-NEXT: ret <2 x i64> [[BC3]]
;
; AVX-LABEL: @PR35454_1(
-; AVX-NEXT: [[BC:%.*]] = bitcast <2 x i64> [[V:%.*]] to <4 x i32>
-; AVX-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[BC]] to <16 x i8>
+; AVX-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[V:%.*]] to <16 x i8>
; AVX-NEXT: [[BC1:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> poison, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
; AVX-NEXT: [[ADD:%.*]] = shl <16 x i8> [[BC1]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; AVX-NEXT: [[BC2:%.*]] = bitcast <16 x i8> [[ADD]] to <4 x i32>
@@ -164,8 +163,7 @@ define <2 x i64> @PR35454_2(<2 x i64> %v) {
; SSE-NEXT: ret <2 x i64> [[BC3]]
;
; AVX-LABEL: @PR35454_2(
-; AVX-NEXT: [[BC:%.*]] = bitcast <2 x i64> [[V:%.*]] to <4 x i32>
-; AVX-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[BC]] to <8 x i16>
+; AVX-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[V:%.*]] to <8 x i16>
; AVX-NEXT: [[BC1:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 2, i32 3, i32 0, i32 1>
; AVX-NEXT: [[ADD:%.*]] = shl <8 x i16> [[BC1]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; AVX-NEXT: [[BC2:%.*]] = bitcast <8 x i16> [[ADD]] to <4 x i32>
diff --git a/llvm/test/Transforms/VectorCombine/X86/shuffle.ll b/llvm/test/Transforms/VectorCombine/X86/shuffle.ll
index 60cfc4d..d1484fd 100644
--- a/llvm/test/Transforms/VectorCombine/X86/shuffle.ll
+++ b/llvm/test/Transforms/VectorCombine/X86/shuffle.ll
@@ -133,8 +133,7 @@ define <2 x i64> @PR35454_1(<2 x i64> %v) {
; SSE-NEXT: ret <2 x i64> [[BC3]]
;
; AVX-LABEL: @PR35454_1(
-; AVX-NEXT: [[BC:%.*]] = bitcast <2 x i64> [[V:%.*]] to <4 x i32>
-; AVX-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[BC]] to <16 x i8>
+; AVX-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[V:%.*]] to <16 x i8>
; AVX-NEXT: [[BC1:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> poison, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
; AVX-NEXT: [[ADD:%.*]] = shl <16 x i8> [[BC1]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; AVX-NEXT: [[BC2:%.*]] = bitcast <16 x i8> [[ADD]] to <4 x i32>
@@ -164,8 +163,7 @@ define <2 x i64> @PR35454_2(<2 x i64> %v) {
; SSE-NEXT: ret <2 x i64> [[BC3]]
;
; AVX-LABEL: @PR35454_2(
-; AVX-NEXT: [[BC:%.*]] = bitcast <2 x i64> [[V:%.*]] to <4 x i32>
-; AVX-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[BC]] to <8 x i16>
+; AVX-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[V:%.*]] to <8 x i16>
; AVX-NEXT: [[BC1:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 2, i32 3, i32 0, i32 1>
; AVX-NEXT: [[ADD:%.*]] = shl <8 x i16> [[BC1]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; AVX-NEXT: [[BC2:%.*]] = bitcast <8 x i16> [[ADD]] to <4 x i32>
diff --git a/llvm/test/Verifier/intrinsic-cmp.ll b/llvm/test/Verifier/intrinsic-cmp.ll
new file mode 100644
index 0000000..2224a5c
--- /dev/null
+++ b/llvm/test/Verifier/intrinsic-cmp.ll
@@ -0,0 +1,22 @@
+; RUN: not opt -S -passes=verify 2>&1 < %s | FileCheck %s
+
+define void @matching_vector_lens(<4 x i32> %arg1, <4 x i32> %arg2) {
+ ; CHECK: return type and arguments must have the same number of elements
+ %res = call <8 x i32> @llvm.scmp.v8i32.v4i32(<4 x i32> %arg1, <4 x i32> %arg2)
+ ret void
+}
+
+define void @result_len_is_at_least_2bits_wide(i32 %arg1, i32 %arg2) {
+ ; CHECK: result type must be at least 2 bits wide
+ %res2 = call i1 @llvm.scmp.i1.i32(i32 %arg1, i32 %arg2)
+ ret void
+}
+
+define void @both_args_are_vecs_or_neither(<4 x i32> %arg1, i32 %arg2) {
+ ; CHECK: ucmp/scmp argument and result types must both be either vector or scalar types
+ %res3 = call i2 @llvm.scmp.i2.v4i32(<4 x i32> %arg1, <4 x i32> %arg1)
+ ; CHECK: ucmp/scmp argument and result types must both be either vector or scalar types
+ %res4 = call <4 x i32> @llvm.scmp.v4i32.i32(i32 %arg2, i32 %arg2)
+ ret void
+}
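+
+; For contrast, a well-formed call would satisfy all three rules checked above.
+; This is only an illustrative sketch (not exercised by the RUN line): scalar
+; arguments, a scalar result at least 2 bits wide, and matching element counts.
+;
+;   define i8 @well_formed(i32 %arg1, i32 %arg2) {
+;     %res = call i8 @llvm.scmp.i8.i32(i32 %arg1, i32 %arg2)
+;     ret i8 %res
+;   }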
+
diff --git a/llvm/test/Verifier/tbaa-struct.ll b/llvm/test/Verifier/tbaa-struct.ll
new file mode 100644
index 0000000..b8ddc7c
--- /dev/null
+++ b/llvm/test/Verifier/tbaa-struct.ll
@@ -0,0 +1,40 @@
+; RUN: llvm-as < %s 2>&1
+
+; FIXME: The verifier should reject the invalid !tbaa.struct nodes below.
+
+define void @test_overlapping_regions(ptr %a1) {
+ %ld = load i8, ptr %a1, align 1, !tbaa.struct !0
+ ret void
+}
+
+define void @test_size_not_integer(ptr %a1) {
+ store i8 1, ptr %a1, align 1, !tbaa.struct !5
+ ret void
+}
+
+define void @test_offset_not_integer(ptr %a1, ptr %a2) {
+ tail call void @llvm.memcpy.p0.p0.i64(ptr align 8 %a1, ptr align 8 %a2, i64 16, i1 false), !tbaa.struct !6
+ ret void
+}
+
+define void @test_tbaa_missing(ptr %a1, ptr %a2) {
+ tail call void @llvm.memcpy.p0.p0.i64(ptr align 8 %a1, ptr align 8 %a2, i64 16, i1 false), !tbaa.struct !7
+ ret void
+}
+
+define void @test_tbaa_invalid(ptr %a1) {
+ store i8 1, ptr %a1, align 1, !tbaa.struct !8
+ ret void
+}
+
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
+
+!0 = !{i64 0, i64 4, !1, i64 1, i64 4, !1}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C++ TBAA"}
+!5 = !{i64 0, !2, !1}
+!6 = !{!2, i64 0, !1}
+!7 = !{i64 0, i64 4, null}
+!8 = !{i64 0, i64 4, !2}
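+
+; For reference, a well-formed !tbaa.struct node is a list of
+; (offset, size, tbaa-tag) triples whose offsets and sizes are integer
+; constants and whose regions do not overlap. An illustrative sketch only
+; (not checked by this test):
+;   !9 = !{i64 0, i64 4, !1, i64 4, i64 4, !1}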
diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/lanai_isel.ll.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/lanai_isel.ll.expected
index 80145c5..71e82ec 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/lanai_isel.ll.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/lanai_isel.ll.expected
@@ -7,7 +7,7 @@ define i64 @i64_test(i64 %i) nounwind readnone {
; CHECK-NEXT: t0: ch,glue = EntryToken
; CHECK-NEXT: t5: i32,ch = LDW_RI<Mem:(load (s32) from %fixed-stack.0)> TargetFrameIndex:i32<-2>, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t7: i32 = ADD_I_LO TargetFrameIndex:i32<0>, TargetConstant:i32<0>
-; CHECK-NEXT: t29: i32 = OR_I_LO t7, TargetConstant:i32<4>
+; CHECK-NEXT: t29: i32 = OR_I_LO disjoint t7, TargetConstant:i32<4>
; CHECK-NEXT: t22: i32,ch = LDW_RI<Mem:(dereferenceable load (s32) from %ir.loc + 4, basealign 8)> t29, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t24: i32 = ADD_R t5, t22, TargetConstant:i32<0>
; CHECK-NEXT: t3: i32,ch = LDW_RI<Mem:(load (s32) from %fixed-stack.1, align 8)> TargetFrameIndex:i32<-1>, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
@@ -52,7 +52,7 @@ define i64 @i16_test(i16 %i) nounwind readnone {
; CHECK-NEXT: t33: i32,ch = CopyFromReg t0, Register:i32 $r0
; CHECK-NEXT: t14: ch,glue = CopyToReg t0, Register:i32 $rv, t33
; CHECK-NEXT: t1: i32 = ADD_I_LO TargetFrameIndex:i32<-1>, TargetConstant:i32<0>
-; CHECK-NEXT: t21: i32 = OR_I_LO t1, TargetConstant:i32<2>
+; CHECK-NEXT: t21: i32 = OR_I_LO disjoint t1, TargetConstant:i32<2>
; CHECK-NEXT: t23: i32,ch = LDHz_RI<Mem:(load (s16) from %fixed-stack.0 + 2, basealign 4)> t21, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t22: i32,ch = LDHz_RI<Mem:(dereferenceable load (s16) from %ir.loc)> TargetFrameIndex:i32<0>, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t24: i32 = ADD_R t23, t22, TargetConstant:i32<0>
@@ -75,7 +75,7 @@ define i64 @i8_test(i8 %i) nounwind readnone {
; CHECK-NEXT: t33: i32,ch = CopyFromReg t0, Register:i32 $r0
; CHECK-NEXT: t14: ch,glue = CopyToReg t0, Register:i32 $rv, t33
; CHECK-NEXT: t1: i32 = ADD_I_LO TargetFrameIndex:i32<-1>, TargetConstant:i32<0>
-; CHECK-NEXT: t21: i32 = OR_I_LO t1, TargetConstant:i32<3>
+; CHECK-NEXT: t21: i32 = OR_I_LO disjoint t1, TargetConstant:i32<3>
; CHECK-NEXT: t23: i32,ch = LDBz_RI<Mem:(load (s8) from %fixed-stack.0 + 3, basealign 4)> t21, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t22: i32,ch = LDBz_RI<Mem:(dereferenceable load (s8) from %ir.loc)> TargetFrameIndex:i32<0>, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t24: i32 = ADD_R t23, t22, TargetConstant:i32<0>
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.expected
index 02d8870..775649c 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.expected
@@ -41,7 +41,7 @@ declare void @_Z10sideeffectv()
; CHECK-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; CHECK: codeRepl:
; CHECK-NEXT: call void @foo.cold.1() #[[ATTR2:[0-9]+]]
-; CHECK-NEXT: ret void
+; CHECK-NEXT: unreachable
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -52,7 +52,7 @@ declare void @_Z10sideeffectv()
; CHECK-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; CHECK: codeRepl:
; CHECK-NEXT: call void @bar.cold.1() #[[ATTR2]]
-; CHECK-NEXT: ret void
+; CHECK-NEXT: unreachable
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.globals.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.globals.expected
index 05e5777..a8086ae 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.globals.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.globals.expected
@@ -44,7 +44,7 @@ declare void @_Z10sideeffectv()
; CHECK-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; CHECK: codeRepl:
; CHECK-NEXT: call void @foo.cold.1() #[[ATTR2:[0-9]+]]
-; CHECK-NEXT: ret void
+; CHECK-NEXT: unreachable
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -55,7 +55,7 @@ declare void @_Z10sideeffectv()
; CHECK-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; CHECK: codeRepl:
; CHECK-NEXT: call void @bar.cold.1() #[[ATTR2]]
-; CHECK-NEXT: ret void
+; CHECK-NEXT: unreachable
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.expected
index 36bcbe3..57de350 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.expected
@@ -12,7 +12,7 @@ define void @foo(i32) {
; CHECK-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; CHECK: codeRepl:
; CHECK-NEXT: call void @foo.cold.1() #[[ATTR2:[0-9]+]]
-; CHECK-NEXT: ret void
+; CHECK-NEXT: unreachable
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -36,7 +36,7 @@ define void @bar(i32) {
; CHECK-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; CHECK: codeRepl:
; CHECK-NEXT: call void @bar.cold.1() #[[ATTR2]]
-; CHECK-NEXT: ret void
+; CHECK-NEXT: unreachable
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.globals.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.globals.expected
index db7c692..696d5c6e 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.globals.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.globals.expected
@@ -15,7 +15,7 @@ define void @foo(i32) {
; CHECK-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; CHECK: codeRepl:
; CHECK-NEXT: call void @foo.cold.1() #[[ATTR2:[0-9]+]]
-; CHECK-NEXT: ret void
+; CHECK-NEXT: unreachable
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -39,7 +39,7 @@ define void @bar(i32) {
; CHECK-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; CHECK: codeRepl:
; CHECK-NEXT: call void @bar.cold.1() #[[ATTR2]]
-; CHECK-NEXT: ret void
+; CHECK-NEXT: unreachable
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.generated.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.generated.expected
index 1039995..5275870 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.generated.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.generated.expected
@@ -42,7 +42,7 @@ declare void @_Z10sideeffectv()
; REUSE-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; REUSE: codeRepl:
; REUSE-NEXT: call void @foo.cold.1() #[[ATTR2:[0-9]+]]
-; REUSE-NEXT: ret void
+; REUSE-NEXT: unreachable
; REUSE: exit:
; REUSE-NEXT: ret void
;
@@ -53,7 +53,7 @@ declare void @_Z10sideeffectv()
; REUSE-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; REUSE: codeRepl:
; REUSE-NEXT: call void @bar.cold.1() #[[ATTR2]]
-; REUSE-NEXT: ret void
+; REUSE-NEXT: unreachable
; REUSE: exit:
; REUSE-NEXT: ret void
;
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.generated.globals.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.generated.globals.expected
index 0001790..712ccb2 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.generated.globals.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.generated.globals.expected
@@ -45,7 +45,7 @@ declare void @_Z10sideeffectv()
; REUSE-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; REUSE: codeRepl:
; REUSE-NEXT: call void @foo.cold.1() #[[ATTR2:[0-9]+]]
-; REUSE-NEXT: ret void
+; REUSE-NEXT: unreachable
; REUSE: exit:
; REUSE-NEXT: ret void
;
@@ -56,7 +56,7 @@ declare void @_Z10sideeffectv()
; REUSE-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; REUSE: codeRepl:
; REUSE-NEXT: call void @bar.cold.1() #[[ATTR2]]
-; REUSE-NEXT: ret void
+; REUSE-NEXT: unreachable
; REUSE: exit:
; REUSE-NEXT: ret void
;
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.nogenerated.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.nogenerated.expected
index e05a57d..b5b12b7 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.nogenerated.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.nogenerated.expected
@@ -13,7 +13,7 @@ define void @foo(i32) {
; REUSE-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; REUSE: codeRepl:
; REUSE-NEXT: call void @foo.cold.1() #[[ATTR2:[0-9]+]]
-; REUSE-NEXT: ret void
+; REUSE-NEXT: unreachable
; REUSE: exit:
; REUSE-NEXT: ret void
;
@@ -37,7 +37,7 @@ define void @bar(i32) {
; REUSE-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; REUSE: codeRepl:
; REUSE-NEXT: call void @bar.cold.1() #[[ATTR2]]
-; REUSE-NEXT: ret void
+; REUSE-NEXT: unreachable
; REUSE: exit:
; REUSE-NEXT: ret void
;
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.nogenerated.globals.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.nogenerated.globals.expected
index 17be222..7e2b991 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.nogenerated.globals.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs_prefix_reuse.ll.nogenerated.globals.expected
@@ -16,7 +16,7 @@ define void @foo(i32) {
; REUSE-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; REUSE: codeRepl:
; REUSE-NEXT: call void @foo.cold.1() #[[ATTR2:[0-9]+]]
-; REUSE-NEXT: ret void
+; REUSE-NEXT: unreachable
; REUSE: exit:
; REUSE-NEXT: ret void
;
@@ -40,7 +40,7 @@ define void @bar(i32) {
; REUSE-NEXT: br i1 [[TMP2]], label [[CODEREPL:%.*]], label [[EXIT:%.*]]
; REUSE: codeRepl:
; REUSE-NEXT: call void @bar.cold.1() #[[ATTR2]]
-; REUSE-NEXT: ret void
+; REUSE-NEXT: unreachable
; REUSE: exit:
; REUSE-NEXT: ret void
;
diff --git a/llvm/test/tools/dsymutil/ARM/obfuscated.test b/llvm/test/tools/dsymutil/ARM/obfuscated.test
deleted file mode 100644
index 3443b8e..0000000
--- a/llvm/test/tools/dsymutil/ARM/obfuscated.test
+++ /dev/null
@@ -1,200 +0,0 @@
-REQUIRES: system-darwin
-
-RUN: dsymutil --symbol-map %p/../Inputs/obfuscated.map %p/../Inputs/obfuscated.arm64 -f -o - \
-RUN: | llvm-dwarfdump -v - \
-RUN: | FileCheck %s
-
-RUN: dsymutil --accelerator=Pub --symbol-map %p/../Inputs/obfuscated.map %p/../Inputs/obfuscated.arm64 -f -o - \
-RUN: | llvm-dwarfdump -v - \
-RUN: | FileCheck --check-prefix=PUB %s
-
-RUN: dsymutil --symbol-map %p/../Inputs/obfuscated.map %p/../Inputs/obfuscated.arm64 -f -o - \
-RUN: | llvm-dwarfdump -v - \
-RUN: | FileCheck --check-prefix=NOHIDDEN %s
-
-RUN: dsymutil --symbol-map %p/../Inputs/obfuscated.2.map %p/../Inputs/obfuscated.2.arm64 -f -o - \
-RUN: | llvm-dwarfdump -v - \
-RUN: | FileCheck --check-prefix=NOHIDDEN %s
-
-// Run with plist and make sure dsymutil finds it.
-RUN: mkdir -p %t.dSYM/Contents/Resources/DWARF/
-RUN: mkdir -p %t.mapdir
-RUN: cp %p/../Inputs/obfuscated.arm64 %t.dSYM/Contents/Resources/DWARF/
-RUN: cp %p/../Inputs/E828A486-8433-3A5E-B6DB-A6294D28133D.plist %t.dSYM/Contents/Resources/
-RUN: cp %p/../Inputs/obfuscated.map %t.mapdir/506AA50A-6B26-3B37-86D2-DC6EBD57B720.bcsymbolmap
-RUN: dsymutil --symbol-map %t.mapdir %t.dSYM 2>&1 | FileCheck --check-prefix=OBFUSCATING %s
-
-// Run without plist and make sure dsymutil doesn't crash.
-RUN: rm %t.dSYM/Contents/Resources/E828A486-8433-3A5E-B6DB-A6294D28133D.plist
-RUN: dsymutil --symbol-map %t.mapdir %t.dSYM 2>&1 | FileCheck --check-prefix=NOTOBFUSCATING %s
-
-// ----------------------------------------
-// Repeat the same steps for --linker parallel.
-RUN: dsymutil --linker parallel --symbol-map %p/../Inputs/obfuscated.map %p/../Inputs/obfuscated.arm64 -f -o - \
-RUN: | llvm-dwarfdump -v - \
-RUN: | FileCheck %s
-
-RUN: dsymutil --linker parallel --accelerator=Pub --symbol-map %p/../Inputs/obfuscated.map %p/../Inputs/obfuscated.arm64 -f -o - \
-RUN: | llvm-dwarfdump -v - \
-RUN: | FileCheck --check-prefix=PUB %s
-
-RUN: dsymutil --linker parallel --symbol-map %p/../Inputs/obfuscated.map %p/../Inputs/obfuscated.arm64 -f -o - \
-RUN: | llvm-dwarfdump -v - \
-RUN: | FileCheck --check-prefix=NOHIDDEN %s
-
-RUN: dsymutil --linker parallel --symbol-map %p/../Inputs/obfuscated.2.map %p/../Inputs/obfuscated.2.arm64 -f -o - \
-RUN: | llvm-dwarfdump -v - \
-RUN: | FileCheck --check-prefix=NOHIDDEN %s
-
-// Run with plist and make sure dsymutil finds it.
-RUN: mkdir -p %t.dSYM/Contents/Resources/DWARF/
-RUN: mkdir -p %t.mapdir
-RUN: cp %p/../Inputs/obfuscated.arm64 %t.dSYM/Contents/Resources/DWARF/
-RUN: cp %p/../Inputs/E828A486-8433-3A5E-B6DB-A6294D28133D.plist %t.dSYM/Contents/Resources/
-RUN: cp %p/../Inputs/obfuscated.map %t.mapdir/506AA50A-6B26-3B37-86D2-DC6EBD57B720.bcsymbolmap
-RUN: dsymutil --linker parallel --symbol-map %t.mapdir %t.dSYM 2>&1 | FileCheck --check-prefix=OBFUSCATING %s
-
-// Run without plist and make sure dsymutil doesn't crash.
-RUN: rm %t.dSYM/Contents/Resources/E828A486-8433-3A5E-B6DB-A6294D28133D.plist
-RUN: dsymutil --linker parallel --symbol-map %t.mapdir %t.dSYM 2>&1 | FileCheck --check-prefix=NOTOBFUSCATING %s
-
-OBFUSCATING-NOT: not unobfuscating
-
-NOTOBFUSCATING: not unobfuscating
-
-NOHIDDEN-NOT: __hidden#
-
-CHECK: .debug_info contents:
-
-CHECK: DW_TAG_compile_unit [1] *
-CHECK: DW_AT_producer [DW_FORM_strp] ( {{.*}} "Apple LLVM version 7.0.0 (clang-700.2.38.2)")
-CHECK: DW_AT_name [DW_FORM_strp] ( {{.*}} "main.c")
-CHECK: DW_AT_comp_dir [DW_FORM_strp] ( {{.*}} "/Users/steven/dev/alpena/tests/src")
-CHECK: DW_TAG_subprogram [2]
-CHECK: DW_AT_name [DW_FORM_strp] ( {{.*}} "main")
-
-CHECK: DW_TAG_compile_unit [1] *
-CHECK: DW_AT_producer [DW_FORM_strp] ( {{.*}} "Apple LLVM version 7.0.0 (clang-700.2.38.2)")
-CHECK: DW_AT_name [DW_FORM_strp] ( {{.*}} "one.c")
-CHECK: DW_AT_comp_dir [DW_FORM_strp] ( {{.*}} "/Users/steven/dev/alpena/tests/src")
-CHECK: DW_TAG_subprogram [2]
-CHECK: DW_AT_name [DW_FORM_strp] ( {{.*}} "one")
-
-CHECK: DW_TAG_compile_unit [1] *
-CHECK: DW_AT_producer [DW_FORM_strp] ( {{.*}} "Apple LLVM version 7.0.0 (clang-700.2.38.2)")
-CHECK: DW_AT_name [DW_FORM_strp] ( {{.*}} "two.c")
-CHECK: DW_AT_comp_dir [DW_FORM_strp] ( {{.*}} "/Users/steven/dev/alpena/tests/src")
-CHECK: DW_TAG_subprogram [2]
-CHECK: DW_AT_name [DW_FORM_strp] ( {{.*}} "two")
-
-CHECK: DW_TAG_compile_unit [1] *
-CHECK: DW_AT_producer [DW_FORM_strp] ( {{.*}} "Apple LLVM version 7.0.0 (clang-700.2.38.2)")
-CHECK: DW_AT_name [DW_FORM_strp] ( {{.*}} "three.c")
-CHECK: DW_AT_comp_dir [DW_FORM_strp] ( {{.*}} "/Users/steven/dev/alpena/tests/src")
-CHECK: DW_TAG_subprogram [2]
-CHECK: DW_AT_name [DW_FORM_strp] ( {{.*}} "three")
-
-CHECK: DW_TAG_compile_unit [1] *
-CHECK: DW_AT_producer [DW_FORM_strp] ( {{.*}} "Apple LLVM version 7.0.0 (clang-700.2.38.2)")
-CHECK: DW_AT_name [DW_FORM_strp] ( {{.*}} "four.c")
-CHECK: DW_AT_stmt_list [DW_FORM_data4] (0x0000011e)
-CHECK: DW_AT_comp_dir [DW_FORM_strp] ( {{.*}} "/Users/steven/dev/alpena/tests/src")
-CHECK: DW_TAG_subprogram [2]
-CHECK: DW_AT_name [DW_FORM_strp] ( {{.*}} "four")
-
-CHECK: DW_TAG_compile_unit [1] *
-CHECK: DW_AT_producer [DW_FORM_strp] ( {{.*}} "Apple LLVM version 7.0.0 (clang-700.2.38.2)")
-CHECK: DW_AT_name [DW_FORM_strp] ( {{.*}} "five.c")
-CHECK: DW_AT_comp_dir [DW_FORM_strp] ( {{.*}} "/Users/steven/dev/alpena/tests/src")
-CHECK: DW_TAG_subprogram [2]
-CHECK: DW_AT_name [DW_FORM_strp] ( {{.*}} "five")
-
-CHECK: DW_TAG_compile_unit [1] *
-CHECK: DW_AT_producer [DW_FORM_strp] ( {{.*}} "Apple LLVM version 7.0.0 (clang-700.2.38.2)")
-CHECK: DW_AT_name [DW_FORM_strp] ( {{.*}} "six.c")
-CHECK: DW_AT_comp_dir [DW_FORM_strp] ( {{.*}} "/Users/steven/dev/alpena/tests/src")
-CHECK: DW_TAG_subprogram [2]
-CHECK: DW_AT_name [DW_FORM_strp] ( {{.*}} "six")
-
-CHECK: .debug_line contents:
-CHECK: file_names[ 1]:
-CHECK: name: "main.c"
-CHECK: dir_index: 0
-CHECK: mod_time: 0x00000000
-CHECK: file_names[ 1]:
-CHECK: name: "one.c"
-CHECK: dir_index: 0
-CHECK: mod_time: 0x00000000
-CHECK: length: 0x00000000
-CHECK: file_names[ 1]:
-CHECK: name: "two.c"
-CHECK: dir_index: 0
-CHECK: mod_time: 0x00000000
-CHECK: length: 0x00000000
-CHECK: file_names[ 1]:
-CHECK: name: "three.c"
-CHECK: dir_index: 0
-CHECK: mod_time: 0x00000000
-CHECK: length: 0x00000000
-CHECK: file_names[ 1]:
-CHECK: name: "four.c"
-CHECK: dir_index: 0
-CHECK: mod_time: 0x00000000
-CHECK: length: 0x00000000
-CHECK: file_names[ 1]:
-CHECK: name: "five.c"
-CHECK: dir_index: 0
-CHECK: mod_time: 0x00000000
-CHECK: length: 0x00000000
-CHECK: file_names[ 1]:
-CHECK: name: "six.c"
-CHECK: dir_index: 0
-CHECK: mod_time: 0x00000000
-CHECK: length: 0x00000000
-
-PUB: .debug_pubnames contents:
-PUB: length = 0x00000017, format = DWARF32, version = 0x0002, unit_offset = 0x00000000, unit_size = 0x00000044
-PUB: 0x0000002e "main"
-PUB: length = 0x00000016, format = DWARF32, version = 0x0002, unit_offset = 0x00000044, unit_size = 0x00000044
-PUB: 0x0000002e "one"
-PUB: length = 0x00000016, format = DWARF32, version = 0x0002, unit_offset = 0x00000088, unit_size = 0x00000044
-PUB: 0x0000002e "two"
-PUB: length = 0x00000018, format = DWARF32, version = 0x0002, unit_offset = 0x000000cc, unit_size = 0x00000044
-PUB: 0x0000002e "three"
-PUB: length = 0x00000017, format = DWARF32, version = 0x0002, unit_offset = 0x00000110, unit_size = 0x00000044
-PUB: 0x0000002e "four"
-PUB: length = 0x00000017, format = DWARF32, version = 0x0002, unit_offset = 0x00000154, unit_size = 0x00000044
-PUB: 0x0000002e "five"
-PUB: length = 0x00000016, format = DWARF32, version = 0x0002, unit_offset = 0x00000198, unit_size = 0x00000044
-PUB: 0x0000002e "six"
-
-CHECK: .apple_names contents:
-
-CHECK: String: 0x00000091 "five"
-CHECK-NEXT: Data 0 [
-CHECK-NEXT: Atom[0]: 0x00000182
-CHECK-NEXT: ]
-CHECK: String: 0x0000009c "six"
-CHECK-NEXT: Data 0 [
-CHECK-NEXT: Atom[0]: 0x000001c6
-CHECK-NEXT: ]
-CHECK: String: 0x00000078 "three"
-CHECK-NEXT: Data 0 [
-CHECK-NEXT: Atom[0]: 0x000000fa
-CHECK-NEXT: ]
-CHECK: String: 0x0000006c "two"
-CHECK-NEXT: Data 0 [
-CHECK-NEXT: Atom[0]: 0x000000b6
-CHECK-NEXT: ]
-CHECK: String: 0x00000057 "main"
-CHECK-NEXT: Data 0 [
-CHECK-NEXT: Atom[0]: 0x0000002e
-CHECK-NEXT: ]
-CHECK: String: 0x00000085 "four"
-CHECK-NEXT: Data 0 [
-CHECK-NEXT: Atom[0]: 0x0000013e
-CHECK-NEXT: ]
-CHECK: String: 0x00000062 "one"
-CHECK-NEXT: Data 0 [
-CHECK-NEXT: Atom[0]: 0x00000072
-CHECK-NEXT: ]
diff --git a/llvm/test/tools/dsymutil/Inputs/obfuscated.2.arm64 b/llvm/test/tools/dsymutil/Inputs/obfuscated.2.arm64
deleted file mode 100644
index b40e023..0000000
--- a/llvm/test/tools/dsymutil/Inputs/obfuscated.2.arm64
+++ /dev/null
Binary files differ
diff --git a/llvm/test/tools/dsymutil/Inputs/obfuscated.2.map b/llvm/test/tools/dsymutil/Inputs/obfuscated.2.map
deleted file mode 100644
index 6efca59..0000000
--- a/llvm/test/tools/dsymutil/Inputs/obfuscated.2.map
+++ /dev/null
@@ -1,22 +0,0 @@
-BCSymbolMap Version: 2.0
-_two
-_three
-_four
-_five
-_six
-LLVM version 3.9.0 (ssh://git@stash.sd.apple.com/devtools/clang.git c74ae34bd917b77f9c848bd599dfde2813fb509f)
-main
-main.c
-/Volumes/Data/dev/BitcodeBuildTests/unit
-one
-one.c
-two
-two.c
-three
-three.c
-four
-four.c
-five
-five.c
-six
-six.c
diff --git a/llvm/test/tools/dsymutil/Inputs/obfuscated.arm64 b/llvm/test/tools/dsymutil/Inputs/obfuscated.arm64
deleted file mode 100644
index 8395798..0000000
--- a/llvm/test/tools/dsymutil/Inputs/obfuscated.arm64
+++ /dev/null
Binary files differ
diff --git a/llvm/test/tools/dsymutil/Inputs/obfuscated.map b/llvm/test/tools/dsymutil/Inputs/obfuscated.map
deleted file mode 100644
index 30fed8b..0000000
--- a/llvm/test/tools/dsymutil/Inputs/obfuscated.map
+++ /dev/null
@@ -1,17 +0,0 @@
-one
-two
-three
-four
-five
-six
-.str
-Apple LLVM version 7.0.0 (clang-700.2.38.2)
-main
-main.c
-/Users/steven/dev/alpena/tests/src
-one.c
-two.c
-three.c
-four.c
-five.c
-six.c
diff --git a/llvm/test/tools/dsymutil/cmdline.test b/llvm/test/tools/dsymutil/cmdline.test
index 36cf3f5..814252b 100644
--- a/llvm/test/tools/dsymutil/cmdline.test
+++ b/llvm/test/tools/dsymutil/cmdline.test
@@ -28,7 +28,6 @@ CHECK: -remarks-output-format <format>
CHECK: -remarks-prepend-path <path>
CHECK: -reproducer <mode>
CHECK: -statistics
-CHECK: -symbol-map
CHECK: -symtab
CHECK: {{-S}}
CHECK: -toolchain
diff --git a/llvm/test/tools/dxil-dis/debug-info.ll b/llvm/test/tools/dxil-dis/debug-info.ll
index 92dc654..96e0233 100644
--- a/llvm/test/tools/dxil-dis/debug-info.ll
+++ b/llvm/test/tools/dxil-dis/debug-info.ll
@@ -1,4 +1,4 @@
-; RUN: llc --filetype=obj %s -o - | dxil-dis -o - | FileCheck %s
+; RUN: llc --filetype=obj %s -o - -experimental-debuginfo-iterators=false | dxil-dis -o - | FileCheck %s
target triple = "dxil-unknown-shadermodel6.7-library"
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
diff --git a/llvm/test/tools/llc/new-pm/machine-function-properties.mir b/llvm/test/tools/llc/new-pm/machine-function-properties.mir
new file mode 100644
index 0000000..a9eb88e
--- /dev/null
+++ b/llvm/test/tools/llc/new-pm/machine-function-properties.mir
@@ -0,0 +1,12 @@
+# REQUIRES: asserts
+# RUN: not --crash llc -mtriple=x86_64-pc-linux-gnu -passes=require-all-machine-function-properties -filetype=null %s 2>&1 | FileCheck %s
+
+# CHECK: MachineFunctionProperties required by RequireAllMachineFunctionPropertiesPass pass are not met by function f.
+
+---
+name: f
+selected: false
+body: |
+ bb.0:
+ RET 0
+...
diff --git a/llvm/test/tools/llvm-ar/coff-symtab.test b/llvm/test/tools/llvm-ar/coff-symtab.test
new file mode 100644
index 0000000..4a57472
--- /dev/null
+++ b/llvm/test/tools/llvm-ar/coff-symtab.test
@@ -0,0 +1,91 @@
+Verify that llvm-ar uses the COFF archive format by ensuring that the archive map is sorted (the input defines b, c, a, but the archive map lists a, b, c).
+
+RUN: rm -rf %t.dir && split-file %s %t.dir && cd %t.dir
+
+RUN: yaml2obj coff-symtab.yaml -o coff-symtab.obj
+RUN: llvm-ar crs out.a coff-symtab.obj
+RUN: llvm-nm --print-armap out.a | FileCheck %s
+
+RUN: llvm-as coff-symtab.ll -o coff-symtab.bc
+RUN: llvm-ar crs out2.a coff-symtab.bc
+RUN: llvm-nm --print-armap out2.a | FileCheck %s
+
+RUN: yaml2obj elf.yaml -o coff-symtab.o
+RUN: llvm-ar crs --format coff out3.a coff-symtab.o
+RUN: llvm-nm --print-armap out3.a | FileCheck %s
+
+Create an empty archive with no symbol map, add a COFF file to it and check that the output archive is a COFF archive.
+
+RUN: llvm-ar --format coff rcS out4.a
+RUN: llvm-ar rs out4.a coff-symtab.obj
+RUN: llvm-nm --print-armap out4.a | FileCheck %s
+
+CHECK: Archive map
+CHECK-NEXT: a in coff-symtab
+CHECK-NEXT: b in coff-symtab
+CHECK-NEXT: c in coff-symtab
+CHECK-EMPTY:
+
+#--- coff-symtab.yaml
+--- !COFF
+header:
+ Machine: IMAGE_FILE_MACHINE_UNKNOWN
+ Characteristics: [ ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ Alignment: 4
+ SectionData: ''
+symbols:
+ - Name: b
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_FUNCTION
+ StorageClass: IMAGE_SYM_CLASS_EXTERNAL
+ - Name: c
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_FUNCTION
+ StorageClass: IMAGE_SYM_CLASS_EXTERNAL
+ - Name: a
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_FUNCTION
+ StorageClass: IMAGE_SYM_CLASS_EXTERNAL
+...
+
+
+#--- coff-symtab.ll
+target triple = "x86_64-unknown-windows-msvc"
+
+define void @b() { ret void }
+define void @c() { ret void }
+define void @a() { ret void }
+
+#--- elf.yaml
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+  Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_X86_64
+Sections:
+ - Name: .text
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ AddressAlign: 0x0000000000000004
+ Content: ''
+Symbols:
+ - Name: b
+ Binding: STB_GLOBAL
+ Section: .text
+ - Name: c
+ Binding: STB_GLOBAL
+ Section: .text
+ - Name: a
+ Binding: STB_GLOBAL
+ Section: .text
+...
diff --git a/llvm/test/tools/llvm-ar/ecsymbols.ll b/llvm/test/tools/llvm-ar/ecsymbols.ll
new file mode 100644
index 0000000..f49c991
--- /dev/null
+++ b/llvm/test/tools/llvm-ar/ecsymbols.ll
@@ -0,0 +1,19 @@
+;; Test that the ECSYMBOLS section is created when ARM64EC bitcode is used.
+
+; RUN: llvm-as %s -o %t.bc
+; RUN: rm -f %t.a
+; RUN: llvm-ar cr %t.a %t.bc
+; RUN: llvm-nm --print-armap %t.a | FileCheck %s
+
+; CHECK-NOT: Archive map
+; CHECK: Archive EC map
+; CHECK-NEXT: a in ecsymbols.ll.tmp.bc
+; CHECK-NEXT: b in ecsymbols.ll.tmp.bc
+; CHECK-NEXT: c in ecsymbols.ll.tmp.bc
+; CHECK-EMPTY:
+
+target triple = "arm64ec-unknown-windows-msvc"
+
+define void @b() { ret void }
+define void @c() { ret void }
+define void @a() { ret void }
diff --git a/llvm/test/tools/llvm-ar/ecsymbols.yaml b/llvm/test/tools/llvm-ar/ecsymbols.yaml
new file mode 100644
index 0000000..6cfe78d
--- /dev/null
+++ b/llvm/test/tools/llvm-ar/ecsymbols.yaml
@@ -0,0 +1,84 @@
+## Test that the ECSYMBOLS section is created when ARM64EC is used.
+
+# RUN: yaml2obj %s -o %t.arm64ec.obj -DMACHINE=IMAGE_FILE_MACHINE_ARM64EC
+# RUN: yaml2obj %s -o %t.arm64.obj -DMACHINE=IMAGE_FILE_MACHINE_ARM64
+# RUN: yaml2obj %s -o %t.amd64.obj -DMACHINE=IMAGE_FILE_MACHINE_AMD64
+
+## Create ARM64EC archive.
+# RUN: rm -f %t*.a
+# RUN: llvm-ar cr %t1.a %t.arm64ec.obj
+# RUN: llvm-nm --print-armap %t1.a | FileCheck --check-prefixes=NOMAP,ECMAP %s
+
+## Add ARM64 object to the archive.
+# RUN: llvm-ar r %t1.a %t.arm64.obj
+# RUN: llvm-nm --print-armap %t1.a | FileCheck --check-prefixes=MAP,ECMAP %s
+
+## Create ARM64 archive.
+# RUN: llvm-ar cr %t2.a %t.arm64.obj
+# RUN: llvm-nm --print-armap %t2.a | FileCheck --check-prefixes=MAP,NOECMAP %s
+
+## Add ARM64EC object to the archive.
+# RUN: llvm-ar r %t2.a %t.arm64ec.obj
+# RUN: llvm-nm --print-armap %t2.a | FileCheck --check-prefixes=MAP,ECMAP %s
+
+## Create mixed archive with ARM64 and ARM64EC members.
+# RUN: llvm-ar cr %t3.a %t.arm64ec.obj %t.arm64.obj
+# RUN: llvm-nm --print-armap %t3.a | FileCheck --check-prefixes=MAP,ECMAP %s
+
+## Create mixed archive with ARM64 and AMD64 members.
+# RUN: llvm-ar cr %t4.a %t.amd64.obj %t.arm64.obj
+# RUN: llvm-nm --print-armap %t4.a | FileCheck --check-prefixes=MAP,AMDECMAP %s
+
+## Create an archive with no symbol table.
+# RUN: llvm-ar crS %t5.a %t.amd64.obj %t.arm64.obj
+# RUN: llvm-nm --print-armap %t5.a | FileCheck --check-prefixes=NOMAP,NOECMAP %s
+
+# MAP: Archive map
+# MAP-NEXT: a in ecsymbols.yaml.tmp.arm64.obj
+# MAP-NEXT: b in ecsymbols.yaml.tmp.arm64.obj
+# MAP-NEXT: c in ecsymbols.yaml.tmp.arm64.obj
+# MAP-EMPTY:
+# NOMAP-NOT: Archive map
+
+# ECMAP: Archive EC map
+# ECMAP-NEXT: a in ecsymbols.yaml.tmp.arm64ec.obj
+# ECMAP-NEXT: b in ecsymbols.yaml.tmp.arm64ec.obj
+# ECMAP-NEXT: c in ecsymbols.yaml.tmp.arm64ec.obj
+# ECMAP-EMPTY:
+# NOECMAP-NOT: Archive EC map
+
+# AMDECMAP: Archive EC map
+# AMDECMAP-NEXT: a in ecsymbols.yaml.tmp.amd64.obj
+# AMDECMAP-NEXT: b in ecsymbols.yaml.tmp.amd64.obj
+# AMDECMAP-NEXT: c in ecsymbols.yaml.tmp.amd64.obj
+# AMDECMAP-EMPTY:
+
+--- !COFF
+header:
+ Machine: [[MACHINE]]
+ Characteristics: [ ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ Alignment: 4
+ SectionData: ''
+symbols:
+ - Name: b
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_FUNCTION
+ StorageClass: IMAGE_SYM_CLASS_EXTERNAL
+ - Name: c
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_FUNCTION
+ StorageClass: IMAGE_SYM_CLASS_EXTERNAL
+ - Name: a
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_FUNCTION
+ StorageClass: IMAGE_SYM_CLASS_EXTERNAL
+...
diff --git a/llvm/test/tools/llvm-ar/no-symtab.yaml b/llvm/test/tools/llvm-ar/no-symtab.yaml
new file mode 100644
index 0000000..7370c9b
--- /dev/null
+++ b/llvm/test/tools/llvm-ar/no-symtab.yaml
@@ -0,0 +1,32 @@
+## Create archives with no symtab in various formats and check that we can read them.
+
+# RUN: yaml2obj %s -o %t.o
+# RUN: rm -f %t.*.a
+
+# RUN: llvm-ar --format=gnu rcS %t.gnu.a %t.o
+# RUN: llvm-ar --format=coff rcS %t.coff.a %t.o
+# RUN: llvm-ar --format=darwin rcS %t.darwin.a %t.o
+# RUN: llvm-ar --format=bsd rcS %t.bsd.a %t.o
+# RUN: llvm-ar --format=bigarchive rcS %t.bigarchive.a %t.o
+
+# RUN: llvm-nm --print-armap %t.gnu.a | FileCheck %s
+# RUN: llvm-nm --print-armap %t.coff.a | FileCheck %s
+# RUN: llvm-nm --print-armap %t.darwin.a | FileCheck %s
+# RUN: llvm-nm --print-armap %t.bsd.a | FileCheck %s
+# RUN: llvm-nm --print-armap %t.bigarchive.a | FileCheck %s
+
+# CHECK-NOT: Archive map
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_X86_64
+Sections:
+ - Name: .text
+ Type: SHT_PROGBITS
+Symbols:
+ - Name: symbol
+ Binding: STB_GLOBAL
+ Section: .text
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/dw-at-specification.test b/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/dw-at-specification.test
index 9c9118e..b1b35e4f 100644
--- a/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/dw-at-specification.test
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/dw-at-specification.test
@@ -10,7 +10,7 @@
; The above test compiled with clang++ produces both a DW_AT_type and
; DW_AT_specification on the definition die for S::Arr, which previously caused
-; an assert in the LVELFReader:
+; an assert in the LVDWARFReader:
; $ clang++ -g -c dw-at-specification.cpp -o dw-at-specification.o
; RUN: llvm-debuginfo-analyzer --attribute=level,format,producer \
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/pr-57040-ignored-DW_FORM_implicit_const.test b/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/pr-57040-ignored-DW_FORM_implicit_const.test
index 7ee9f31..9df058c 100644
--- a/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/pr-57040-ignored-DW_FORM_implicit_const.test
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/pr-57040-ignored-DW_FORM_implicit_const.test
@@ -14,7 +14,7 @@
; DW_AT_decl_file DW_FORM_implicit_const 1
; DW_AT_decl_line DW_FORM_data1
-; Attributes with DW_FORM_implicit_const being ignored by the ELFReader,
+; Attributes with DW_FORM_implicit_const being ignored by the DWARFReader,
; causing {Parameter} and {TypeAlias} to omit line numbers.
; test.cpp
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-compare-logical-elements.test b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-compare-logical-elements.test
new file mode 100644
index 0000000..f52c9c7
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-compare-logical-elements.test
@@ -0,0 +1,106 @@
+; REQUIRES: webassembly-registered-target
+
+; Test case 1 - General options
+
+; test.cpp
+; 1 using INTPTR = const int *;
+; 2 int foo(INTPTR ParamPtr, unsigned ParamUnsigned, bool ParamBool) {
+; 3 if (ParamBool) {
+; 4 typedef int INTEGER;
+; 5 const INTEGER CONSTANT = 7;
+; 6 return CONSTANT;
+; 7 }
+; 8 return ParamUnsigned;
+; 9 }
+
+; Compare mode - Logical view.
+; The output shows in view form the 'missing (-), added (+)' elements,
+; giving more context by swapping the reference and target object files.
+
+; RUN: llvm-mc -arch=wasm32 -filetype=obj \
+; RUN: %p/Inputs/test-clang.s -o %t.test-clang.o
+
+; RUN: llvm-debuginfo-analyzer --attribute=level \
+; RUN: --compare=types \
+; RUN: --report=view \
+; RUN: --print=symbols,types \
+; RUN: %t.test-clang.o \
+; RUN: %p/../DWARF/Inputs/test-dwarf-gcc.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s
+
+; ONE: Reference: '{{.*}}test-clang.o'
+; ONE-NEXT: Target: 'test-dwarf-gcc.o'
+; ONE-EMPTY:
+; ONE-NEXT: Logical View:
+; ONE-NEXT: [000] {File} '{{.*}}test-clang.o'
+; ONE-EMPTY:
+; ONE-NEXT: [001] {CompileUnit} 'test.cpp'
+; ONE-NEXT: [002] 1 {TypeAlias} 'INTPTR' -> '* const int'
+; ONE-NEXT: [002] 2 {Function} extern not_inlined 'foo' -> 'int'
+; ONE-NEXT: [003] {Block}
+; ONE-NEXT: [004] 5 {Variable} 'CONSTANT' -> 'const INTEGER'
+; ONE-NEXT: +[004] 4 {TypeAlias} 'INTEGER' -> 'int'
+; ONE-NEXT: [003] 2 {Parameter} 'ParamBool' -> 'bool'
+; ONE-NEXT: [003] 2 {Parameter} 'ParamPtr' -> 'INTPTR'
+; ONE-NEXT: [003] 2 {Parameter} 'ParamUnsigned' -> 'unsigned int'
+; ONE-NEXT: -[003] 4 {TypeAlias} 'INTEGER' -> 'int'
+
+; Compare mode - Logical elements.
+; The output shows in tabular form the 'missing (-), added (+)' elements,
+; giving more context by swapping the reference and target object files.
+
+; RUN: llvm-debuginfo-analyzer --attribute=level \
+; RUN: --compare=types \
+; RUN: --report=list \
+; RUN: --print=symbols,types,summary \
+; RUN: %t.test-clang.o \
+; RUN: %p/../DWARF/Inputs/test-dwarf-gcc.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=TWO %s
+
+; TWO: Reference: '{{.*}}test-clang.o'
+; TWO-NEXT: Target: 'test-dwarf-gcc.o'
+; TWO-EMPTY:
+; TWO-NEXT: (1) Missing Types:
+; TWO-NEXT: -[003] 4 {TypeAlias} 'INTEGER' -> 'int'
+; TWO-EMPTY:
+; TWO-NEXT: (1) Added Types:
+; TWO-NEXT: +[004] 4 {TypeAlias} 'INTEGER' -> 'int'
+; TWO-EMPTY:
+; TWO-NEXT: ----------------------------------------
+; TWO-NEXT: Element Expected Missing Added
+; TWO-NEXT: ----------------------------------------
+; TWO-NEXT: Scopes 4 0 0
+; TWO-NEXT: Symbols 0 0 0
+; TWO-NEXT: Types 2 1 1
+; TWO-NEXT: Lines 0 0 0
+; TWO-NEXT: ----------------------------------------
+; TWO-NEXT: Total 6 1 1
+
+; Changing the 'Reference' and 'Target' order:
+
+; RUN: llvm-debuginfo-analyzer --attribute=level \
+; RUN: --compare=types \
+; RUN: --report=list \
+; RUN: --print=symbols,types,summary \
+; RUN: %p/../DWARF/Inputs/test-dwarf-gcc.o \
+; RUN: %t.test-clang.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=THR %s
+
+; THR: Reference: 'test-dwarf-gcc.o'
+; THR-NEXT: Target: '{{.*}}test-clang.o'
+; THR-EMPTY:
+; THR-NEXT: (1) Missing Types:
+; THR-NEXT: -[004] 4 {TypeAlias} 'INTEGER' -> 'int'
+; THR-EMPTY:
+; THR-NEXT: (1) Added Types:
+; THR-NEXT: +[003] 4 {TypeAlias} 'INTEGER' -> 'int'
+; THR-EMPTY:
+; THR-NEXT: ----------------------------------------
+; THR-NEXT: Element Expected Missing Added
+; THR-NEXT: ----------------------------------------
+; THR-NEXT: Scopes 4 0 0
+; THR-NEXT: Symbols 0 0 0
+; THR-NEXT: Types 2 1 1
+; THR-NEXT: Lines 0 0 0
+; THR-NEXT: ----------------------------------------
+; THR-NEXT: Total 6 1 1
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-print-basic-details.test b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-print-basic-details.test
new file mode 100644
index 0000000..4927086
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-print-basic-details.test
@@ -0,0 +1,120 @@
+; REQUIRES: webassembly-registered-target
+
+; Test case 1 - General options.
+
+; test.cpp
+; 1 using INTPTR = const int *;
+; 2 int foo(INTPTR ParamPtr, unsigned ParamUnsigned, bool ParamBool) {
+; 3 if (ParamBool) {
+; 4 typedef int INTEGER;
+; 5 const INTEGER CONSTANT = 7;
+; 6 return CONSTANT;
+; 7 }
+; 8 return ParamUnsigned;
+; 9 }
+
+; Print basic details.
+; The following command prints basic details for all the logical elements
+; sorted by the debug information internal offset; each element includes its
+; lexical level and debug info format.
+
+; RUN: llvm-mc -arch=wasm32 -filetype=obj \
+; RUN: %p/Inputs/test-clang.s -o %t.test-clang.o
+
+; RUN: llvm-debuginfo-analyzer --attribute=level,format \
+; RUN: --output-sort=offset \
+; RUN: --print=scopes,symbols,types,lines,instructions \
+; RUN: %t.test-clang.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s
+
+; RUN: llvm-debuginfo-analyzer --attribute=level,format \
+; RUN: --output-sort=offset \
+; RUN: --print=elements \
+; RUN: %t.test-clang.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s
+
+; ONE: Logical View:
+; ONE-NEXT: [000] {File} '{{.*}}test-clang.o' -> WASM
+; ONE-EMPTY:
+; ONE-NEXT: [001] {CompileUnit} 'test.cpp'
+; ONE-NEXT: [002] 2 {Function} extern not_inlined 'foo' -> 'int'
+; ONE-NEXT: [003] 2 {Parameter} 'ParamPtr' -> 'INTPTR'
+; ONE-NEXT: [003] 2 {Parameter} 'ParamUnsigned' -> 'unsigned int'
+; ONE-NEXT: [003] 2 {Parameter} 'ParamBool' -> 'bool'
+; ONE-NEXT: [003] {Block}
+; ONE-NEXT: [004] 5 {Variable} 'CONSTANT' -> 'const INTEGER'
+; ONE-NEXT: [004] 5 {Line}
+; ONE-NEXT: [004] {Code} 'i32.const 7'
+; ONE-NEXT: [004] {Code} 'local.set 10'
+; ONE-NEXT: [004] {Code} 'local.get 5'
+; ONE-NEXT: [004] {Code} 'local.get 10'
+; ONE-NEXT: [004] {Code} 'i32.store 12'
+; ONE-NEXT: [004] 6 {Line}
+; ONE-NEXT: [004] {Code} 'i32.const 7'
+; ONE-NEXT: [004] {Code} 'local.set 11'
+; ONE-NEXT: [004] {Code} 'local.get 5'
+; ONE-NEXT: [004] {Code} 'local.get 11'
+; ONE-NEXT: [004] {Code} 'i32.store 28'
+; ONE-NEXT: [004] {Code} 'br 1'
+; ONE-NEXT: [004] - {Line}
+; ONE-NEXT: [004] {Code} 'end'
+; ONE-NEXT: [003] 4 {TypeAlias} 'INTEGER' -> 'int'
+; ONE-NEXT: [003] 2 {Line}
+; ONE-NEXT: [003] {Code} 'nop'
+; ONE-NEXT: [003] {Code} 'end'
+; ONE-NEXT: [003] {Code} 'i64.div_s'
+; ONE-NEXT: [003] {Code} 'global.get 0'
+; ONE-NEXT: [003] {Code} 'local.set 3'
+; ONE-NEXT: [003] {Code} 'i32.const 32'
+; ONE-NEXT: [003] {Code} 'local.set 4'
+; ONE-NEXT: [003] {Code} 'local.get 3'
+; ONE-NEXT: [003] {Code} 'local.get 4'
+; ONE-NEXT: [003] {Code} 'i32.sub'
+; ONE-NEXT: [003] {Code} 'local.set 5'
+; ONE-NEXT: [003] {Code} 'local.get 5'
+; ONE-NEXT: [003] {Code} 'local.get 0'
+; ONE-NEXT: [003] {Code} 'i32.store 24'
+; ONE-NEXT: [003] {Code} 'local.get 5'
+; ONE-NEXT: [003] {Code} 'local.get 1'
+; ONE-NEXT: [003] {Code} 'i32.store 20'
+; ONE-NEXT: [003] {Code} 'local.get 2'
+; ONE-NEXT: [003] {Code} 'local.set 6'
+; ONE-NEXT: [003] {Code} 'local.get 5'
+; ONE-NEXT: [003] {Code} 'local.get 6'
+; ONE-NEXT: [003] {Code} 'i32.store8 19'
+; ONE-NEXT: [003] 3 {Line}
+; ONE-NEXT: [003] {Code} 'local.get 5'
+; ONE-NEXT: [003] {Code} 'i32.load8_u 19'
+; ONE-NEXT: [003] {Code} 'local.set 7'
+; ONE-NEXT: [003] 3 {Line}
+; ONE-NEXT: [003] {Code} 'i32.const 1'
+; ONE-NEXT: [003] {Code} 'local.set 8'
+; ONE-NEXT: [003] {Code} 'local.get 7'
+; ONE-NEXT: [003] {Code} 'local.get 8'
+; ONE-NEXT: [003] {Code} 'i32.and'
+; ONE-NEXT: [003] {Code} 'local.set 9'
+; ONE-NEXT: [003] {Code} 'block'
+; ONE-NEXT: [003] {Code} 'block'
+; ONE-NEXT: [003] {Code} 'local.get 9'
+; ONE-NEXT: [003] {Code} 'i32.eqz'
+; ONE-NEXT: [003] {Code} 'br_if 0'
+; ONE-NEXT: [003] 8 {Line}
+; ONE-NEXT: [003] {Code} 'local.get 5'
+; ONE-NEXT: [003] {Code} 'i32.load 20'
+; ONE-NEXT: [003] {Code} 'local.set 12'
+; ONE-NEXT: [003] 8 {Line}
+; ONE-NEXT: [003] {Code} 'local.get 5'
+; ONE-NEXT: [003] {Code} 'local.get 12'
+; ONE-NEXT: [003] {Code} 'i32.store 28'
+; ONE-NEXT: [003] - {Line}
+; ONE-NEXT: [003] {Code} 'end'
+; ONE-NEXT: [003] 9 {Line}
+; ONE-NEXT: [003] {Code} 'local.get 5'
+; ONE-NEXT: [003] {Code} 'i32.load 28'
+; ONE-NEXT: [003] {Code} 'local.set 13'
+; ONE-NEXT: [003] {Code} 'local.get 13'
+; ONE-NEXT: [003] {Code} 'return'
+; ONE-NEXT: [003] {Code} 'end'
+; ONE-NEXT: [003] 9 {Line}
+; ONE-NEXT: [003] {Code} 'unreachable'
+; ONE-NEXT: [002] 1 {TypeAlias} 'INTPTR' -> '* const int'
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-select-logical-elements.test b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-select-logical-elements.test
new file mode 100644
index 0000000..f50cc2d
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-select-logical-elements.test
@@ -0,0 +1,76 @@
+; REQUIRES: webassembly-registered-target
+
+; Test case 1 - General options
+
+; test.cpp
+; 1 using INTPTR = const int *;
+; 2 int foo(INTPTR ParamPtr, unsigned ParamUnsigned, bool ParamBool) {
+; 3 if (ParamBool) {
+; 4 typedef int INTEGER;
+; 5 const INTEGER CONSTANT = 7;
+; 6 return CONSTANT;
+; 7 }
+; 8 return ParamUnsigned;
+; 9 }
+
+; Select logical elements.
+; The following prints all 'instructions', 'symbols' and 'types' that
+; contain 'BLOCK' or '.store' in their names or types, using a tab layout
+; and reporting the number of matches.
+
+; RUN: llvm-mc -arch=wasm32 -filetype=obj \
+; RUN: %p/Inputs/test-clang.s -o %t.test-clang.o
+
+; RUN: llvm-debuginfo-analyzer --attribute=level \
+; RUN: --select-nocase --select-regex \
+; RUN: --select=BLOCK --select=.store \
+; RUN: --report=list \
+; RUN: --print=symbols,types,instructions,summary \
+; RUN: %t.test-clang.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s
+
+; ONE: Logical View:
+; ONE-NEXT: [000] {File} '{{.*}}test-clang.o'
+; ONE-EMPTY:
+; ONE-NEXT: [001] {CompileUnit} 'test.cpp'
+; ONE-NEXT: [003] {Code} 'block'
+; ONE-NEXT: [003] {Code} 'block'
+; ONE-NEXT: [004] {Code} 'i32.store 12'
+; ONE-NEXT: [003] {Code} 'i32.store 20'
+; ONE-NEXT: [003] {Code} 'i32.store 24'
+; ONE-NEXT: [004] {Code} 'i32.store 28'
+; ONE-NEXT: [003] {Code} 'i32.store 28'
+; ONE-NEXT: [003] {Code} 'i32.store8 19'
+; ONE-EMPTY:
+; ONE-NEXT: -----------------------------
+; ONE-NEXT: Element Total Printed
+; ONE-NEXT: -----------------------------
+; ONE-NEXT: Scopes 3 0
+; ONE-NEXT: Symbols 4 0
+; ONE-NEXT: Types 2 0
+; ONE-NEXT: Lines 62 8
+; ONE-NEXT: -----------------------------
+; ONE-NEXT: Total 71 8
+
+; RUN: llvm-debuginfo-analyzer --attribute=level \
+; RUN: --select-regex --select-nocase \
+; RUN: --select=INTe \
+; RUN: --report=list \
+; RUN: --print=symbols,types \
+; RUN: %t.test-clang.o \
+; RUN: %p/../DWARF/Inputs/test-dwarf-gcc.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=TWO %s
+
+; TWO: Logical View:
+; TWO-NEXT: [000] {File} '{{.*}}test-clang.o'
+; TWO-EMPTY:
+; TWO-NEXT: [001] {CompileUnit} 'test.cpp'
+; TWO-NEXT: [003] 4 {TypeAlias} 'INTEGER' -> 'int'
+; TWO-NEXT: [004] 5 {Variable} 'CONSTANT' -> 'const INTEGER'
+; TWO-EMPTY:
+; TWO-NEXT: Logical View:
+; TWO-NEXT: [000] {File} 'test-dwarf-gcc.o'
+; TWO-EMPTY:
+; TWO-NEXT: [001] {CompileUnit} 'test.cpp'
+; TWO-NEXT: [004] 4 {TypeAlias} 'INTEGER' -> 'int'
+; TWO-NEXT: [004] 5 {Variable} 'CONSTANT' -> 'const INTEGER'
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/02-wasm-logical-lines.test b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/02-wasm-logical-lines.test
new file mode 100644
index 0000000..101f6ab
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/02-wasm-logical-lines.test
@@ -0,0 +1,74 @@
+; REQUIRES: webassembly-registered-target
+
+; Test case 2 - Assembler instructions.
+
+; hello-world.cpp
+; 1 extern int printf(const char * format, ... );
+; 2
+; 3 int main()
+; 4 {
+; 5 printf("Hello, World\n");
+; 6 return 0;
+; 7 }
+
+; Logical lines.
+; The logical view shows the intermixed lines and assembler instructions,
+; allowing comparison of the code generated by the different toolchains.
+
+; RUN: llvm-mc -arch=wasm32 -filetype=obj \
+; RUN: %p/Inputs/hello-world-clang.s -o %t.hello-world-clang.o
+
+; RUN: llvm-debuginfo-analyzer --attribute=level,format,producer \
+; RUN: --print=lines,instructions \
+; RUN: %t.hello-world-clang.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s
+
+; ONE: Logical View:
+; ONE-NEXT: [000] {File} '{{.*}}hello-world-clang.o' -> WASM
+; ONE-EMPTY:
+; ONE-NEXT: [001] {CompileUnit} 'hello-world.cpp'
+; ONE-NEXT: [002] {Producer} 'clang version 19{{.*}}'
+; ONE-NEXT: [002] 3 {Function} extern not_inlined 'main' -> 'int'
+; ONE-NEXT: [003] 4 {Line}
+; ONE-NEXT: [003] {Code} 'nop'
+; ONE-NEXT: [003] {Code} 'rethrow 127'
+; ONE-NEXT: [003] {Code} 'global.get 0'
+; ONE-NEXT: [003] {Code} 'local.set 0'
+; ONE-NEXT: [003] {Code} 'i32.const 16'
+; ONE-NEXT: [003] {Code} 'local.set 1'
+; ONE-NEXT: [003] {Code} 'local.get 0'
+; ONE-NEXT: [003] {Code} 'local.get 1'
+; ONE-NEXT: [003] {Code} 'i32.sub'
+; ONE-NEXT: [003] {Code} 'local.set 2'
+; ONE-NEXT: [003] {Code} 'local.get 2'
+; ONE-NEXT: [003] {Code} 'global.set 0'
+; ONE-NEXT: [003] {Code} 'i32.const 0'
+; ONE-NEXT: [003] {Code} 'local.set 3'
+; ONE-NEXT: [003] {Code} 'local.get 2'
+; ONE-NEXT: [003] {Code} 'local.get 3'
+; ONE-NEXT: [003] {Code} 'i32.store 12'
+; ONE-NEXT: [003] 5 {Line}
+; ONE-NEXT: [003] {Code} 'i32.const 0'
+; ONE-NEXT: [003] {Code} 'local.set 4'
+; ONE-NEXT: [003] {Code} 'i32.const 0'
+; ONE-NEXT: [003] {Code} 'local.set 5'
+; ONE-NEXT: [003] {Code} 'local.get 4'
+; ONE-NEXT: [003] {Code} 'local.get 5'
+; ONE-NEXT: [003] {Code} 'call 0'
+; ONE-NEXT: [003] {Code} 'drop'
+; ONE-NEXT: [003] 6 {Line}
+; ONE-NEXT: [003] {Code} 'i32.const 0'
+; ONE-NEXT: [003] {Code} 'local.set 6'
+; ONE-NEXT: [003] {Code} 'i32.const 16'
+; ONE-NEXT: [003] {Code} 'local.set 7'
+; ONE-NEXT: [003] {Code} 'local.get 2'
+; ONE-NEXT: [003] {Code} 'local.get 7'
+; ONE-NEXT: [003] {Code} 'i32.add'
+; ONE-NEXT: [003] {Code} 'local.set 8'
+; ONE-NEXT: [003] {Code} 'local.get 8'
+; ONE-NEXT: [003] {Code} 'global.set 0'
+; ONE-NEXT: [003] {Code} 'local.get 6'
+; ONE-NEXT: [003] {Code} 'return'
+; ONE-NEXT: [003] {Code} 'end'
+; ONE-NEXT: [003] 6 {Line}
+; ONE-NEXT: [003] {Code} 'return'
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/03-wasm-incorrect-lexical-scope-typedef.test b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/03-wasm-incorrect-lexical-scope-typedef.test
new file mode 100644
index 0000000..eb05eca
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/03-wasm-incorrect-lexical-scope-typedef.test
@@ -0,0 +1,135 @@
+; REQUIRES: webassembly-registered-target
+
+; Test case 3 - Incorrect lexical scope for typedef.
+
+; pr-44884.cpp
+; 1 int bar(float Input) { return (int)Input; }
+; 2
+; 3 unsigned foo(char Param) {
+; 4 typedef int INT; // ** Definition for INT **
+; 5 INT Value = Param;
+; 6 {
+; 7 typedef float FLOAT; // ** Definition for FLOAT **
+; 8 {
+; 9 FLOAT Added = Value + Param;
+; 10 Value = bar(Added);
+; 11 }
+; 12 }
+; 13 return Value + Param;
+; 14 }
+
+; Lines 4 and 7 contain 2 typedefs, defined at different lexical
+; scopes.
+
+; The above test is used to illustrate a scope issue found in the
+; Clang compiler.
+; PR44884: https://bugs.llvm.org/show_bug.cgi?id=44884
+; PR44229: https://github.com/llvm/llvm-project/issues/44229
+
+; In the following logical views, we can see that the Clang compiler
+; emits both typedefs at the same lexical scope (3), which is wrong.
+; GCC emits the correct lexical scope for both typedefs.
+
+; RUN: llvm-mc -arch=wasm32 -filetype=obj \
+; RUN: %p/Inputs/pr-44884-clang.s -o %t.pr-44884-clang.o
+
+; RUN: llvm-debuginfo-analyzer --attribute=level,format,producer \
+; RUN: --output-sort=kind \
+; RUN: --print=symbols,types,lines \
+; RUN: %t.pr-44884-clang.o \
+; RUN: %p/../DWARF/Inputs/pr-44884-dwarf-gcc.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s
+
+; ONE: Logical View:
+; ONE-NEXT: [000] {File} '{{.*}}pr-44884-clang.o' -> WASM
+; ONE-EMPTY:
+; ONE-NEXT: [001] {CompileUnit} 'pr-44884.cpp'
+; ONE-NEXT: [002] {Producer} 'clang version 19{{.*}}'
+; ONE-NEXT: [002] 1 {Function} extern not_inlined 'bar' -> 'int'
+; ONE-NEXT: [003] 1 {Parameter} 'Input' -> 'float'
+; ONE-NEXT: [003] 1 {Line}
+; ONE-NEXT: [003] 1 {Line}
+; ONE-NEXT: [003] - {Line}
+; ONE-NEXT: [003] 1 {Line}
+; ONE-NEXT: [003] - {Line}
+; ONE-NEXT: [003] 1 {Line}
+; ONE-NEXT: [003] 1 {Line}
+; ONE-NEXT: [003] 1 {Line}
+; ONE-NEXT: [002] 3 {Function} extern not_inlined 'foo' -> 'unsigned int'
+; ONE-NEXT: [003] {Block}
+; ONE-NEXT: [004] 9 {Variable} 'Added' -> 'FLOAT'
+; ONE-NEXT: [004] 9 {Line}
+; ONE-NEXT: [004] 9 {Line}
+; ONE-NEXT: [004] 9 {Line}
+; ONE-NEXT: [004] 9 {Line}
+; ONE-NEXT: [004] 9 {Line}
+; ONE-NEXT: [004] 10 {Line}
+; ONE-NEXT: [004] 10 {Line}
+; ONE-NEXT: [004] 10 {Line}
+; ONE-NEXT: [004] 13 {Line}
+; ONE-NEXT: [003] 3 {Parameter} 'Param' -> 'char'
+; ONE-NEXT: [003] 7 {TypeAlias} 'FLOAT' -> 'float'
+; ONE-NEXT: [003] 4 {TypeAlias} 'INT' -> 'int'
+; ONE-NEXT: [003] 5 {Variable} 'Value' -> 'INT'
+; ONE-NEXT: [003] 3 {Line}
+; ONE-NEXT: [003] 5 {Line}
+; ONE-NEXT: [003] 5 {Line}
+; ONE-NEXT: [003] 13 {Line}
+; ONE-NEXT: [003] 13 {Line}
+; ONE-NEXT: [003] 13 {Line}
+; ONE-NEXT: [003] 13 {Line}
+; ONE-EMPTY:
+; ONE-NEXT: Logical View:
+; ONE-NEXT: [000] {File} 'pr-44884-dwarf-gcc.o' -> elf64-x86-64
+; ONE-EMPTY:
+; ONE-NEXT: [001] {CompileUnit} 'pr-44884.cpp'
+; ONE-NEXT: [002] {Producer} 'GNU C++14 10.3.0 {{.*}}'
+; ONE-NEXT: [002] 1 {Function} extern not_inlined 'bar' -> 'int'
+; ONE-NEXT: [003] 1 {Parameter} 'Input' -> 'float'
+; ONE-NEXT: [003] 1 {Line}
+; ONE-NEXT: [003] 1 {Line}
+; ONE-NEXT: [003] 1 {Line}
+; ONE-NEXT: [002] 3 {Function} extern not_inlined 'foo' -> 'unsigned int'
+; ONE-NEXT: [003] {Block}
+; ONE-NEXT: [004] {Block}
+; ONE-NEXT: [005] 9 {Variable} 'Added' -> 'FLOAT'
+; ONE-NEXT: [005] 9 {Line}
+; ONE-NEXT: [005] 9 {Line}
+; ONE-NEXT: [005] 9 {Line}
+; ONE-NEXT: [005] 10 {Line}
+; ONE-NEXT: [005] 13 {Line}
+; ONE-NEXT: [004] 7 {TypeAlias} 'FLOAT' -> 'float'
+; ONE-NEXT: [003] 3 {Parameter} 'Param' -> 'char'
+; ONE-NEXT: [003] 4 {TypeAlias} 'INT' -> 'int'
+; ONE-NEXT: [003] 5 {Variable} 'Value' -> 'INT'
+; ONE-NEXT: [003] 3 {Line}
+; ONE-NEXT: [003] 5 {Line}
+; ONE-NEXT: [003] 13 {Line}
+; ONE-NEXT: [003] 14 {Line}
+; ONE-NEXT: [003] 14 {Line}
+
+; Using the selection facilities, we can produce a simple tabular
+; output showing just the logical types that are 'Typedef'.
+
+; RUN: llvm-debuginfo-analyzer --attribute=level,format \
+; RUN: --output-sort=name \
+; RUN: --select-types=Typedef \
+; RUN: --report=list \
+; RUN: --print=types \
+; RUN: %t.pr-44884-clang.o \
+; RUN: %p/../DWARF/Inputs/pr-44884-dwarf-gcc.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=TWO %s
+
+; TWO: Logical View:
+; TWO-NEXT: [000] {File} '{{.*}}pr-44884-clang.o' -> WASM
+; TWO-EMPTY:
+; TWO-NEXT: [001] {CompileUnit} 'pr-44884.cpp'
+; TWO-NEXT: [003] 7 {TypeAlias} 'FLOAT' -> 'float'
+; TWO-NEXT: [003] 4 {TypeAlias} 'INT' -> 'int'
+; TWO-EMPTY:
+; TWO-NEXT: Logical View:
+; TWO-NEXT: [000] {File} 'pr-44884-dwarf-gcc.o' -> elf64-x86-64
+; TWO-EMPTY:
+; TWO-NEXT: [001] {CompileUnit} 'pr-44884.cpp'
+; TWO-NEXT: [004] 7 {TypeAlias} 'FLOAT' -> 'float'
+; TWO-NEXT: [003] 4 {TypeAlias} 'INT' -> 'int'
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/04-wasm-missing-nested-enumerators.test b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/04-wasm-missing-nested-enumerators.test
new file mode 100644
index 0000000..cafa51c
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/04-wasm-missing-nested-enumerators.test
@@ -0,0 +1,130 @@
+; REQUIRES: webassembly-registered-target
+
+; Test case 4 - Missing nested enumerations.
+
+; pr-46466.cpp
+; 1 struct Struct {
+; 2 union Union {
+; 3 enum NestedEnum { RED, BLUE };
+; 4 };
+; 5 Union U;
+; 6 };
+; 7
+; 8 Struct S;
+; 9 int test() {
+; 10 return S.U.BLUE;
+; 11 }
+
+; The above test is used to illustrate a scope issue found in the Clang
+; compiler.
+; PR46466: https://bugs.llvm.org/show_bug.cgi?id=46466
+; PR45811: https://github.com/llvm/llvm-project/issues/45811
+
+; In the following logical views, we can see that the DWARF debug
+; information generated by the Clang compiler does not include any
+; references to the enumerators 'RED' and 'BLUE'. The DWARF generated
+; by GCC does include such references.
+
+; RUN: llvm-mc -arch=wasm32 -filetype=obj \
+; RUN: %p/Inputs/pr-46466-clang.s -o %t.pr-46466-clang.o
+
+; RUN: llvm-debuginfo-analyzer --attribute=level,format,producer \
+; RUN: --output-sort=name \
+; RUN: --print=symbols,types \
+; RUN: %t.pr-46466-clang.o \
+; RUN: %p/../DWARF/Inputs/pr-46466-dwarf-gcc.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s
+
+; ONE: Logical View:
+; ONE-NEXT: [000] {File} '{{.*}}pr-46466-clang.o' -> WASM
+; ONE-EMPTY:
+; ONE-NEXT: [001] {CompileUnit} 'pr-46466.cpp'
+; ONE-NEXT: [002] {Producer} 'clang version 19{{.*}}'
+; ONE-NEXT: [002] 8 {Variable} extern 'S' -> 'Struct'
+; ONE-NEXT: [002] 1 {Struct} 'Struct'
+; ONE-NEXT: [003] 5 {Member} public 'U' -> 'Union'
+; ONE-EMPTY:
+; ONE-NEXT: Logical View:
+; ONE-NEXT: [000] {File} 'pr-46466-dwarf-gcc.o' -> elf64-x86-64
+; ONE-EMPTY:
+; ONE-NEXT: [001] {CompileUnit} 'pr-46466.cpp'
+; ONE-NEXT: [002] {Producer} 'GNU C++14 10.3.0 {{.*}}'
+; ONE-NEXT: [002] 8 {Variable} extern 'S' -> 'Struct'
+; ONE-NEXT: [002] 1 {Struct} 'Struct'
+; ONE-NEXT: [003] 5 {Member} public 'U' -> 'Union'
+; ONE-NEXT: [003] 2 {Union} 'Union'
+; ONE-NEXT: [004] 3 {Enumeration} 'NestedEnum' -> 'unsigned int'
+; ONE-NEXT: [005] {Enumerator} 'BLUE' = '0x1'
+; ONE-NEXT: [005] {Enumerator} 'RED' = '0x0'
+
+; Using the selection facilities, we can produce a logical view
+; showing just the logical types that are 'Enumerator' and their
+; parents. The logical view is sorted by type name.
+
+; RUN: llvm-debuginfo-analyzer --attribute=level,format \
+; RUN: --output-sort=name \
+; RUN: --select-types=Enumerator \
+; RUN: --report=parents \
+; RUN: --print=types \
+; RUN: %t.pr-46466-clang.o \
+; RUN: %p/../DWARF/Inputs/pr-46466-dwarf-gcc.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=TWO %s
+
+; TWO: Logical View:
+; TWO-NEXT: [000] {File} '{{.*}}pr-46466-clang.o' -> WASM
+; TWO-EMPTY:
+; TWO-NEXT: [001] {CompileUnit} 'pr-46466.cpp'
+; TWO-EMPTY:
+; TWO-NEXT: Logical View:
+; TWO-NEXT: [000] {File} 'pr-46466-dwarf-gcc.o' -> elf64-x86-64
+; TWO-EMPTY:
+; TWO-NEXT: [001] {CompileUnit} 'pr-46466.cpp'
+; TWO-NEXT: [002] 1 {Struct} 'Struct'
+; TWO-NEXT: [003] 2 {Union} 'Union'
+; TWO-NEXT: [004] 3 {Enumeration} 'NestedEnum' -> 'unsigned int'
+; TWO-NEXT: [005] {Enumerator} 'BLUE' = '0x1'
+; TWO-NEXT: [005] {Enumerator} 'RED' = '0x0'
+
+; Using the selection facilities, we can produce a simple tabular output
+; including a summary for the logical types that are 'Enumerator'. The
+; logical view is sorted by type name.
+
+; RUN: llvm-debuginfo-analyzer --attribute=level,format \
+; RUN: --output-sort=name \
+; RUN: --select-types=Enumerator \
+; RUN: --print=types,summary \
+; RUN: %t.pr-46466-clang.o \
+; RUN: %p/../DWARF/Inputs/pr-46466-dwarf-gcc.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=THR %s
+
+; THR: Logical View:
+; THR-NEXT: [000] {File} '{{.*}}pr-46466-clang.o' -> WASM
+; THR-EMPTY:
+; THR-NEXT: [001] {CompileUnit} 'pr-46466.cpp'
+; THR-EMPTY:
+; THR-NEXT: -----------------------------
+; THR-NEXT: Element Total Printed
+; THR-NEXT: -----------------------------
+; THR-NEXT: Scopes 4 0
+; THR-NEXT: Symbols 0 0
+; THR-NEXT: Types 0 0
+; THR-NEXT: Lines 0 0
+; THR-NEXT: -----------------------------
+; THR-NEXT: Total 4 0
+; THR-EMPTY:
+; THR-NEXT: Logical View:
+; THR-NEXT: [000] {File} 'pr-46466-dwarf-gcc.o' -> elf64-x86-64
+; THR-EMPTY:
+; THR-NEXT: [001] {CompileUnit} 'pr-46466.cpp'
+; THR-NEXT: [005] {Enumerator} 'BLUE' = '0x1'
+; THR-NEXT: [005] {Enumerator} 'RED' = '0x0'
+; THR-EMPTY:
+; THR-NEXT: -----------------------------
+; THR-NEXT: Element Total Printed
+; THR-NEXT: -----------------------------
+; THR-NEXT: Scopes 5 0
+; THR-NEXT: Symbols 0 0
+; THR-NEXT: Types 2 2
+; THR-NEXT: Lines 0 0
+; THR-NEXT: -----------------------------
+; THR-NEXT: Total 7 2
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/05-wasm-incorrect-lexical-scope-variable.test b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/05-wasm-incorrect-lexical-scope-variable.test
new file mode 100644
index 0000000..4348161
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/05-wasm-incorrect-lexical-scope-variable.test
@@ -0,0 +1,114 @@
+; REQUIRES: webassembly-registered-target
+
+; Test case 5 - Incorrect lexical scope variable.
+
+; pr-43860.cpp
+; 1 #include "definitions.h"
+; 2 forceinline int InlineFunction(int Param) {
+; 3 int Var_1 = Param;
+; 4 {
+; 5 int Var_2 = Param + Var_1;
+; 6 Var_1 = Var_2;
+; 7 }
+; 8 return Var_1;
+; 9 }
+; 10
+; 11 int test(int Param_1, int Param_2) {
+; 12 int A = Param_1;
+; 13 A += InlineFunction(Param_2);
+; 14 return A;
+; 15 }
+
+; The above test is used to illustrate a variable issue found in the
+; Clang compiler.
+; PR43860: https://bugs.llvm.org/show_bug.cgi?id=43860
+; PR43205: https://github.com/llvm/llvm-project/issues/43205
+
+; In the following logical views, we can see that the DWARF debug
+; information generated by the Clang compiler shows the variables
+; 'Var_1' and 'Var_2' are at the same lexical scope (4) in the function
+; 'InlineFunction'.
+; The DWARF generated by GCC shows those variables at the correct
+; lexical scopes: '3' and '4' respectively.
+
+; RUN: llvm-mc -arch=wasm32 -filetype=obj \
+; RUN: %p/Inputs/pr-43860-clang.s -o %t.pr-43860-clang.o
+
+; RUN: llvm-debuginfo-analyzer --attribute=level,format,producer \
+; RUN: --output-sort=name \
+; RUN: --print=symbols \
+; RUN: %t.pr-43860-clang.o \
+; RUN: %p/../DWARF/Inputs/pr-43860-dwarf-gcc.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s
+
+; ONE: Logical View:
+; ONE-NEXT: [000] {File} '{{.*}}pr-43860-clang.o' -> WASM
+; ONE-EMPTY:
+; ONE-NEXT: [001] {CompileUnit} 'pr-43860.cpp'
+; ONE-NEXT: [002] {Producer} 'clang version 19{{.*}}'
+; ONE-NEXT: [002] 2 {Function} extern inlined 'InlineFunction' -> 'int'
+; ONE-NEXT: [003] {Block}
+; ONE-NEXT: [004] 5 {Variable} 'Var_2' -> 'int'
+; ONE-NEXT: [003] 2 {Parameter} 'Param' -> 'int'
+; ONE-NEXT: [003] 3 {Variable} 'Var_1' -> 'int'
+; ONE-NEXT: [002] 11 {Function} extern not_inlined 'test' -> 'int'
+; ONE-NEXT: [003] 12 {Variable} 'A' -> 'int'
+; ONE-NEXT: [003] 13 {InlinedFunction} inlined 'InlineFunction' -> 'int'
+; ONE-NEXT: [004] {Block}
+; ONE-NEXT: [005] {Variable} 'Var_2' -> 'int'
+; ONE-NEXT: [004] {Parameter} 'Param' -> 'int'
+; ONE-NEXT: [004] {Variable} 'Var_1' -> 'int'
+; ONE-NEXT: [003] 11 {Parameter} 'Param_1' -> 'int'
+; ONE-NEXT: [003] 11 {Parameter} 'Param_2' -> 'int'
+; ONE-EMPTY:
+; ONE-NEXT: Logical View:
+; ONE-NEXT: [000] {File} 'pr-43860-dwarf-gcc.o' -> elf64-x86-64
+; ONE-EMPTY:
+; ONE-NEXT: [001] {CompileUnit} 'pr-43860.cpp'
+; ONE-NEXT: [002] {Producer} 'GNU C++14 10.3.0 {{.*}}'
+; ONE-NEXT: [002] 2 {Function} extern declared_inlined 'InlineFunction' -> 'int'
+; ONE-NEXT: [003] {Block}
+; ONE-NEXT: [004] 5 {Variable} 'Var_2' -> 'int'
+; ONE-NEXT: [003] 2 {Parameter} 'Param' -> 'int'
+; ONE-NEXT: [003] 3 {Variable} 'Var_1' -> 'int'
+; ONE-NEXT: [002] 11 {Function} extern not_inlined 'test' -> 'int'
+; ONE-NEXT: [003] 12 {Variable} 'A' -> 'int'
+; ONE-NEXT: [003] 13 {InlinedFunction} declared_inlined 'InlineFunction' -> 'int'
+; ONE-NEXT: [004] {Block}
+; ONE-NEXT: [005] {Variable} 'Var_2' -> 'int'
+; ONE-NEXT: [004] {Parameter} 'Param' -> 'int'
+; ONE-NEXT: [004] {Variable} 'Var_1' -> 'int'
+; ONE-NEXT: [003] 11 {Parameter} 'Param_1' -> 'int'
+; ONE-NEXT: [003] 11 {Parameter} 'Param_2' -> 'int'
+
+; Using the selection facilities, we can produce a simple tabular output
+; showing just the logical elements that have the 'var' pattern in their
+; name. The logical view is sorted by variable name.
+
+; RUN: llvm-debuginfo-analyzer --attribute=level,format \
+; RUN: --output-sort=name \
+; RUN: --select-regex --select-nocase \
+; RUN: --select=Var \
+; RUN: --report=list \
+; RUN: --print=symbols \
+; RUN: %t.pr-43860-clang.o \
+; RUN: %p/../DWARF/Inputs/pr-43860-dwarf-gcc.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=TWO %s
+
+; TWO: Logical View:
+; TWO-NEXT: [000] {File} '{{.*}}pr-43860-clang.o' -> WASM
+; TWO-EMPTY:
+; TWO-NEXT: [001] {CompileUnit} 'pr-43860.cpp'
+; TWO-NEXT: [004] {Variable} 'Var_1' -> 'int'
+; TWO-NEXT: [003] 3 {Variable} 'Var_1' -> 'int'
+; TWO-NEXT: [005] {Variable} 'Var_2' -> 'int'
+; TWO-NEXT: [004] 5 {Variable} 'Var_2' -> 'int'
+; TWO-EMPTY:
+; TWO-NEXT: Logical View:
+; TWO-NEXT: [000] {File} 'pr-43860-dwarf-gcc.o' -> elf64-x86-64
+; TWO-EMPTY:
+; TWO-NEXT: [001] {CompileUnit} 'pr-43860.cpp'
+; TWO-NEXT: [004] {Variable} 'Var_1' -> 'int'
+; TWO-NEXT: [003] 3 {Variable} 'Var_1' -> 'int'
+; TWO-NEXT: [005] {Variable} 'Var_2' -> 'int'
+; TWO-NEXT: [004] 5 {Variable} 'Var_2' -> 'int'
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/06-wasm-full-logical-view.test b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/06-wasm-full-logical-view.test
new file mode 100644
index 0000000..81b78ba
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/06-wasm-full-logical-view.test
@@ -0,0 +1,158 @@
+; REQUIRES: webassembly-registered-target
+
+; Test case 6 - Full logical view
+
+; test.cpp
+; 1 using INTPTR = const int *;
+; 2 int foo(INTPTR ParamPtr, unsigned ParamUnsigned, bool ParamBool) {
+; 3 if (ParamBool) {
+; 4 typedef int INTEGER;
+; 5 const INTEGER CONSTANT = 7;
+; 6 return CONSTANT;
+; 7 }
+; 8 return ParamUnsigned;
+; 9 }
+
+; Print low-level details.
+; The following command prints low-level information that includes
+; offsets within the debug information section, debug location
+; operands, linkage names, etc.
+
+; RUN: llvm-mc -arch=wasm32 -filetype=obj \
+; RUN: %p/Inputs/test-clang.s -o %t.test-clang.o
+
+; RUN: llvm-debuginfo-analyzer --attribute=all \
+; RUN: --print=all \
+; RUN: %t.test-clang.o 2>&1 | \
+; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s
+
+; ONE: Logical View:
+; ONE-NEXT: [0x0000000000][000] {File} '{{.*}}test-clang.o' -> WASM
+; ONE-EMPTY:
+; ONE-NEXT: [0x000000000b][001] {CompileUnit} 'test.cpp'
+; ONE-NEXT: [0x000000000b][002] {Producer} 'clang version 19{{.*}}'
+; ONE-NEXT: {Directory} '{{.*}}/general'
+; ONE-NEXT: {File} 'test.cpp'
+; ONE-NEXT: {Public} 'foo' [0x0000000002:0x000000007f]
+; ONE-NEXT: [0x000000000b][002] {Range} Lines 2:9 [0x0000000002:0x000000007f]
+; ONE-NEXT: [0x00000000b3][002] {BaseType} 'bool'
+; ONE-NEXT: [0x0000000090][002] {BaseType} 'int'
+; ONE-NEXT: [0x00000000ac][002] {BaseType} 'unsigned int'
+; ONE-EMPTY:
+; ONE-NEXT: [0x0000000097][002] {Source} '{{.*}}general/test.cpp'
+; ONE-NEXT: [0x0000000097][002] 1 {TypeAlias} 'INTPTR' -> [0x00000000a2]'* const int'
+; ONE-NEXT: [0x0000000026][002] 2 {Function} extern not_inlined 'foo' -> [0x0000000090]'int'
+; ONE-NEXT: [0x0000000026][003] {Range} Lines 2:9 [0x0000000002:0x000000007f]
+; ONE-NEXT: [0x0000000026][003] {Linkage} 0x3 '_Z3fooPKijb'
+; ONE-NEXT: [0x000000006c][003] {Block}
+; ONE-NEXT: [0x000000006c][004] {Range} Lines 5:0 [0x000000004c:0x0000000064]
+; ONE-NEXT: [0x0000000075][004] 5 {Variable} 'CONSTANT' -> [0x00000000ba]'const INTEGER'
+; ONE-NEXT: [0x0000000075][005] {Coverage} 100.00%
+; ONE-NEXT: [0x0000000076][005] {Location}
+; ONE-NEXT: [0x0000000076][006] {Entry} fbreg 12
+; ONE-NEXT: [0x000000004c][004] 5 {Line} {NewStatement} '{{.*}}/general/test.cpp'
+; ONE-NEXT: [0x000000004c][004] {Code} 'i32.const 7'
+; ONE-NEXT: [0x000000004e][004] {Code} 'local.set 10'
+; ONE-NEXT: [0x0000000050][004] {Code} 'local.get 5'
+; ONE-NEXT: [0x0000000052][004] {Code} 'local.get 10'
+; ONE-NEXT: [0x0000000054][004] {Code} 'i32.store 12'
+; ONE-NEXT: [0x0000000057][004] 6 {Line} {NewStatement} '{{.*}}/general/test.cpp'
+; ONE-NEXT: [0x0000000057][004] {Code} 'i32.const 7'
+; ONE-NEXT: [0x0000000059][004] {Code} 'local.set 11'
+; ONE-NEXT: [0x000000005b][004] {Code} 'local.get 5'
+; ONE-NEXT: [0x000000005d][004] {Code} 'local.get 11'
+; ONE-NEXT: [0x000000005f][004] {Code} 'i32.store 28'
+; ONE-NEXT: [0x0000000062][004] {Code} 'br 1'
+; ONE-NEXT: [0x0000000064][004] 0 {Line} '{{.*}}/general/test.cpp'
+; ONE-NEXT: [0x0000000064][004] {Code} 'end'
+; ONE-NEXT: [0x000000005e][003] 2 {Parameter} 'ParamBool' -> [0x00000000b3]'bool'
+; ONE-NEXT: [0x000000005e][004] {Coverage} 100.00%
+; ONE-NEXT: [0x000000005f][004] {Location}
+; ONE-NEXT: [0x000000005f][005] {Entry} fbreg 19
+; ONE-NEXT: [0x0000000042][003] 2 {Parameter} 'ParamPtr' -> [0x0000000097]'INTPTR'
+; ONE-NEXT: [0x0000000042][004] {Coverage} 100.00%
+; ONE-NEXT: [0x0000000043][004] {Location}
+; ONE-NEXT: [0x0000000043][005] {Entry} fbreg 24
+; ONE-NEXT: [0x0000000050][003] 2 {Parameter} 'ParamUnsigned' -> [0x00000000ac]'unsigned int'
+; ONE-NEXT: [0x0000000050][004] {Coverage} 100.00%
+; ONE-NEXT: [0x0000000051][004] {Location}
+; ONE-NEXT: [0x0000000051][005] {Entry} fbreg 20
+; ONE-NEXT: [0x0000000084][003] 4 {TypeAlias} 'INTEGER' -> [0x0000000090]'int'
+; ONE-NEXT: [0x0000000002][003] 2 {Line} {NewStatement} '{{.*}}/general/test.cpp'
+; ONE-NEXT: [0x0000000002][003] {Code} 'nop'
+; ONE-NEXT: [0x0000000003][003] {Code} 'end'
+; ONE-NEXT: [0x0000000004][003] {Code} 'i64.div_s'
+; ONE-NEXT: [0x0000000005][003] {Code} 'global.get 0'
+; ONE-NEXT: [0x000000000b][003] {Code} 'local.set 3'
+; ONE-NEXT: [0x000000000d][003] {Code} 'i32.const 32'
+; ONE-NEXT: [0x000000000f][003] {Code} 'local.set 4'
+; ONE-NEXT: [0x0000000011][003] {Code} 'local.get 3'
+; ONE-NEXT: [0x0000000013][003] {Code} 'local.get 4'
+; ONE-NEXT: [0x0000000015][003] {Code} 'i32.sub'
+; ONE-NEXT: [0x0000000016][003] {Code} 'local.set 5'
+; ONE-NEXT: [0x0000000018][003] {Code} 'local.get 5'
+; ONE-NEXT: [0x000000001a][003] {Code} 'local.get 0'
+; ONE-NEXT: [0x000000001c][003] {Code} 'i32.store 24'
+; ONE-NEXT: [0x000000001f][003] {Code} 'local.get 5'
+; ONE-NEXT: [0x0000000021][003] {Code} 'local.get 1'
+; ONE-NEXT: [0x0000000023][003] {Code} 'i32.store 20'
+; ONE-NEXT: [0x0000000026][003] {Code} 'local.get 2'
+; ONE-NEXT: [0x0000000028][003] {Code} 'local.set 6'
+; ONE-NEXT: [0x000000002a][003] {Code} 'local.get 5'
+; ONE-NEXT: [0x000000002c][003] {Code} 'local.get 6'
+; ONE-NEXT: [0x000000002e][003] {Code} 'i32.store8 19'
+; ONE-NEXT: [0x0000000031][003] 3 {Line} {NewStatement} {PrologueEnd} '{{.*}}/general/test.cpp'
+; ONE-NEXT: [0x0000000031][003] {Code} 'local.get 5'
+; ONE-NEXT: [0x0000000033][003] {Code} 'i32.load8_u 19'
+; ONE-NEXT: [0x0000000036][003] {Code} 'local.set 7'
+; ONE-NEXT: [0x0000000038][003] 3 {Line} '{{.*}}/general/test.cpp'
+; ONE-NEXT: [0x0000000038][003] {Code} 'i32.const 1'
+; ONE-NEXT: [0x000000003a][003] {Code} 'local.set 8'
+; ONE-NEXT: [0x000000003c][003] {Code} 'local.get 7'
+; ONE-NEXT: [0x000000003e][003] {Code} 'local.get 8'
+; ONE-NEXT: [0x0000000040][003] {Code} 'i32.and'
+; ONE-NEXT: [0x0000000041][003] {Code} 'local.set 9'
+; ONE-NEXT: [0x0000000043][003] {Code} 'block'
+; ONE-NEXT: [0x0000000045][003] {Code} 'block'
+; ONE-NEXT: [0x0000000047][003] {Code} 'local.get 9'
+; ONE-NEXT: [0x0000000049][003] {Code} 'i32.eqz'
+; ONE-NEXT: [0x000000004a][003] {Code} 'br_if 0'
+; ONE-NEXT: [0x0000000065][003] 8 {Line} {NewStatement} '{{.*}}/general/test.cpp'
+; ONE-NEXT: [0x0000000065][003] {Code} 'local.get 5'
+; ONE-NEXT: [0x0000000067][003] {Code} 'i32.load 20'
+; ONE-NEXT: [0x000000006a][003] {Code} 'local.set 12'
+; ONE-NEXT: [0x000000006c][003] 8 {Line} '{{.*}}/general/test.cpp'
+; ONE-NEXT: [0x000000006c][003] {Code} 'local.get 5'
+; ONE-NEXT: [0x000000006e][003] {Code} 'local.get 12'
+; ONE-NEXT: [0x0000000070][003] {Code} 'i32.store 28'
+; ONE-NEXT: [0x0000000073][003] 0 {Line} '{{.*}}/general/test.cpp'
+; ONE-NEXT: [0x0000000073][003] {Code} 'end'
+; ONE-NEXT: [0x0000000074][003] 9 {Line} {NewStatement} '{{.*}}/general/test.cpp'
+; ONE-NEXT: [0x0000000074][003] {Code} 'local.get 5'
+; ONE-NEXT: [0x0000000076][003] {Code} 'i32.load 28'
+; ONE-NEXT: [0x0000000079][003] {Code} 'local.set 13'
+; ONE-NEXT: [0x000000007b][003] {Code} 'local.get 13'
+; ONE-NEXT: [0x000000007d][003] {Code} 'return'
+; ONE-NEXT: [0x000000007e][003] {Code} 'end'
+; ONE-NEXT: [0x000000007f][003] 9 {Line} {NewStatement} {EndSequence} '{{.*}}/general/test.cpp'
+; ONE-NEXT: [0x000000007f][003] {Code} 'unreachable'
+; ONE-EMPTY:
+; ONE-NEXT: -----------------------------
+; ONE-NEXT: Element Total Printed
+; ONE-NEXT: -----------------------------
+; ONE-NEXT: Scopes 3 3
+; ONE-NEXT: Symbols 4 4
+; ONE-NEXT: Types 5 5
+; ONE-NEXT: Lines 73 73
+; ONE-NEXT: -----------------------------
+; ONE-NEXT: Total 85 85
+; ONE-EMPTY:
+; ONE-NEXT: Scope Sizes:
+; ONE-NEXT: 180 (100.00%) : [0x000000000b][001] {CompileUnit} 'test.cpp'
+; ONE-NEXT: 105 ( 58.33%) : [0x0000000026][002] 2 {Function} extern not_inlined 'foo' -> [0x0000000090]'int'
+; ONE-NEXT: 23 ( 12.78%) : [0x000000006c][003] {Block}
+; ONE-EMPTY:
+; ONE-NEXT: Totals by lexical level:
+; ONE-NEXT: [001]: 180 (100.00%)
+; ONE-NEXT: [002]: 105 ( 58.33%)
+; ONE-NEXT: [003]: 23 ( 12.78%)
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/definitions.h b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/definitions.h
new file mode 100644
index 0000000..dfbd3db
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/definitions.h
@@ -0,0 +1,30 @@
+//-----------------------------------------------------------------------------
+// Definitions.
+//-----------------------------------------------------------------------------
+#ifndef SUITE_DEFINITIONS_H
+#define SUITE_DEFINITIONS_H
+
+#ifdef _MSC_VER
+#define forceinline __forceinline
+#define OPTIMIZE_OFF __pragma(optimize("", off))
+#define OPTIMIZE_ON __pragma(optimize("", on))
+#elif defined(__clang__)
+#if __has_attribute(__always_inline__)
+#define forceinline inline __attribute__((__always_inline__))
+#else
+#define forceinline inline
+#endif
+#define OPTIMIZE_OFF _Pragma("clang optimize off")
+#define OPTIMIZE_ON _Pragma("clang optimize on")
+#elif defined(__GNUC__)
+#define forceinline inline __attribute__((__always_inline__))
+#define OPTIMIZE_OFF _Pragma("GCC optimize off")
+#define OPTIMIZE_ON _Pragma("GCC optimize on")
+#else
+#define forceinline inline
+#define OPTIMIZE_OFF
+#define OPTIMIZE_ON
+#error
+#endif
+
+#endif // SUITE_DEFINITIONS_H
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/hello-world-clang.s b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/hello-world-clang.s
new file mode 100644
index 0000000..bfba259
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/hello-world-clang.s
@@ -0,0 +1,286 @@
+ .text
+ .file "hello-world.cpp"
+ .file 1 "/data/projects/scripts/regression-suite/input/general" "hello-world.cpp"
+ .globaltype __stack_pointer, i32
+ .functype __original_main () -> (i32)
+ .functype _Z6printfPKcz (i32, i32) -> (i32)
+ .functype main (i32, i32) -> (i32)
+ .section .text.__original_main,"",@
+ .hidden __original_main # -- Begin function __original_main
+ .globl __original_main
+ .type __original_main,@function
+__original_main: # @__original_main
+.Lfunc_begin0:
+ .loc 1 4 0 # hello-world.cpp:4:0
+ .functype __original_main () -> (i32)
+ .local i32, i32, i32, i32, i32, i32, i32, i32, i32
+# %bb.0: # %entry
+ global.get __stack_pointer
+ local.set 0
+ i32.const 16
+ local.set 1
+ local.get 0
+ local.get 1
+ i32.sub
+ local.set 2
+ local.get 2
+ global.set __stack_pointer
+ i32.const 0
+ local.set 3
+ local.get 2
+ local.get 3
+ i32.store 12
+.Ltmp0:
+ .loc 1 5 3 prologue_end # hello-world.cpp:5:3
+ i32.const .L.str
+ local.set 4
+ i32.const 0
+ local.set 5
+ local.get 4
+ local.get 5
+ call _Z6printfPKcz
+ drop
+ .loc 1 6 3 # hello-world.cpp:6:3
+ i32.const 0
+ local.set 6
+ i32.const 16
+ local.set 7
+ local.get 2
+ local.get 7
+ i32.add
+ local.set 8
+ local.get 8
+ global.set __stack_pointer
+ local.get 6
+ return
+ end_function
+.Ltmp1:
+.Lfunc_end0:
+ # -- End function
+ .section .text.main,"",@
+ .hidden main # -- Begin function main
+ .globl main
+ .type main,@function
+main: # @main
+.Lfunc_begin1:
+ .functype main (i32, i32) -> (i32)
+ .local i32
+# %bb.0: # %body
+ call __original_main
+ local.set 2
+ local.get 2
+ return
+ end_function
+.Lfunc_end1:
+ # -- End function
+ .type .L.str,@object # @.str
+ .section .rodata..L.str,"S",@
+.L.str:
+ .asciz "Hello, World\n"
+ .size .L.str, 14
+
+ .globl __main_void
+ .type __main_void,@function
+ .hidden __main_void
+.set __main_void, __original_main
+ .section .debug_abbrev,"",@
+ .int8 1 # Abbreviation Code
+ .int8 17 # DW_TAG_compile_unit
+ .int8 1 # DW_CHILDREN_yes
+ .int8 37 # DW_AT_producer
+ .int8 14 # DW_FORM_strp
+ .int8 19 # DW_AT_language
+ .int8 5 # DW_FORM_data2
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 16 # DW_AT_stmt_list
+ .int8 23 # DW_FORM_sec_offset
+ .int8 27 # DW_AT_comp_dir
+ .int8 14 # DW_FORM_strp
+ .int8 17 # DW_AT_low_pc
+ .int8 1 # DW_FORM_addr
+ .int8 18 # DW_AT_high_pc
+ .int8 6 # DW_FORM_data4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 2 # Abbreviation Code
+ .int8 52 # DW_TAG_variable
+ .int8 0 # DW_CHILDREN_no
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 2 # DW_AT_location
+ .int8 24 # DW_FORM_exprloc
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 3 # Abbreviation Code
+ .int8 1 # DW_TAG_array_type
+ .int8 1 # DW_CHILDREN_yes
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 4 # Abbreviation Code
+ .int8 33 # DW_TAG_subrange_type
+ .int8 0 # DW_CHILDREN_no
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 55 # DW_AT_count
+ .int8 11 # DW_FORM_data1
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 5 # Abbreviation Code
+ .int8 38 # DW_TAG_const_type
+ .int8 0 # DW_CHILDREN_no
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 6 # Abbreviation Code
+ .int8 36 # DW_TAG_base_type
+ .int8 0 # DW_CHILDREN_no
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 62 # DW_AT_encoding
+ .int8 11 # DW_FORM_data1
+ .int8 11 # DW_AT_byte_size
+ .int8 11 # DW_FORM_data1
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 7 # Abbreviation Code
+ .int8 36 # DW_TAG_base_type
+ .int8 0 # DW_CHILDREN_no
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 11 # DW_AT_byte_size
+ .int8 11 # DW_FORM_data1
+ .int8 62 # DW_AT_encoding
+ .int8 11 # DW_FORM_data1
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 8 # Abbreviation Code
+ .int8 46 # DW_TAG_subprogram
+ .int8 0 # DW_CHILDREN_no
+ .int8 17 # DW_AT_low_pc
+ .int8 1 # DW_FORM_addr
+ .int8 18 # DW_AT_high_pc
+ .int8 6 # DW_FORM_data4
+ .int8 64 # DW_AT_frame_base
+ .int8 24 # DW_FORM_exprloc
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 63 # DW_AT_external
+ .int8 25 # DW_FORM_flag_present
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 0 # EOM(3)
+ .section .debug_info,"",@
+.Lcu_begin0:
+ .int32 .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+.Ldebug_info_start0:
+ .int16 4 # DWARF version number
+ .int32 .debug_abbrev0 # Offset Into Abbrev. Section
+ .int8 4 # Address Size (in bytes)
+ .int8 1 # Abbrev [1] 0xb:0x67 DW_TAG_compile_unit
+ .int32 .Linfo_string0 # DW_AT_producer
+ .int16 33 # DW_AT_language
+ .int32 .Linfo_string1 # DW_AT_name
+ .int32 .Lline_table_start0 # DW_AT_stmt_list
+ .int32 .Linfo_string2 # DW_AT_comp_dir
+ .int32 .Lfunc_begin0 # DW_AT_low_pc
+ .int32 .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .int8 2 # Abbrev [2] 0x26:0xd DW_TAG_variable
+ .int32 51 # DW_AT_type
+ .int8 1 # DW_AT_decl_file
+ .int8 5 # DW_AT_decl_line
+ .int8 5 # DW_AT_location
+ .int8 3
+ .int32 .L.str
+ .int8 3 # Abbrev [3] 0x33:0xc DW_TAG_array_type
+ .int32 63 # DW_AT_type
+ .int8 4 # Abbrev [4] 0x38:0x6 DW_TAG_subrange_type
+ .int32 75 # DW_AT_type
+ .int8 14 # DW_AT_count
+ .int8 0 # End Of Children Mark
+ .int8 5 # Abbrev [5] 0x3f:0x5 DW_TAG_const_type
+ .int32 68 # DW_AT_type
+ .int8 6 # Abbrev [6] 0x44:0x7 DW_TAG_base_type
+ .int32 .Linfo_string3 # DW_AT_name
+ .int8 6 # DW_AT_encoding
+ .int8 1 # DW_AT_byte_size
+ .int8 7 # Abbrev [7] 0x4b:0x7 DW_TAG_base_type
+ .int32 .Linfo_string4 # DW_AT_name
+ .int8 8 # DW_AT_byte_size
+ .int8 7 # DW_AT_encoding
+ .int8 8 # Abbrev [8] 0x52:0x18 DW_TAG_subprogram
+ .int32 .Lfunc_begin0 # DW_AT_low_pc
+ .int32 .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .int8 4 # DW_AT_frame_base
+ .int8 237
+ .int8 0
+ .int8 2
+ .int8 159
+ .int32 .Linfo_string5 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 3 # DW_AT_decl_line
+ .int32 106 # DW_AT_type
+ # DW_AT_external
+ .int8 6 # Abbrev [6] 0x6a:0x7 DW_TAG_base_type
+ .int32 .Linfo_string6 # DW_AT_name
+ .int8 5 # DW_AT_encoding
+ .int8 4 # DW_AT_byte_size
+ .int8 0 # End Of Children Mark
+.Ldebug_info_end0:
+ .section .debug_str,"S",@
+.Linfo_string0:
+ .asciz "clang version 19.0.0git (/data/projects/llvm-root/llvm-project/clang 2db6703f0c257d293df455e2dff8c1fb695c4100)" # string offset=0
+.Linfo_string1:
+ .asciz "hello-world.cpp" # string offset=111
+.Linfo_string2:
+ .asciz "/data/projects/scripts/regression-suite/input/general" # string offset=127
+.Linfo_string3:
+ .asciz "char" # string offset=181
+.Linfo_string4:
+ .asciz "__ARRAY_SIZE_TYPE__" # string offset=186
+.Linfo_string5:
+ .asciz "main" # string offset=206
+.Linfo_string6:
+ .asciz "int" # string offset=211
+ .ident "clang version 19.0.0git (/data/projects/llvm-root/llvm-project/clang 2db6703f0c257d293df455e2dff8c1fb695c4100)"
+ .no_dead_strip __indirect_function_table
+ .section .custom_section.producers,"",@
+ .int8 2
+ .int8 8
+ .ascii "language"
+ .int8 1
+ .int8 14
+ .ascii "C_plus_plus_14"
+ .int8 0
+ .int8 12
+ .ascii "processed-by"
+ .int8 1
+ .int8 5
+ .ascii "clang"
+ .int8 96
+ .ascii "19.0.0git (/data/projects/llvm-root/llvm-project/clang 2db6703f0c257d293df455e2dff8c1fb695c4100)"
+ .section .debug_str,"S",@
+ .section .custom_section.target_features,"",@
+ .int8 2
+ .int8 43
+ .int8 15
+ .ascii "mutable-globals"
+ .int8 43
+ .int8 8
+ .ascii "sign-ext"
+ .section .debug_str,"S",@
+ .section .debug_line,"",@
+.Lline_table_start0:
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/hello-world.cpp b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/hello-world.cpp
new file mode 100644
index 0000000..73a8e24
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/hello-world.cpp
@@ -0,0 +1,7 @@
+extern int printf(const char * format, ... );
+
+int main()
+{
+ printf("Hello, World\n");
+ return 0;
+}
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-43860-clang.s b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-43860-clang.s
new file mode 100644
index 0000000..fb70b36
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-43860-clang.s
@@ -0,0 +1,457 @@
+ .text
+ .file "pr-43860.cpp"
+ .globaltype __stack_pointer, i32
+ .functype _Z4testii (i32, i32) -> (i32)
+ .section .text._Z4testii,"",@
+ .hidden _Z4testii # -- Begin function _Z4testii
+ .globl _Z4testii
+ .type _Z4testii,@function
+_Z4testii: # @_Z4testii
+.Lfunc_begin0:
+ .file 1 "/data/projects/scripts/regression-suite/input/general" "pr-43860.cpp"
+ .loc 1 11 0 # pr-43860.cpp:11:0
+ .functype _Z4testii (i32, i32) -> (i32)
+ .local i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32
+# %bb.0: # %entry
+ global.get __stack_pointer
+ local.set 2
+ i32.const 32
+ local.set 3
+ local.get 2
+ local.get 3
+ i32.sub
+ local.set 4
+ local.get 4
+ local.get 0
+ i32.store 16
+ local.get 4
+ local.get 1
+ i32.store 12
+.Ltmp0:
+ .loc 1 12 11 prologue_end # pr-43860.cpp:12:11
+ local.get 4
+ i32.load 16
+ local.set 5
+ .loc 1 12 7 is_stmt 0 # pr-43860.cpp:12:7
+ local.get 4
+ local.get 5
+ i32.store 8
+ .loc 1 13 23 is_stmt 1 # pr-43860.cpp:13:23
+ local.get 4
+ i32.load 12
+ local.set 6
+ local.get 4
+ local.get 6
+ i32.store 28
+.Ltmp1:
+ .loc 1 3 15 # pr-43860.cpp:3:15
+ local.get 4
+ i32.load 28
+ local.set 7
+ .loc 1 3 7 is_stmt 0 # pr-43860.cpp:3:7
+ local.get 4
+ local.get 7
+ i32.store 24
+.Ltmp2:
+ .loc 1 5 17 is_stmt 1 # pr-43860.cpp:5:17
+ local.get 4
+ i32.load 28
+ local.set 8
+ .loc 1 5 25 is_stmt 0 # pr-43860.cpp:5:25
+ local.get 4
+ i32.load 24
+ local.set 9
+ .loc 1 5 23 # pr-43860.cpp:5:23
+ local.get 8
+ local.get 9
+ i32.add
+ local.set 10
+ .loc 1 5 9 # pr-43860.cpp:5:9
+ local.get 4
+ local.get 10
+ i32.store 20
+ .loc 1 6 13 is_stmt 1 # pr-43860.cpp:6:13
+ local.get 4
+ i32.load 20
+ local.set 11
+ .loc 1 6 11 is_stmt 0 # pr-43860.cpp:6:11
+ local.get 4
+ local.get 11
+ i32.store 24
+.Ltmp3:
+ .loc 1 8 10 is_stmt 1 # pr-43860.cpp:8:10
+ local.get 4
+ i32.load 24
+ local.set 12
+.Ltmp4:
+ .loc 1 13 5 # pr-43860.cpp:13:5
+ local.get 4
+ i32.load 8
+ local.set 13
+ local.get 13
+ local.get 12
+ i32.add
+ local.set 14
+ local.get 4
+ local.get 14
+ i32.store 8
+ .loc 1 14 10 # pr-43860.cpp:14:10
+ local.get 4
+ i32.load 8
+ local.set 15
+ .loc 1 14 3 is_stmt 0 # pr-43860.cpp:14:3
+ local.get 15
+ return
+ end_function
+.Ltmp5:
+.Lfunc_end0:
+ # -- End function
+ .section .debug_abbrev,"",@
+ .int8 1 # Abbreviation Code
+ .int8 17 # DW_TAG_compile_unit
+ .int8 1 # DW_CHILDREN_yes
+ .int8 37 # DW_AT_producer
+ .int8 14 # DW_FORM_strp
+ .int8 19 # DW_AT_language
+ .int8 5 # DW_FORM_data2
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 16 # DW_AT_stmt_list
+ .int8 23 # DW_FORM_sec_offset
+ .int8 27 # DW_AT_comp_dir
+ .int8 14 # DW_FORM_strp
+ .int8 17 # DW_AT_low_pc
+ .int8 1 # DW_FORM_addr
+ .int8 18 # DW_AT_high_pc
+ .int8 6 # DW_FORM_data4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 2 # Abbreviation Code
+ .int8 46 # DW_TAG_subprogram
+ .int8 1 # DW_CHILDREN_yes
+ .int8 110 # DW_AT_linkage_name
+ .int8 14 # DW_FORM_strp
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 63 # DW_AT_external
+ .int8 25 # DW_FORM_flag_present
+ .int8 32 # DW_AT_inline
+ .int8 11 # DW_FORM_data1
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 3 # Abbreviation Code
+ .int8 5 # DW_TAG_formal_parameter
+ .int8 0 # DW_CHILDREN_no
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 4 # Abbreviation Code
+ .int8 52 # DW_TAG_variable
+ .int8 0 # DW_CHILDREN_no
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 5 # Abbreviation Code
+ .int8 11 # DW_TAG_lexical_block
+ .int8 1 # DW_CHILDREN_yes
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 6 # Abbreviation Code
+ .int8 36 # DW_TAG_base_type
+ .int8 0 # DW_CHILDREN_no
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 62 # DW_AT_encoding
+ .int8 11 # DW_FORM_data1
+ .int8 11 # DW_AT_byte_size
+ .int8 11 # DW_FORM_data1
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 7 # Abbreviation Code
+ .int8 46 # DW_TAG_subprogram
+ .int8 1 # DW_CHILDREN_yes
+ .int8 17 # DW_AT_low_pc
+ .int8 1 # DW_FORM_addr
+ .int8 18 # DW_AT_high_pc
+ .int8 6 # DW_FORM_data4
+ .int8 64 # DW_AT_frame_base
+ .int8 24 # DW_FORM_exprloc
+ .int8 110 # DW_AT_linkage_name
+ .int8 14 # DW_FORM_strp
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 63 # DW_AT_external
+ .int8 25 # DW_FORM_flag_present
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 8 # Abbreviation Code
+ .int8 5 # DW_TAG_formal_parameter
+ .int8 0 # DW_CHILDREN_no
+ .int8 2 # DW_AT_location
+ .int8 24 # DW_FORM_exprloc
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 9 # Abbreviation Code
+ .int8 52 # DW_TAG_variable
+ .int8 0 # DW_CHILDREN_no
+ .int8 2 # DW_AT_location
+ .int8 24 # DW_FORM_exprloc
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 10 # Abbreviation Code
+ .int8 29 # DW_TAG_inlined_subroutine
+ .int8 1 # DW_CHILDREN_yes
+ .int8 49 # DW_AT_abstract_origin
+ .int8 19 # DW_FORM_ref4
+ .int8 17 # DW_AT_low_pc
+ .int8 1 # DW_FORM_addr
+ .int8 18 # DW_AT_high_pc
+ .int8 6 # DW_FORM_data4
+ .int8 88 # DW_AT_call_file
+ .int8 11 # DW_FORM_data1
+ .int8 89 # DW_AT_call_line
+ .int8 11 # DW_FORM_data1
+ .int8 87 # DW_AT_call_column
+ .int8 11 # DW_FORM_data1
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 11 # Abbreviation Code
+ .int8 5 # DW_TAG_formal_parameter
+ .int8 0 # DW_CHILDREN_no
+ .int8 2 # DW_AT_location
+ .int8 24 # DW_FORM_exprloc
+ .int8 49 # DW_AT_abstract_origin
+ .int8 19 # DW_FORM_ref4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 12 # Abbreviation Code
+ .int8 52 # DW_TAG_variable
+ .int8 0 # DW_CHILDREN_no
+ .int8 2 # DW_AT_location
+ .int8 24 # DW_FORM_exprloc
+ .int8 49 # DW_AT_abstract_origin
+ .int8 19 # DW_FORM_ref4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 13 # Abbreviation Code
+ .int8 11 # DW_TAG_lexical_block
+ .int8 1 # DW_CHILDREN_yes
+ .int8 17 # DW_AT_low_pc
+ .int8 1 # DW_FORM_addr
+ .int8 18 # DW_AT_high_pc
+ .int8 6 # DW_FORM_data4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 0 # EOM(3)
+ .section .debug_info,"",@
+.Lcu_begin0:
+ .int32 .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+.Ldebug_info_start0:
+ .int16 4 # DWARF version number
+ .int32 .debug_abbrev0 # Offset Into Abbrev. Section
+ .int8 4 # Address Size (in bytes)
+ .int8 1 # Abbrev [1] 0xb:0xd1 DW_TAG_compile_unit
+ .int32 .Linfo_string0 # DW_AT_producer
+ .int16 33 # DW_AT_language
+ .int32 .Linfo_string1 # DW_AT_name
+ .int32 .Lline_table_start0 # DW_AT_stmt_list
+ .int32 .Linfo_string2 # DW_AT_comp_dir
+ .int32 .Lfunc_begin0 # DW_AT_low_pc
+ .int32 .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .int8 2 # Abbrev [2] 0x26:0x34 DW_TAG_subprogram
+ .int32 .Linfo_string3 # DW_AT_linkage_name
+ .int32 .Linfo_string4 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 2 # DW_AT_decl_line
+ .int32 90 # DW_AT_type
+ # DW_AT_external
+ .int8 1 # DW_AT_inline
+ .int8 3 # Abbrev [3] 0x36:0xb DW_TAG_formal_parameter
+ .int32 .Linfo_string6 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 2 # DW_AT_decl_line
+ .int32 90 # DW_AT_type
+ .int8 4 # Abbrev [4] 0x41:0xb DW_TAG_variable
+ .int32 .Linfo_string7 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 3 # DW_AT_decl_line
+ .int32 90 # DW_AT_type
+ .int8 5 # Abbrev [5] 0x4c:0xd DW_TAG_lexical_block
+ .int8 4 # Abbrev [4] 0x4d:0xb DW_TAG_variable
+ .int32 .Linfo_string8 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 5 # DW_AT_decl_line
+ .int32 90 # DW_AT_type
+ .int8 0 # End Of Children Mark
+ .int8 0 # End Of Children Mark
+ .int8 6 # Abbrev [6] 0x5a:0x7 DW_TAG_base_type
+ .int32 .Linfo_string5 # DW_AT_name
+ .int8 5 # DW_AT_encoding
+ .int8 4 # DW_AT_byte_size
+ .int8 7 # Abbrev [7] 0x61:0x7a DW_TAG_subprogram
+ .int32 .Lfunc_begin0 # DW_AT_low_pc
+ .int32 .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .int8 4 # DW_AT_frame_base
+ .int8 237
+ .int8 0
+ .int8 4
+ .int8 159
+ .int32 .Linfo_string9 # DW_AT_linkage_name
+ .int32 .Linfo_string10 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 11 # DW_AT_decl_line
+ .int32 90 # DW_AT_type
+ # DW_AT_external
+ .int8 8 # Abbrev [8] 0x7d:0xe DW_TAG_formal_parameter
+ .int8 2 # DW_AT_location
+ .int8 145
+ .int8 16
+ .int32 .Linfo_string11 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 11 # DW_AT_decl_line
+ .int32 90 # DW_AT_type
+ .int8 8 # Abbrev [8] 0x8b:0xe DW_TAG_formal_parameter
+ .int8 2 # DW_AT_location
+ .int8 145
+ .int8 12
+ .int32 .Linfo_string12 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 11 # DW_AT_decl_line
+ .int32 90 # DW_AT_type
+ .int8 9 # Abbrev [9] 0x99:0xe DW_TAG_variable
+ .int8 2 # DW_AT_location
+ .int8 145
+ .int8 8
+ .int32 .Linfo_string13 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 12 # DW_AT_decl_line
+ .int32 90 # DW_AT_type
+ .int8 10 # Abbrev [10] 0xa7:0x33 DW_TAG_inlined_subroutine
+ .int32 38 # DW_AT_abstract_origin
+ .int32 .Ltmp1 # DW_AT_low_pc
+ .int32 .Ltmp4-.Ltmp1 # DW_AT_high_pc
+ .int8 1 # DW_AT_call_file
+ .int8 13 # DW_AT_call_line
+ .int8 8 # DW_AT_call_column
+ .int8 11 # Abbrev [11] 0xb7:0x8 DW_TAG_formal_parameter
+ .int8 2 # DW_AT_location
+ .int8 145
+ .int8 28
+ .int32 54 # DW_AT_abstract_origin
+ .int8 12 # Abbrev [12] 0xbf:0x8 DW_TAG_variable
+ .int8 2 # DW_AT_location
+ .int8 145
+ .int8 24
+ .int32 65 # DW_AT_abstract_origin
+ .int8 13 # Abbrev [13] 0xc7:0x12 DW_TAG_lexical_block
+ .int32 .Ltmp2 # DW_AT_low_pc
+ .int32 .Ltmp3-.Ltmp2 # DW_AT_high_pc
+ .int8 12 # Abbrev [12] 0xd0:0x8 DW_TAG_variable
+ .int8 2 # DW_AT_location
+ .int8 145
+ .int8 20
+ .int32 77 # DW_AT_abstract_origin
+ .int8 0 # End Of Children Mark
+ .int8 0 # End Of Children Mark
+ .int8 0 # End Of Children Mark
+ .int8 0 # End Of Children Mark
+.Ldebug_info_end0:
+ .section .debug_str,"S",@
+.Linfo_string0:
+ .asciz "clang version 19.0.0git (/data/projects/llvm-root/llvm-project/clang 2db6703f0c257d293df455e2dff8c1fb695c4100)" # string offset=0
+.Linfo_string1:
+ .asciz "pr-43860.cpp" # string offset=111
+.Linfo_string2:
+ .asciz "/data/projects/scripts/regression-suite/input/general" # string offset=124
+.Linfo_string3:
+ .asciz "_Z14InlineFunctioni" # string offset=178
+.Linfo_string4:
+ .asciz "InlineFunction" # string offset=198
+.Linfo_string5:
+ .asciz "int" # string offset=213
+.Linfo_string6:
+ .asciz "Param" # string offset=217
+.Linfo_string7:
+ .asciz "Var_1" # string offset=223
+.Linfo_string8:
+ .asciz "Var_2" # string offset=229
+.Linfo_string9:
+ .asciz "_Z4testii" # string offset=235
+.Linfo_string10:
+ .asciz "test" # string offset=245
+.Linfo_string11:
+ .asciz "Param_1" # string offset=250
+.Linfo_string12:
+ .asciz "Param_2" # string offset=258
+.Linfo_string13:
+ .asciz "A" # string offset=266
+ .ident "clang version 19.0.0git (/data/projects/llvm-root/llvm-project/clang 2db6703f0c257d293df455e2dff8c1fb695c4100)"
+ .section .custom_section.producers,"",@
+ .int8 2
+ .int8 8
+ .ascii "language"
+ .int8 1
+ .int8 14
+ .ascii "C_plus_plus_14"
+ .int8 0
+ .int8 12
+ .ascii "processed-by"
+ .int8 1
+ .int8 5
+ .ascii "clang"
+ .int8 96
+ .ascii "19.0.0git (/data/projects/llvm-root/llvm-project/clang 2db6703f0c257d293df455e2dff8c1fb695c4100)"
+ .section .debug_str,"S",@
+ .section .custom_section.target_features,"",@
+ .int8 2
+ .int8 43
+ .int8 15
+ .ascii "mutable-globals"
+ .int8 43
+ .int8 8
+ .ascii "sign-ext"
+ .section .debug_str,"S",@
+ .section .debug_line,"",@
+.Lline_table_start0:
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-43860.cpp b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-43860.cpp
new file mode 100644
index 0000000..a3d3b76
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-43860.cpp
@@ -0,0 +1,15 @@
+#include "definitions.h"
+forceinline int InlineFunction(int Param) {
+ int Var_1 = Param;
+ {
+ int Var_2 = Param + Var_1;
+ Var_1 = Var_2;
+ }
+ return Var_1;
+}
+
+int test(int Param_1, int Param_2) {
+ int A = Param_1;
+ A += InlineFunction(Param_2);
+ return A;
+}
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-44884-clang.s b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-44884-clang.s
new file mode 100644
index 0000000..af9875b
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-44884-clang.s
@@ -0,0 +1,488 @@
+ .text
+ .file "pr-44884.cpp"
+ .globaltype __stack_pointer, i32
+ .functype _Z3barf (f32) -> (i32)
+ .functype _Z3fooc (i32) -> (i32)
+ .section .text._Z3barf,"",@
+ .hidden _Z3barf # -- Begin function _Z3barf
+ .globl _Z3barf
+ .type _Z3barf,@function
+_Z3barf: # @_Z3barf
+.Lfunc_begin0:
+ .file 1 "/data/projects/scripts/regression-suite/input/general" "pr-44884.cpp"
+ .loc 1 1 0 # pr-44884.cpp:1:0
+ .functype _Z3barf (f32) -> (i32)
+ .local i32, i32, i32, f32, f32, f32, i32, i32, i32, i32, i32, i32
+# %bb.0: # %entry
+ global.get __stack_pointer
+ local.set 1
+ i32.const 16
+ local.set 2
+ local.get 1
+ local.get 2
+ i32.sub
+ local.set 3
+ local.get 3
+ local.get 0
+ f32.store 12
+.Ltmp0:
+ .loc 1 1 36 prologue_end # pr-44884.cpp:1:36
+ local.get 3
+ f32.load 12
+ local.set 4
+ local.get 4
+ f32.abs
+ local.set 5
+ f32.const 0x1p31
+ local.set 6
+ local.get 5
+ local.get 6
+ f32.lt
+ local.set 7
+ local.get 7
+ i32.eqz
+ local.set 8
+ block
+ block
+ local.get 8
+ br_if 0 # 0: down to label1
+# %bb.1: # %entry
+ local.get 4
+ i32.trunc_f32_s
+ local.set 9
+ local.get 9
+ local.set 10
+ br 1 # 1: down to label0
+.LBB0_2: # %entry
+ .loc 1 0 36 is_stmt 0 # pr-44884.cpp:0:36
+ end_block # label1:
+ .loc 1 1 36 # pr-44884.cpp:1:36
+ i32.const -2147483648
+ local.set 11
+ local.get 11
+ local.set 10
+.LBB0_3: # %entry
+ .loc 1 0 36 # pr-44884.cpp:0:36
+ end_block # label0:
+ .loc 1 1 36 # pr-44884.cpp:1:36
+ local.get 10
+ local.set 12
+ .loc 1 1 24 # pr-44884.cpp:1:24
+ local.get 12
+ return
+ end_function
+.Ltmp1:
+.Lfunc_end0:
+ # -- End function
+ .section .text._Z3fooc,"",@
+ .hidden _Z3fooc # -- Begin function _Z3fooc
+ .globl _Z3fooc
+ .type _Z3fooc,@function
+_Z3fooc: # @_Z3fooc
+.Lfunc_begin1:
+ .loc 1 3 0 is_stmt 1 # pr-44884.cpp:3:0
+ .functype _Z3fooc (i32) -> (i32)
+ .local i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, f32, f32, i32, i32, i32, i32, i32, i32, i32, i32, i32
+# %bb.0: # %entry
+ global.get __stack_pointer
+ local.set 1
+ i32.const 16
+ local.set 2
+ local.get 1
+ local.get 2
+ i32.sub
+ local.set 3
+ local.get 3
+ global.set __stack_pointer
+ local.get 3
+ local.get 0
+ i32.store8 15
+.Ltmp2:
+ .loc 1 5 15 prologue_end # pr-44884.cpp:5:15
+ local.get 3
+ i32.load8_u 15
+ local.set 4
+ i32.const 24
+ local.set 5
+ local.get 4
+ local.get 5
+ i32.shl
+ local.set 6
+ local.get 6
+ local.get 5
+ i32.shr_s
+ local.set 7
+ .loc 1 5 7 is_stmt 0 # pr-44884.cpp:5:7
+ local.get 3
+ local.get 7
+ i32.store 8
+.Ltmp3:
+ .loc 1 9 21 is_stmt 1 # pr-44884.cpp:9:21
+ local.get 3
+ i32.load 8
+ local.set 8
+ .loc 1 9 29 is_stmt 0 # pr-44884.cpp:9:29
+ local.get 3
+ i32.load8_u 15
+ local.set 9
+ i32.const 24
+ local.set 10
+ local.get 9
+ local.get 10
+ i32.shl
+ local.set 11
+ local.get 11
+ local.get 10
+ i32.shr_s
+ local.set 12
+ .loc 1 9 27 # pr-44884.cpp:9:27
+ local.get 8
+ local.get 12
+ i32.add
+ local.set 13
+ .loc 1 9 21 # pr-44884.cpp:9:21
+ local.get 13
+ f32.convert_i32_s
+ local.set 14
+ .loc 1 9 13 # pr-44884.cpp:9:13
+ local.get 3
+ local.get 14
+ f32.store 4
+ .loc 1 10 19 is_stmt 1 # pr-44884.cpp:10:19
+ local.get 3
+ f32.load 4
+ local.set 15
+ .loc 1 10 15 is_stmt 0 # pr-44884.cpp:10:15
+ local.get 15
+ call _Z3barf
+ local.set 16
+ .loc 1 10 13 # pr-44884.cpp:10:13
+ local.get 3
+ local.get 16
+ i32.store 8
+.Ltmp4:
+ .loc 1 13 10 is_stmt 1 # pr-44884.cpp:13:10
+ local.get 3
+ i32.load 8
+ local.set 17
+ .loc 1 13 18 is_stmt 0 # pr-44884.cpp:13:18
+ local.get 3
+ i32.load8_u 15
+ local.set 18
+ i32.const 24
+ local.set 19
+ local.get 18
+ local.get 19
+ i32.shl
+ local.set 20
+ local.get 20
+ local.get 19
+ i32.shr_s
+ local.set 21
+ .loc 1 13 16 # pr-44884.cpp:13:16
+ local.get 17
+ local.get 21
+ i32.add
+ local.set 22
+ .loc 1 13 3 # pr-44884.cpp:13:3
+ i32.const 16
+ local.set 23
+ local.get 3
+ local.get 23
+ i32.add
+ local.set 24
+ local.get 24
+ global.set __stack_pointer
+ local.get 22
+ return
+ end_function
+.Ltmp5:
+.Lfunc_end1:
+ # -- End function
+ .section .debug_abbrev,"",@
+ .int8 1 # Abbreviation Code
+ .int8 17 # DW_TAG_compile_unit
+ .int8 1 # DW_CHILDREN_yes
+ .int8 37 # DW_AT_producer
+ .int8 14 # DW_FORM_strp
+ .int8 19 # DW_AT_language
+ .int8 5 # DW_FORM_data2
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 16 # DW_AT_stmt_list
+ .int8 23 # DW_FORM_sec_offset
+ .int8 27 # DW_AT_comp_dir
+ .int8 14 # DW_FORM_strp
+ .int8 17 # DW_AT_low_pc
+ .int8 1 # DW_FORM_addr
+ .int8 85 # DW_AT_ranges
+ .int8 23 # DW_FORM_sec_offset
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 2 # Abbreviation Code
+ .int8 36 # DW_TAG_base_type
+ .int8 0 # DW_CHILDREN_no
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 62 # DW_AT_encoding
+ .int8 11 # DW_FORM_data1
+ .int8 11 # DW_AT_byte_size
+ .int8 11 # DW_FORM_data1
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 3 # Abbreviation Code
+ .int8 46 # DW_TAG_subprogram
+ .int8 1 # DW_CHILDREN_yes
+ .int8 17 # DW_AT_low_pc
+ .int8 1 # DW_FORM_addr
+ .int8 18 # DW_AT_high_pc
+ .int8 6 # DW_FORM_data4
+ .int8 64 # DW_AT_frame_base
+ .int8 24 # DW_FORM_exprloc
+ .int8 110 # DW_AT_linkage_name
+ .int8 14 # DW_FORM_strp
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 63 # DW_AT_external
+ .int8 25 # DW_FORM_flag_present
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 4 # Abbreviation Code
+ .int8 5 # DW_TAG_formal_parameter
+ .int8 0 # DW_CHILDREN_no
+ .int8 2 # DW_AT_location
+ .int8 24 # DW_FORM_exprloc
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 5 # Abbreviation Code
+ .int8 52 # DW_TAG_variable
+ .int8 0 # DW_CHILDREN_no
+ .int8 2 # DW_AT_location
+ .int8 24 # DW_FORM_exprloc
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 6 # Abbreviation Code
+ .int8 11 # DW_TAG_lexical_block
+ .int8 1 # DW_CHILDREN_yes
+ .int8 17 # DW_AT_low_pc
+ .int8 1 # DW_FORM_addr
+ .int8 18 # DW_AT_high_pc
+ .int8 6 # DW_FORM_data4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 7 # Abbreviation Code
+ .int8 22 # DW_TAG_typedef
+ .int8 0 # DW_CHILDREN_no
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 0 # EOM(3)
+ .section .debug_info,"",@
+.Lcu_begin0:
+ .int32 .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+.Ldebug_info_start0:
+ .int16 4 # DWARF version number
+ .int32 .debug_abbrev0 # Offset Into Abbrev. Section
+ .int8 4 # Address Size (in bytes)
+ .int8 1 # Abbrev [1] 0xb:0xca DW_TAG_compile_unit
+ .int32 .Linfo_string0 # DW_AT_producer
+ .int16 33 # DW_AT_language
+ .int32 .Linfo_string1 # DW_AT_name
+ .int32 .Lline_table_start0 # DW_AT_stmt_list
+ .int32 .Linfo_string2 # DW_AT_comp_dir
+ .int32 0 # DW_AT_low_pc
+ .int32 .Ldebug_ranges0 # DW_AT_ranges
+ .int8 2 # Abbrev [2] 0x26:0x7 DW_TAG_base_type
+ .int32 .Linfo_string3 # DW_AT_name
+ .int8 5 # DW_AT_encoding
+ .int8 4 # DW_AT_byte_size
+ .int8 3 # Abbrev [3] 0x2d:0x2b DW_TAG_subprogram
+ .int32 .Lfunc_begin0 # DW_AT_low_pc
+ .int32 .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .int8 4 # DW_AT_frame_base
+ .int8 237
+ .int8 0
+ .int8 3
+ .int8 159
+ .int32 .Linfo_string4 # DW_AT_linkage_name
+ .int32 .Linfo_string5 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 1 # DW_AT_decl_line
+ .int32 38 # DW_AT_type
+ # DW_AT_external
+ .int8 4 # Abbrev [4] 0x49:0xe DW_TAG_formal_parameter
+ .int8 2 # DW_AT_location
+ .int8 145
+ .int8 12
+ .int32 .Linfo_string9 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 1 # DW_AT_decl_line
+ .int32 198 # DW_AT_type
+ .int8 0 # End Of Children Mark
+ .int8 3 # Abbrev [3] 0x58:0x67 DW_TAG_subprogram
+ .int32 .Lfunc_begin1 # DW_AT_low_pc
+ .int32 .Lfunc_end1-.Lfunc_begin1 # DW_AT_high_pc
+ .int8 4 # DW_AT_frame_base
+ .int8 237
+ .int8 0
+ .int8 3
+ .int8 159
+ .int32 .Linfo_string6 # DW_AT_linkage_name
+ .int32 .Linfo_string7 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 3 # DW_AT_decl_line
+ .int32 191 # DW_AT_type
+ # DW_AT_external
+ .int8 4 # Abbrev [4] 0x74:0xe DW_TAG_formal_parameter
+ .int8 2 # DW_AT_location
+ .int8 145
+ .int8 15
+ .int32 .Linfo_string11 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 3 # DW_AT_decl_line
+ .int32 205 # DW_AT_type
+ .int8 5 # Abbrev [5] 0x82:0xe DW_TAG_variable
+ .int8 2 # DW_AT_location
+ .int8 145
+ .int8 8
+ .int32 .Linfo_string13 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 5 # DW_AT_decl_line
+ .int32 168 # DW_AT_type
+ .int8 6 # Abbrev [6] 0x90:0x18 DW_TAG_lexical_block
+ .int32 .Ltmp3 # DW_AT_low_pc
+ .int32 .Ltmp4-.Ltmp3 # DW_AT_high_pc
+ .int8 5 # Abbrev [5] 0x99:0xe DW_TAG_variable
+ .int8 2 # DW_AT_location
+ .int8 145
+ .int8 4
+ .int32 .Linfo_string15 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 9 # DW_AT_decl_line
+ .int32 179 # DW_AT_type
+ .int8 0 # End Of Children Mark
+ .int8 7 # Abbrev [7] 0xa8:0xb DW_TAG_typedef
+ .int32 38 # DW_AT_type
+ .int32 .Linfo_string14 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 4 # DW_AT_decl_line
+ .int8 7 # Abbrev [7] 0xb3:0xb DW_TAG_typedef
+ .int32 198 # DW_AT_type
+ .int32 .Linfo_string16 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 7 # DW_AT_decl_line
+ .int8 0 # End Of Children Mark
+ .int8 2 # Abbrev [2] 0xbf:0x7 DW_TAG_base_type
+ .int32 .Linfo_string8 # DW_AT_name
+ .int8 7 # DW_AT_encoding
+ .int8 4 # DW_AT_byte_size
+ .int8 2 # Abbrev [2] 0xc6:0x7 DW_TAG_base_type
+ .int32 .Linfo_string10 # DW_AT_name
+ .int8 4 # DW_AT_encoding
+ .int8 4 # DW_AT_byte_size
+ .int8 2 # Abbrev [2] 0xcd:0x7 DW_TAG_base_type
+ .int32 .Linfo_string12 # DW_AT_name
+ .int8 6 # DW_AT_encoding
+ .int8 1 # DW_AT_byte_size
+ .int8 0 # End Of Children Mark
+.Ldebug_info_end0:
+ .section .debug_ranges,"",@
+.Ldebug_ranges0:
+ .int32 .Lfunc_begin0
+ .int32 .Lfunc_end0
+ .int32 .Lfunc_begin1
+ .int32 .Lfunc_end1
+ .int32 0
+ .int32 0
+ .section .debug_str,"S",@
+.Linfo_string0:
+ .asciz "clang version 19.0.0git (/data/projects/llvm-root/llvm-project/clang 2db6703f0c257d293df455e2dff8c1fb695c4100)" # string offset=0
+.Linfo_string1:
+ .asciz "pr-44884.cpp" # string offset=111
+.Linfo_string2:
+ .asciz "/data/projects/scripts/regression-suite/input/general" # string offset=124
+.Linfo_string3:
+ .asciz "int" # string offset=178
+.Linfo_string4:
+ .asciz "_Z3barf" # string offset=182
+.Linfo_string5:
+ .asciz "bar" # string offset=190
+.Linfo_string6:
+ .asciz "_Z3fooc" # string offset=194
+.Linfo_string7:
+ .asciz "foo" # string offset=202
+.Linfo_string8:
+ .asciz "unsigned int" # string offset=206
+.Linfo_string9:
+ .asciz "Input" # string offset=219
+.Linfo_string10:
+ .asciz "float" # string offset=225
+.Linfo_string11:
+ .asciz "Param" # string offset=231
+.Linfo_string12:
+ .asciz "char" # string offset=237
+.Linfo_string13:
+ .asciz "Value" # string offset=242
+.Linfo_string14:
+ .asciz "INT" # string offset=248
+.Linfo_string15:
+ .asciz "Added" # string offset=252
+.Linfo_string16:
+ .asciz "FLOAT" # string offset=258
+ .ident "clang version 19.0.0git (/data/projects/llvm-root/llvm-project/clang 2db6703f0c257d293df455e2dff8c1fb695c4100)"
+ .section .custom_section.producers,"",@
+ .int8 2
+ .int8 8
+ .ascii "language"
+ .int8 1
+ .int8 14
+ .ascii "C_plus_plus_14"
+ .int8 0
+ .int8 12
+ .ascii "processed-by"
+ .int8 1
+ .int8 5
+ .ascii "clang"
+ .int8 96
+ .ascii "19.0.0git (/data/projects/llvm-root/llvm-project/clang 2db6703f0c257d293df455e2dff8c1fb695c4100)"
+ .section .debug_str,"S",@
+ .section .custom_section.target_features,"",@
+ .int8 2
+ .int8 43
+ .int8 15
+ .ascii "mutable-globals"
+ .int8 43
+ .int8 8
+ .ascii "sign-ext"
+ .section .debug_str,"S",@
+ .section .debug_line,"",@
+.Lline_table_start0:
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-44884.cpp b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-44884.cpp
new file mode 100644
index 0000000..4b47aae
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-44884.cpp
@@ -0,0 +1,14 @@
+int bar(float Input) { return (int)Input; }
+
+unsigned foo(char Param) {
+ typedef int INT;
+ INT Value = Param;
+ {
+ typedef float FLOAT;
+ {
+ FLOAT Added = Value + Param;
+ Value = bar(Added);
+ }
+ }
+ return Value + Param;
+}
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-46466-clang.s b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-46466-clang.s
new file mode 100644
index 0000000..1056db0
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-46466-clang.s
@@ -0,0 +1,259 @@
+ .text
+ .file "pr-46466.cpp"
+ .file 1 "/data/projects/scripts/regression-suite/input/general" "pr-46466.cpp"
+ .functype _Z4testv () -> (i32)
+ .section .text._Z4testv,"",@
+ .hidden _Z4testv # -- Begin function _Z4testv
+ .globl _Z4testv
+ .type _Z4testv,@function
+_Z4testv: # @_Z4testv
+.Lfunc_begin0:
+ .functype _Z4testv () -> (i32)
+ .local i32
+# %bb.0: # %entry
+ .loc 1 10 3 prologue_end # pr-46466.cpp:10:3
+ i32.const 1
+ local.set 0
+ local.get 0
+ return
+ end_function
+.Ltmp0:
+.Lfunc_end0:
+ # -- End function
+ .hidden S # @S
+ .type S,@object
+ .section .bss.S,"",@
+ .globl S
+S:
+ .skip 1
+ .size S, 1
+
+ .section .debug_abbrev,"",@
+ .int8 1 # Abbreviation Code
+ .int8 17 # DW_TAG_compile_unit
+ .int8 1 # DW_CHILDREN_yes
+ .int8 37 # DW_AT_producer
+ .int8 14 # DW_FORM_strp
+ .int8 19 # DW_AT_language
+ .int8 5 # DW_FORM_data2
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 16 # DW_AT_stmt_list
+ .int8 23 # DW_FORM_sec_offset
+ .int8 27 # DW_AT_comp_dir
+ .int8 14 # DW_FORM_strp
+ .int8 17 # DW_AT_low_pc
+ .int8 1 # DW_FORM_addr
+ .int8 18 # DW_AT_high_pc
+ .int8 6 # DW_FORM_data4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 2 # Abbreviation Code
+ .int8 52 # DW_TAG_variable
+ .int8 0 # DW_CHILDREN_no
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 63 # DW_AT_external
+ .int8 25 # DW_FORM_flag_present
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 2 # DW_AT_location
+ .int8 24 # DW_FORM_exprloc
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 3 # Abbreviation Code
+ .int8 19 # DW_TAG_structure_type
+ .int8 1 # DW_CHILDREN_yes
+ .int8 54 # DW_AT_calling_convention
+ .int8 11 # DW_FORM_data1
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 11 # DW_AT_byte_size
+ .int8 11 # DW_FORM_data1
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 4 # Abbreviation Code
+ .int8 13 # DW_TAG_member
+ .int8 0 # DW_CHILDREN_no
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 56 # DW_AT_data_member_location
+ .int8 11 # DW_FORM_data1
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 5 # Abbreviation Code
+ .int8 23 # DW_TAG_union_type
+ .int8 0 # DW_CHILDREN_no
+ .int8 54 # DW_AT_calling_convention
+ .int8 11 # DW_FORM_data1
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 11 # DW_AT_byte_size
+ .int8 11 # DW_FORM_data1
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 6 # Abbreviation Code
+ .int8 46 # DW_TAG_subprogram
+ .int8 0 # DW_CHILDREN_no
+ .int8 17 # DW_AT_low_pc
+ .int8 1 # DW_FORM_addr
+ .int8 18 # DW_AT_high_pc
+ .int8 6 # DW_FORM_data4
+ .int8 64 # DW_AT_frame_base
+ .int8 24 # DW_FORM_exprloc
+ .int8 110 # DW_AT_linkage_name
+ .int8 14 # DW_FORM_strp
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 63 # DW_AT_external
+ .int8 25 # DW_FORM_flag_present
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 7 # Abbreviation Code
+ .int8 36 # DW_TAG_base_type
+ .int8 0 # DW_CHILDREN_no
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 62 # DW_AT_encoding
+ .int8 11 # DW_FORM_data1
+ .int8 11 # DW_AT_byte_size
+ .int8 11 # DW_FORM_data1
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 0 # EOM(3)
+ .section .debug_info,"",@
+.Lcu_begin0:
+ .int32 .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+.Ldebug_info_start0:
+ .int16 4 # DWARF version number
+ .int32 .debug_abbrev0 # Offset Into Abbrev. Section
+ .int8 4 # Address Size (in bytes)
+ .int8 1 # Abbrev [1] 0xb:0x72 DW_TAG_compile_unit
+ .int32 .Linfo_string0 # DW_AT_producer
+ .int16 33 # DW_AT_language
+ .int32 .Linfo_string1 # DW_AT_name
+ .int32 .Lline_table_start0 # DW_AT_stmt_list
+ .int32 .Linfo_string2 # DW_AT_comp_dir
+ .int32 .Lfunc_begin0 # DW_AT_low_pc
+ .int32 .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .int8 2 # Abbrev [2] 0x26:0x11 DW_TAG_variable
+ .int32 .Linfo_string3 # DW_AT_name
+ .int32 55 # DW_AT_type
+ # DW_AT_external
+ .int8 1 # DW_AT_decl_file
+ .int8 8 # DW_AT_decl_line
+ .int8 5 # DW_AT_location
+ .int8 3
+ .int32 S
+ .int8 3 # Abbrev [3] 0x37:0x1f DW_TAG_structure_type
+ .int8 5 # DW_AT_calling_convention
+ .int32 .Linfo_string6 # DW_AT_name
+ .int8 1 # DW_AT_byte_size
+ .int8 1 # DW_AT_decl_file
+ .int8 1 # DW_AT_decl_line
+ .int8 4 # Abbrev [4] 0x40:0xc DW_TAG_member
+ .int32 .Linfo_string4 # DW_AT_name
+ .int32 76 # DW_AT_type
+ .int8 1 # DW_AT_decl_file
+ .int8 5 # DW_AT_decl_line
+ .int8 0 # DW_AT_data_member_location
+ .int8 5 # Abbrev [5] 0x4c:0x9 DW_TAG_union_type
+ .int8 5 # DW_AT_calling_convention
+ .int32 .Linfo_string5 # DW_AT_name
+ .int8 1 # DW_AT_byte_size
+ .int8 1 # DW_AT_decl_file
+ .int8 2 # DW_AT_decl_line
+ .int8 0 # End Of Children Mark
+ .int8 6 # Abbrev [6] 0x56:0x1f DW_TAG_subprogram
+ .int32 .Lfunc_begin0 # DW_AT_low_pc
+ .int32 .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .int8 7 # DW_AT_frame_base
+ .int8 237
+ .int8 3
+ .int32 __stack_pointer
+ .int8 159
+ .int32 .Linfo_string7 # DW_AT_linkage_name
+ .int32 .Linfo_string8 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 9 # DW_AT_decl_line
+ .int32 117 # DW_AT_type
+ # DW_AT_external
+ .int8 7 # Abbrev [7] 0x75:0x7 DW_TAG_base_type
+ .int32 .Linfo_string9 # DW_AT_name
+ .int8 5 # DW_AT_encoding
+ .int8 4 # DW_AT_byte_size
+ .int8 0 # End Of Children Mark
+.Ldebug_info_end0:
+ .section .debug_str,"S",@
+.Linfo_string0:
+ .asciz "clang version 19.0.0git (/data/projects/llvm-root/llvm-project/clang 2db6703f0c257d293df455e2dff8c1fb695c4100)" # string offset=0
+.Linfo_string1:
+ .asciz "pr-46466.cpp" # string offset=111
+.Linfo_string2:
+ .asciz "/data/projects/scripts/regression-suite/input/general" # string offset=124
+.Linfo_string3:
+ .asciz "S" # string offset=178
+.Linfo_string4:
+ .asciz "U" # string offset=180
+.Linfo_string5:
+ .asciz "Union" # string offset=182
+.Linfo_string6:
+ .asciz "Struct" # string offset=188
+.Linfo_string7:
+ .asciz "_Z4testv" # string offset=195
+.Linfo_string8:
+ .asciz "test" # string offset=204
+.Linfo_string9:
+ .asciz "int" # string offset=209
+ .ident "clang version 19.0.0git (/data/projects/llvm-root/llvm-project/clang 2db6703f0c257d293df455e2dff8c1fb695c4100)"
+ .section .custom_section.producers,"",@
+ .int8 2
+ .int8 8
+ .ascii "language"
+ .int8 1
+ .int8 14
+ .ascii "C_plus_plus_14"
+ .int8 0
+ .int8 12
+ .ascii "processed-by"
+ .int8 1
+ .int8 5
+ .ascii "clang"
+ .int8 96
+ .ascii "19.0.0git (/data/projects/llvm-root/llvm-project/clang 2db6703f0c257d293df455e2dff8c1fb695c4100)"
+ .section .debug_str,"S",@
+ .section .custom_section.target_features,"",@
+ .int8 2
+ .int8 43
+ .int8 15
+ .ascii "mutable-globals"
+ .int8 43
+ .int8 8
+ .ascii "sign-ext"
+ .section .debug_str,"S",@
+ .section .debug_line,"",@
+.Lline_table_start0:
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-46466.cpp b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-46466.cpp
new file mode 100644
index 0000000..28be9a5
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/pr-46466.cpp
@@ -0,0 +1,11 @@
+struct Struct {
+ union Union {
+ enum NestedEnum { RED, BLUE };
+ };
+ Union U;
+};
+
+Struct S;
+int test() {
+ return S.U.BLUE;
+}
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/test-clang.s b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/test-clang.s
new file mode 100644
index 0000000..02afaf7
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/test-clang.s
@@ -0,0 +1,366 @@
+ .text
+ .file "test.cpp"
+ .globaltype __stack_pointer, i32
+ .functype _Z3fooPKijb (i32, i32, i32) -> (i32)
+ .section .text._Z3fooPKijb,"",@
+ .hidden _Z3fooPKijb # -- Begin function _Z3fooPKijb
+ .globl _Z3fooPKijb
+ .type _Z3fooPKijb,@function
+_Z3fooPKijb: # @_Z3fooPKijb
+.Lfunc_begin0:
+ .file 1 "/data/projects/scripts/regression-suite/input/general" "test.cpp"
+ .loc 1 2 0 # test.cpp:2:0
+ .functype _Z3fooPKijb (i32, i32, i32) -> (i32)
+ .local i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32
+# %bb.0: # %entry
+ global.get __stack_pointer
+ local.set 3
+ i32.const 32
+ local.set 4
+ local.get 3
+ local.get 4
+ i32.sub
+ local.set 5
+ local.get 5
+ local.get 0
+ i32.store 24
+ local.get 5
+ local.get 1
+ i32.store 20
+ local.get 2
+ local.set 6
+ local.get 5
+ local.get 6
+ i32.store8 19
+.Ltmp0:
+ .loc 1 3 7 prologue_end # test.cpp:3:7
+ local.get 5
+ i32.load8_u 19
+ local.set 7
+.Ltmp1:
+ .loc 1 3 7 is_stmt 0 # test.cpp:3:7
+ i32.const 1
+ local.set 8
+ local.get 7
+ local.get 8
+ i32.and
+ local.set 9
+ block
+ block
+ local.get 9
+ i32.eqz
+ br_if 0 # 0: down to label1
+# %bb.1: # %if.then
+.Ltmp2:
+ .loc 1 5 19 is_stmt 1 # test.cpp:5:19
+ i32.const 7
+ local.set 10
+ local.get 5
+ local.get 10
+ i32.store 12
+ .loc 1 6 5 # test.cpp:6:5
+ i32.const 7
+ local.set 11
+ local.get 5
+ local.get 11
+ i32.store 28
+ br 1 # 1: down to label0
+.Ltmp3:
+.LBB0_2: # %if.end
+ .loc 1 0 5 is_stmt 0 # test.cpp:0:5
+ end_block # label1:
+ .loc 1 8 10 is_stmt 1 # test.cpp:8:10
+ local.get 5
+ i32.load 20
+ local.set 12
+ .loc 1 8 3 is_stmt 0 # test.cpp:8:3
+ local.get 5
+ local.get 12
+ i32.store 28
+.LBB0_3: # %return
+ .loc 1 0 3 # test.cpp:0:3
+ end_block # label0:
+ .loc 1 9 1 is_stmt 1 # test.cpp:9:1
+ local.get 5
+ i32.load 28
+ local.set 13
+ local.get 13
+ return
+ end_function
+.Ltmp4:
+.Lfunc_end0:
+ # -- End function
+ .section .debug_abbrev,"",@
+ .int8 1 # Abbreviation Code
+ .int8 17 # DW_TAG_compile_unit
+ .int8 1 # DW_CHILDREN_yes
+ .int8 37 # DW_AT_producer
+ .int8 14 # DW_FORM_strp
+ .int8 19 # DW_AT_language
+ .int8 5 # DW_FORM_data2
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 16 # DW_AT_stmt_list
+ .int8 23 # DW_FORM_sec_offset
+ .int8 27 # DW_AT_comp_dir
+ .int8 14 # DW_FORM_strp
+ .int8 17 # DW_AT_low_pc
+ .int8 1 # DW_FORM_addr
+ .int8 18 # DW_AT_high_pc
+ .int8 6 # DW_FORM_data4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 2 # Abbreviation Code
+ .int8 46 # DW_TAG_subprogram
+ .int8 1 # DW_CHILDREN_yes
+ .int8 17 # DW_AT_low_pc
+ .int8 1 # DW_FORM_addr
+ .int8 18 # DW_AT_high_pc
+ .int8 6 # DW_FORM_data4
+ .int8 64 # DW_AT_frame_base
+ .int8 24 # DW_FORM_exprloc
+ .int8 110 # DW_AT_linkage_name
+ .int8 14 # DW_FORM_strp
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 63 # DW_AT_external
+ .int8 25 # DW_FORM_flag_present
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 3 # Abbreviation Code
+ .int8 5 # DW_TAG_formal_parameter
+ .int8 0 # DW_CHILDREN_no
+ .int8 2 # DW_AT_location
+ .int8 24 # DW_FORM_exprloc
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 4 # Abbreviation Code
+ .int8 11 # DW_TAG_lexical_block
+ .int8 1 # DW_CHILDREN_yes
+ .int8 17 # DW_AT_low_pc
+ .int8 1 # DW_FORM_addr
+ .int8 18 # DW_AT_high_pc
+ .int8 6 # DW_FORM_data4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 5 # Abbreviation Code
+ .int8 52 # DW_TAG_variable
+ .int8 0 # DW_CHILDREN_no
+ .int8 2 # DW_AT_location
+ .int8 24 # DW_FORM_exprloc
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 6 # Abbreviation Code
+ .int8 22 # DW_TAG_typedef
+ .int8 0 # DW_CHILDREN_no
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 58 # DW_AT_decl_file
+ .int8 11 # DW_FORM_data1
+ .int8 59 # DW_AT_decl_line
+ .int8 11 # DW_FORM_data1
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 7 # Abbreviation Code
+ .int8 36 # DW_TAG_base_type
+ .int8 0 # DW_CHILDREN_no
+ .int8 3 # DW_AT_name
+ .int8 14 # DW_FORM_strp
+ .int8 62 # DW_AT_encoding
+ .int8 11 # DW_FORM_data1
+ .int8 11 # DW_AT_byte_size
+ .int8 11 # DW_FORM_data1
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 8 # Abbreviation Code
+ .int8 15 # DW_TAG_pointer_type
+ .int8 0 # DW_CHILDREN_no
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 9 # Abbreviation Code
+ .int8 38 # DW_TAG_const_type
+ .int8 0 # DW_CHILDREN_no
+ .int8 73 # DW_AT_type
+ .int8 19 # DW_FORM_ref4
+ .int8 0 # EOM(1)
+ .int8 0 # EOM(2)
+ .int8 0 # EOM(3)
+ .section .debug_info,"",@
+.Lcu_begin0:
+ .int32 .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+.Ldebug_info_start0:
+ .int16 4 # DWARF version number
+ .int32 .debug_abbrev0 # Offset Into Abbrev. Section
+ .int8 4 # Address Size (in bytes)
+ .int8 1 # Abbrev [1] 0xb:0xb5 DW_TAG_compile_unit
+ .int32 .Linfo_string0 # DW_AT_producer
+ .int16 33 # DW_AT_language
+ .int32 .Linfo_string1 # DW_AT_name
+ .int32 .Lline_table_start0 # DW_AT_stmt_list
+ .int32 .Linfo_string2 # DW_AT_comp_dir
+ .int32 .Lfunc_begin0 # DW_AT_low_pc
+ .int32 .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .int8 2 # Abbrev [2] 0x26:0x6a DW_TAG_subprogram
+ .int32 .Lfunc_begin0 # DW_AT_low_pc
+ .int32 .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .int8 4 # DW_AT_frame_base
+ .int8 237
+ .int8 0
+ .int8 5
+ .int8 159
+ .int32 .Linfo_string3 # DW_AT_linkage_name
+ .int32 .Linfo_string4 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 2 # DW_AT_decl_line
+ .int32 144 # DW_AT_type
+ # DW_AT_external
+ .int8 3 # Abbrev [3] 0x42:0xe DW_TAG_formal_parameter
+ .int8 2 # DW_AT_location
+ .int8 145
+ .int8 24
+ .int32 .Linfo_string6 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 2 # DW_AT_decl_line
+ .int32 151 # DW_AT_type
+ .int8 3 # Abbrev [3] 0x50:0xe DW_TAG_formal_parameter
+ .int8 2 # DW_AT_location
+ .int8 145
+ .int8 20
+ .int32 .Linfo_string8 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 2 # DW_AT_decl_line
+ .int32 172 # DW_AT_type
+ .int8 3 # Abbrev [3] 0x5e:0xe DW_TAG_formal_parameter
+ .int8 2 # DW_AT_location
+ .int8 145
+ .int8 19
+ .int32 .Linfo_string10 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 2 # DW_AT_decl_line
+ .int32 179 # DW_AT_type
+ .int8 4 # Abbrev [4] 0x6c:0x18 DW_TAG_lexical_block
+ .int32 .Ltmp2 # DW_AT_low_pc
+ .int32 .Ltmp3-.Ltmp2 # DW_AT_high_pc
+ .int8 5 # Abbrev [5] 0x75:0xe DW_TAG_variable
+ .int8 2 # DW_AT_location
+ .int8 145
+ .int8 12
+ .int32 .Linfo_string12 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 5 # DW_AT_decl_line
+ .int32 186 # DW_AT_type
+ .int8 0 # End Of Children Mark
+ .int8 6 # Abbrev [6] 0x84:0xb DW_TAG_typedef
+ .int32 144 # DW_AT_type
+ .int32 .Linfo_string13 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 4 # DW_AT_decl_line
+ .int8 0 # End Of Children Mark
+ .int8 7 # Abbrev [7] 0x90:0x7 DW_TAG_base_type
+ .int32 .Linfo_string5 # DW_AT_name
+ .int8 5 # DW_AT_encoding
+ .int8 4 # DW_AT_byte_size
+ .int8 6 # Abbrev [6] 0x97:0xb DW_TAG_typedef
+ .int32 162 # DW_AT_type
+ .int32 .Linfo_string7 # DW_AT_name
+ .int8 1 # DW_AT_decl_file
+ .int8 1 # DW_AT_decl_line
+ .int8 8 # Abbrev [8] 0xa2:0x5 DW_TAG_pointer_type
+ .int32 167 # DW_AT_type
+ .int8 9 # Abbrev [9] 0xa7:0x5 DW_TAG_const_type
+ .int32 144 # DW_AT_type
+ .int8 7 # Abbrev [7] 0xac:0x7 DW_TAG_base_type
+ .int32 .Linfo_string9 # DW_AT_name
+ .int8 7 # DW_AT_encoding
+ .int8 4 # DW_AT_byte_size
+ .int8 7 # Abbrev [7] 0xb3:0x7 DW_TAG_base_type
+ .int32 .Linfo_string11 # DW_AT_name
+ .int8 2 # DW_AT_encoding
+ .int8 1 # DW_AT_byte_size
+ .int8 9 # Abbrev [9] 0xba:0x5 DW_TAG_const_type
+ .int32 132 # DW_AT_type
+ .int8 0 # End Of Children Mark
+.Ldebug_info_end0:
+ .section .debug_str,"S",@
+.Linfo_string0:
+ .asciz "clang version 19.0.0git (/data/projects/llvm-root/llvm-project/clang 2db6703f0c257d293df455e2dff8c1fb695c4100)" # string offset=0
+.Linfo_string1:
+ .asciz "test.cpp" # string offset=111
+.Linfo_string2:
+ .asciz "/data/projects/scripts/regression-suite/input/general" # string offset=120
+.Linfo_string3:
+ .asciz "_Z3fooPKijb" # string offset=174
+.Linfo_string4:
+ .asciz "foo" # string offset=186
+.Linfo_string5:
+ .asciz "int" # string offset=190
+.Linfo_string6:
+ .asciz "ParamPtr" # string offset=194
+.Linfo_string7:
+ .asciz "INTPTR" # string offset=203
+.Linfo_string8:
+ .asciz "ParamUnsigned" # string offset=210
+.Linfo_string9:
+ .asciz "unsigned int" # string offset=224
+.Linfo_string10:
+ .asciz "ParamBool" # string offset=237
+.Linfo_string11:
+ .asciz "bool" # string offset=247
+.Linfo_string12:
+ .asciz "CONSTANT" # string offset=252
+.Linfo_string13:
+ .asciz "INTEGER" # string offset=261
+ .ident "clang version 19.0.0git (/data/projects/llvm-root/llvm-project/clang 2db6703f0c257d293df455e2dff8c1fb695c4100)"
+ .section .custom_section.producers,"",@
+ .int8 2
+ .int8 8
+ .ascii "language"
+ .int8 1
+ .int8 14
+ .ascii "C_plus_plus_14"
+ .int8 0
+ .int8 12
+ .ascii "processed-by"
+ .int8 1
+ .int8 5
+ .ascii "clang"
+ .int8 96
+ .ascii "19.0.0git (/data/projects/llvm-root/llvm-project/clang 2db6703f0c257d293df455e2dff8c1fb695c4100)"
+ .section .debug_str,"S",@
+ .section .custom_section.target_features,"",@
+ .int8 2
+ .int8 43
+ .int8 15
+ .ascii "mutable-globals"
+ .int8 43
+ .int8 8
+ .ascii "sign-ext"
+ .section .debug_str,"S",@
+ .section .debug_line,"",@
+.Lline_table_start0:
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/test.cpp b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/test.cpp
new file mode 100644
index 0000000..5cf39f4
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/Inputs/test.cpp
@@ -0,0 +1,9 @@
+using INTPTR = const int *;
+int foo(INTPTR ParamPtr, unsigned ParamUnsigned, bool ParamBool) {
+ if (ParamBool) {
+ typedef int INTEGER;
+ const INTEGER CONSTANT = 7;
+ return CONSTANT;
+ }
+ return ParamUnsigned;
+}
diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/README.txt b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/README.txt
new file mode 100644
index 0000000..6937bb0
--- /dev/null
+++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/README.txt
@@ -0,0 +1,28 @@
+Notes:
+------
+To avoid committing binaries (.wasm) for use in tests, we provide the
+'.cpp' source files and the '.s' files instead.
+
+- For the tests, only the '.s' files are required.
+- We use the 'wasm32' target, as 'wasm64' is not yet standardized.
+
+How to generate .s from .cpp
+----------------------------
+Use clang to generate the '.s'.
+
+ clang --target=wasm32 -S -g Inputs/hello-world.cpp -o Inputs/hello-world-clang.s
+ clang --target=wasm32 -S -g Inputs/pr-43860.cpp -o Inputs/pr-43860-clang.s
+ clang --target=wasm32 -S -g Inputs/pr-44884.cpp -o Inputs/pr-44884-clang.s
+ clang --target=wasm32 -S -g Inputs/pr-46466.cpp -o Inputs/pr-46466-clang.s
+ clang --target=wasm32 -S -g Inputs/test.cpp -o Inputs/test-clang.s
+
+How to generate .o from .s
+--------------------------
+Each test executes one of the following commands to generate the binary
+object file ('.o') used by that specific test:
+
+ llvm-mc -arch=wasm32 -filetype=obj %p/Inputs/hello-world-clang.s -o hello-world-clang.o
+ llvm-mc -arch=wasm32 -filetype=obj %p/Inputs/pr-43860-clang.s -o pr-43860-clang.o
+ llvm-mc -arch=wasm32 -filetype=obj %p/Inputs/pr-44884-clang.s -o pr-44884-clang.o
+ llvm-mc -arch=wasm32 -filetype=obj %p/Inputs/pr-46466-clang.s -o pr-46466-clang.o
+ llvm-mc -arch=wasm32 -filetype=obj %p/Inputs/test-clang.s -o test-clang.o
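+
+Each test then runs llvm-debuginfo-analyzer on the resulting object file.
+The invocation below is only an illustrative sketch, not a command taken
+from this patch; the exact options differ per test (option names assumed
+from the llvm-debuginfo-analyzer documentation):
+
+  llvm-debuginfo-analyzer --attribute=level --output-sort=name \
+                          --print=scopes,symbols,types,lines test-clang.o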
diff --git a/llvm/test/tools/llvm-lib/arm64ec-implib.test b/llvm/test/tools/llvm-lib/arm64ec-implib.test
index 00eddd2..e9987d0 100644
--- a/llvm/test/tools/llvm-lib/arm64ec-implib.test
+++ b/llvm/test/tools/llvm-lib/arm64ec-implib.test
@@ -14,6 +14,8 @@ ARMAP-NEXT: Archive EC map
ARMAP-NEXT: #expname in test.dll
ARMAP-NEXT: #funcexp in test.dll
ARMAP-NEXT: #mangledfunc in test.dll
+ARMAP-NEXT: #manglednonamefunc in test.dll
+ARMAP-NEXT: #nonamefunc in test.dll
ARMAP-NEXT: ?test_cpp_func@@$$hYAHPEAX@Z in test.dll
ARMAP-NEXT: ?test_cpp_func@@YAHPEAX@Z in test.dll
ARMAP-NEXT: __IMPORT_DESCRIPTOR_test in test.dll
@@ -23,28 +25,34 @@ ARMAP-NEXT: __imp_aux_?test_cpp_func@@YAHPEAX@Z in test.dll
ARMAP-NEXT: __imp_aux_expname in test.dll
ARMAP-NEXT: __imp_aux_funcexp in test.dll
ARMAP-NEXT: __imp_aux_mangledfunc in test.dll
+ARMAP-NEXT: __imp_aux_manglednonamefunc in test.dll
+ARMAP-NEXT: __imp_aux_nonamefunc in test.dll
ARMAP-NEXT: __imp_dataexp in test.dll
ARMAP-NEXT: __imp_expname in test.dll
ARMAP-NEXT: __imp_funcexp in test.dll
ARMAP-NEXT: __imp_mangledfunc in test.dll
+ARMAP-NEXT: __imp_manglednonamefunc in test.dll
+ARMAP-NEXT: __imp_nonamefunc in test.dll
ARMAP-NEXT: expname in test.dll
ARMAP-NEXT: funcexp in test.dll
ARMAP-NEXT: mangledfunc in test.dll
+ARMAP-NEXT: manglednonamefunc in test.dll
+ARMAP-NEXT: nonamefunc in test.dll
ARMAP-NEXT: test_NULL_THUNK_DATA in test.dll
RUN: llvm-readobj test.lib | FileCheck -check-prefix=READOBJ %s
-READOBJ: File: test.lib(test.dll)
+READOBJ: File: test{{.*}}.lib(test.dll)
READOBJ-NEXT: Format: COFF-ARM64{{$}}
READOBJ-NEXT: Arch: aarch64
READOBJ-NEXT: AddressSize: 64bit
READOBJ-EMPTY:
-READOBJ-NEXT: File: test.lib(test.dll)
+READOBJ-NEXT: File: test{{.*}}.lib(test.dll)
READOBJ-NEXT: Format: COFF-ARM64{{$}}
READOBJ-NEXT: Arch: aarch64
READOBJ-NEXT: AddressSize: 64bit
READOBJ-EMPTY:
-READOBJ-NEXT: File: test.lib(test.dll)
+READOBJ-NEXT: File: test{{.*}}.lib(test.dll)
READOBJ-NEXT: Format: COFF-ARM64{{$}}
READOBJ-NEXT: Arch: aarch64
READOBJ-NEXT: AddressSize: 64bit
@@ -95,6 +103,30 @@ READOBJ-NEXT: Type: data
READOBJ-NEXT: Name type: name
READOBJ-NEXT: Export name: dataexp
READOBJ-NEXT: Symbol: __imp_dataexp
+READOBJ-EMPTY:
+READOBJ-NEXT: File: test.dll
+READOBJ-NEXT: Format: COFF-import-file-ARM64EC
+READOBJ-NEXT: Type: code
+READOBJ-NEXT: Name type: ordinal
+READOBJ-NEXT: Symbol: __imp_nonamefunc
+READOBJ-NEXT: Symbol: nonamefunc
+READOBJ-NEXT: Symbol: __imp_aux_nonamefunc
+READOBJ-NEXT: Symbol: #nonamefunc
+READOBJ-EMPTY:
+READOBJ-NEXT: File: test.dll
+READOBJ-NEXT: Format: COFF-import-file-ARM64EC
+READOBJ-NEXT: Type: code
+READOBJ-NEXT: Name type: ordinal
+READOBJ-NEXT: Symbol: __imp_manglednonamefunc
+READOBJ-NEXT: Symbol: manglednonamefunc
+READOBJ-NEXT: Symbol: __imp_aux_manglednonamefunc
+READOBJ-NEXT: Symbol: #manglednonamefunc
+
+
+Using -machine:arm64x gives the same output.
+RUN: llvm-lib -machine:arm64x -def:test.def -out:testx.lib
+RUN: llvm-nm --print-armap testx.lib | FileCheck -check-prefix=ARMAP %s
+RUN: llvm-readobj testx.lib | FileCheck -check-prefix=READOBJ %s
Creating a new lib containing the existing lib:
RUN: llvm-lib -machine:arm64ec test.lib -out:test2.lib
@@ -107,22 +139,28 @@ RUN: llvm-nm --print-armap testx.lib | FileCheck -check-prefix=ARMAPX %s
ARMAPX: Archive map
ARMAPX-NEXT: #mangledfunc in test.dll
+ARMAPX-NEXT: #manglednonamefunc in test.dll
ARMAPX-NEXT: ?test_cpp_func@@YAHPEAX@Z in test.dll
ARMAPX-NEXT: __IMPORT_DESCRIPTOR_test in test.dll
ARMAPX-NEXT: __NULL_IMPORT_DESCRIPTOR in test.dll
ARMAPX-NEXT: __imp_#mangledfunc in test.dll
+ARMAPX-NEXT: __imp_#manglednonamefunc in test.dll
ARMAPX-NEXT: __imp_?test_cpp_func@@YAHPEAX@Z in test.dll
ARMAPX-NEXT: __imp_dataexp in test.dll
ARMAPX-NEXT: __imp_expname in test.dll
ARMAPX-NEXT: __imp_funcexp in test.dll
+ARMAPX-NEXT: __imp_nonamefunc in test.dll
ARMAPX-NEXT: expname in test.dll
ARMAPX-NEXT: funcexp in test.dll
+ARMAPX-NEXT: nonamefunc in test.dll
ARMAPX-NEXT: test_NULL_THUNK_DATA in test.dll
ARMAPX-EMPTY:
ARMAPX-NEXT: Archive EC map
ARMAPX-NEXT: #expname in test.dll
ARMAPX-NEXT: #funcexp in test.dll
ARMAPX-NEXT: #mangledfunc in test.dll
+ARMAPX-NEXT: #manglednonamefunc in test.dll
+ARMAPX-NEXT: #nonamefunc in test.dll
ARMAPX-NEXT: ?test_cpp_func@@$$hYAHPEAX@Z in test.dll
ARMAPX-NEXT: ?test_cpp_func@@YAHPEAX@Z in test.dll
ARMAPX-NEXT: __IMPORT_DESCRIPTOR_test in test.dll
@@ -132,13 +170,19 @@ ARMAPX-NEXT: __imp_aux_?test_cpp_func@@YAHPEAX@Z in test.dll
ARMAPX-NEXT: __imp_aux_expname in test.dll
ARMAPX-NEXT: __imp_aux_funcexp in test.dll
ARMAPX-NEXT: __imp_aux_mangledfunc in test.dll
+ARMAPX-NEXT: __imp_aux_manglednonamefunc in test.dll
+ARMAPX-NEXT: __imp_aux_nonamefunc in test.dll
ARMAPX-NEXT: __imp_dataexp in test.dll
ARMAPX-NEXT: __imp_expname in test.dll
ARMAPX-NEXT: __imp_funcexp in test.dll
ARMAPX-NEXT: __imp_mangledfunc in test.dll
+ARMAPX-NEXT: __imp_manglednonamefunc in test.dll
+ARMAPX-NEXT: __imp_nonamefunc in test.dll
ARMAPX-NEXT: expname in test.dll
ARMAPX-NEXT: funcexp in test.dll
ARMAPX-NEXT: mangledfunc in test.dll
+ARMAPX-NEXT: manglednonamefunc in test.dll
+ARMAPX-NEXT: nonamefunc in test.dll
ARMAPX-NEXT: test_NULL_THUNK_DATA in test.dll
RUN: llvm-readobj testx.lib | FileCheck -check-prefix=READOBJX %s
@@ -206,6 +250,24 @@ READOBJX-NEXT: Export name: dataexp
READOBJX-NEXT: Symbol: __imp_dataexp
READOBJX-EMPTY:
READOBJX-NEXT: File: test.dll
+READOBJX-NEXT: Format: COFF-import-file-ARM64EC
+READOBJX-NEXT: Type: code
+READOBJX-NEXT: Name type: ordinal
+READOBJX-NEXT: Symbol: __imp_nonamefunc
+READOBJX-NEXT: Symbol: nonamefunc
+READOBJX-NEXT: Symbol: __imp_aux_nonamefunc
+READOBJX-NEXT: Symbol: #nonamefunc
+READOBJX-EMPTY:
+READOBJX-NEXT: File: test.dll
+READOBJX-NEXT: Format: COFF-import-file-ARM64EC
+READOBJX-NEXT: Type: code
+READOBJX-NEXT: Name type: ordinal
+READOBJX-NEXT: Symbol: __imp_manglednonamefunc
+READOBJX-NEXT: Symbol: manglednonamefunc
+READOBJX-NEXT: Symbol: __imp_aux_manglednonamefunc
+READOBJX-NEXT: Symbol: #manglednonamefunc
+READOBJX-EMPTY:
+READOBJX-NEXT: File: test.dll
READOBJX-NEXT: Format: COFF-import-file-ARM64
READOBJX-NEXT: Type: code
READOBJX-NEXT: Name type: name
@@ -243,10 +305,26 @@ READOBJX-NEXT: Type: data
READOBJX-NEXT: Name type: name
READOBJX-NEXT: Export name: dataexp
READOBJX-NEXT: Symbol: __imp_dataexp
+READOBJX-EMPTY:
+READOBJX-NEXT: File: test.dll
+READOBJX-NEXT: Format: COFF-import-file-ARM64
+READOBJX-NEXT: Type: code
+READOBJX-NEXT: Name type: ordinal
+READOBJX-NEXT: Symbol: __imp_nonamefunc
+READOBJX-NEXT: Symbol: nonamefunc
+READOBJX-EMPTY:
+READOBJX-NEXT: File: test.dll
+READOBJX-NEXT: Format: COFF-import-file-ARM64
+READOBJX-NEXT: Type: code
+READOBJX-NEXT: Name type: ordinal
+READOBJX-NEXT: Symbol: __imp_#manglednonamefunc
+READOBJX-NEXT: Symbol: #manglednonamefunc
RUN: llvm-lib -machine:arm64ec -def:test.def -defArm64Native:test2.def -out:test2.lib
+RUN: llvm-lib -machine:arm64ec -def:test.def -defArm64Native:test2.def -out:test2x.lib
RUN: llvm-nm --print-armap test2.lib | FileCheck -check-prefix=ARMAPX2 %s
+RUN: llvm-nm --print-armap test2x.lib | FileCheck -check-prefix=ARMAPX2 %s
ARMAPX2: Archive map
ARMAPX2-NEXT: __IMPORT_DESCRIPTOR_test2 in test2.dll
@@ -259,6 +337,8 @@ ARMAPX2-NEXT: Archive EC map
ARMAPX2-NEXT: #expname in test2.dll
ARMAPX2-NEXT: #funcexp in test2.dll
ARMAPX2-NEXT: #mangledfunc in test2.dll
+ARMAPX2-NEXT: #manglednonamefunc in test2.dll
+ARMAPX2-NEXT: #nonamefunc in test2.dll
ARMAPX2-NEXT: ?test_cpp_func@@$$hYAHPEAX@Z in test2.dll
ARMAPX2-NEXT: ?test_cpp_func@@YAHPEAX@Z in test2.dll
ARMAPX2-NEXT: __IMPORT_DESCRIPTOR_test2 in test2.dll
@@ -268,13 +348,19 @@ ARMAPX2-NEXT: __imp_aux_?test_cpp_func@@YAHPEAX@Z in test2.dll
ARMAPX2-NEXT: __imp_aux_expname in test2.dll
ARMAPX2-NEXT: __imp_aux_funcexp in test2.dll
ARMAPX2-NEXT: __imp_aux_mangledfunc in test2.dll
+ARMAPX2-NEXT: __imp_aux_manglednonamefunc in test2.dll
+ARMAPX2-NEXT: __imp_aux_nonamefunc in test2.dll
ARMAPX2-NEXT: __imp_dataexp in test2.dll
ARMAPX2-NEXT: __imp_expname in test2.dll
ARMAPX2-NEXT: __imp_funcexp in test2.dll
ARMAPX2-NEXT: __imp_mangledfunc in test2.dll
+ARMAPX2-NEXT: __imp_manglednonamefunc in test2.dll
+ARMAPX2-NEXT: __imp_nonamefunc in test2.dll
ARMAPX2-NEXT: expname in test2.dll
ARMAPX2-NEXT: funcexp in test2.dll
ARMAPX2-NEXT: mangledfunc in test2.dll
+ARMAPX2-NEXT: manglednonamefunc in test2.dll
+ARMAPX2-NEXT: nonamefunc in test2.dll
ARMAPX2-NEXT: test2_NULL_THUNK_DATA in test2.dll
ARMAPX2: test2.dll:
@@ -305,6 +391,18 @@ ARMAPX2-NEXT: test2.dll:
ARMAPX2-NEXT: 00000000 D __imp_dataexp
ARMAPX2-EMPTY:
ARMAPX2-NEXT: test2.dll:
+ARMAPX2-NEXT: 00000000 T #nonamefunc
+ARMAPX2-NEXT: 00000000 T __imp_aux_nonamefunc
+ARMAPX2-NEXT: 00000000 T __imp_nonamefunc
+ARMAPX2-NEXT: 00000000 T nonamefunc
+ARMAPX2-EMPTY:
+ARMAPX2-NEXT: test2.dll:
+ARMAPX2-NEXT: 00000000 T #manglednonamefunc
+ARMAPX2-NEXT: 00000000 T __imp_aux_manglednonamefunc
+ARMAPX2-NEXT: 00000000 T __imp_manglednonamefunc
+ARMAPX2-NEXT: 00000000 T manglednonamefunc
+ARMAPX2-EMPTY:
+ARMAPX2-NEXT: test2.dll:
ARMAPX2-NEXT: 00000000 T __imp_otherfunc
ARMAPX2-NEXT: 00000000 T otherfunc
@@ -399,6 +497,8 @@ EXPORTS
?test_cpp_func@@YAHPEAX@Z
expname=impname
dataexp DATA
+ nonamefunc @1 NONAME
+ #manglednonamefunc @2 NONAME
#--- test2.def
LIBRARY test2.dll
diff --git a/llvm/test/tools/llvm-lib/empty.test b/llvm/test/tools/llvm-lib/empty.test
new file mode 100644
index 0000000..37c841f
--- /dev/null
+++ b/llvm/test/tools/llvm-lib/empty.test
@@ -0,0 +1,27 @@
+Create import libraries with empty exports and make sure that archive symbols
+are properly populated.
+
+RUN: split-file %s %t.dir && cd %t.dir
+
+RUN: llvm-lib -machine:arm64 -out:arm64.lib -def:test.def
+RUN: llvm-nm --print-armap arm64.lib | FileCheck --check-prefixes=CHECK,NOECMAP %s
+
+RUN: llvm-lib -machine:arm64ec -out:arm64ec.lib -def:test.def
+RUN: llvm-nm --print-armap arm64ec.lib | FileCheck --check-prefixes=CHECK,ECMAP %s
+
+CHECK: Archive map
+CHECK-NEXT: __IMPORT_DESCRIPTOR_test in test.dll
+CHECK-NEXT: __NULL_IMPORT_DESCRIPTOR in test.dll
+CHECK-NEXT: test_NULL_THUNK_DATA in test.dll
+CHECK-EMPTY:
+
+ECMAP: Archive EC map
+ECMAP-NEXT: __IMPORT_DESCRIPTOR_test in test.dll
+ECMAP-NEXT: __NULL_IMPORT_DESCRIPTOR in test.dll
+ECMAP-NEXT: test_NULL_THUNK_DATA in test.dll
+
+NOECMAP-NOT: Archive EC map
+
+#--- test.def
+LIBRARY test.dll
+EXPORTS
diff --git a/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s b/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s
index efa81b0..f120f5f 100644
--- a/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s
+++ b/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s
@@ -4093,8 +4093,8 @@ zip2 z31.s, z31.s, z31.s
# CHECK-NEXT: 1 12 7.00 sdiv z0.s, p7/m, z0.s, z31.s
# CHECK-NEXT: 1 20 7.00 sdivr z0.d, p7/m, z0.d, z31.d
# CHECK-NEXT: 1 12 7.00 sdivr z0.s, p7/m, z0.s, z31.s
-# CHECK-NEXT: 1 4 0.50 sdot z0.d, z1.h, z15.h[1]
-# CHECK-NEXT: 1 4 0.50 sdot z0.d, z1.h, z31.h
+# CHECK-NEXT: 1 4 1.00 sdot z0.d, z1.h, z15.h[1]
+# CHECK-NEXT: 1 4 1.00 sdot z0.d, z1.h, z31.h
# CHECK-NEXT: 1 3 0.50 sdot z0.s, z1.b, z31.b
# CHECK-NEXT: 1 3 0.50 sdot z0.s, z1.b, z7.b[3]
# CHECK-NEXT: 1 2 0.50 sel z23.b, p11, z13.b, z8.b
@@ -4569,8 +4569,8 @@ zip2 z31.s, z31.s, z31.s
# CHECK-NEXT: 1 12 7.00 udiv z0.s, p7/m, z0.s, z31.s
# CHECK-NEXT: 1 20 7.00 udivr z0.d, p7/m, z0.d, z31.d
# CHECK-NEXT: 1 12 7.00 udivr z0.s, p7/m, z0.s, z31.s
-# CHECK-NEXT: 1 4 0.50 udot z0.d, z1.h, z15.h[1]
-# CHECK-NEXT: 1 4 0.50 udot z0.d, z1.h, z31.h
+# CHECK-NEXT: 1 4 1.00 udot z0.d, z1.h, z15.h[1]
+# CHECK-NEXT: 1 4 1.00 udot z0.d, z1.h, z31.h
# CHECK-NEXT: 1 3 0.50 udot z0.s, z1.b, z31.b
# CHECK-NEXT: 1 3 0.50 udot z0.s, z1.b, z7.b[3]
# CHECK-NEXT: 1 2 0.50 umax z0.b, z0.b, #0
@@ -4839,7 +4839,7 @@ zip2 z31.s, z31.s, z31.s
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10]
-# CHECK-NEXT: - - - - 88.67 500.67 500.67 797.50 2.50 92.50 92.50 1250.00 923.00 178.50 181.50
+# CHECK-NEXT: - - - - 88.67 500.67 500.67 797.50 2.50 92.50 92.50 1252.00 921.00 178.50 181.50
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10] Instructions:
@@ -6521,8 +6521,8 @@ zip2 z31.s, z31.s, z31.s
# CHECK-NEXT: - - - - - - - - - - - 7.00 - - - sdiv z0.s, p7/m, z0.s, z31.s
# CHECK-NEXT: - - - - - - - - - - - 7.00 - - - sdivr z0.d, p7/m, z0.d, z31.d
# CHECK-NEXT: - - - - - - - - - - - 7.00 - - - sdivr z0.s, p7/m, z0.s, z31.s
-# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - sdot z0.d, z1.h, z15.h[1]
-# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - sdot z0.d, z1.h, z31.h
+# CHECK-NEXT: - - - - - - - - - - - 1.00 - - - sdot z0.d, z1.h, z15.h[1]
+# CHECK-NEXT: - - - - - - - - - - - 1.00 - - - sdot z0.d, z1.h, z31.h
# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - sdot z0.s, z1.b, z31.b
# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - sdot z0.s, z1.b, z7.b[3]
# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - sel z23.b, p11, z13.b, z8.b
@@ -6997,8 +6997,8 @@ zip2 z31.s, z31.s, z31.s
# CHECK-NEXT: - - - - - - - - - - - 7.00 - - - udiv z0.s, p7/m, z0.s, z31.s
# CHECK-NEXT: - - - - - - - - - - - 7.00 - - - udivr z0.d, p7/m, z0.d, z31.d
# CHECK-NEXT: - - - - - - - - - - - 7.00 - - - udivr z0.s, p7/m, z0.s, z31.s
-# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - udot z0.d, z1.h, z15.h[1]
-# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - udot z0.d, z1.h, z31.h
+# CHECK-NEXT: - - - - - - - - - - - 1.00 - - - udot z0.d, z1.h, z15.h[1]
+# CHECK-NEXT: - - - - - - - - - - - 1.00 - - - udot z0.d, z1.h, z31.h
# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - udot z0.s, z1.b, z31.b
# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - udot z0.s, z1.b, z7.b[3]
# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - umax z0.b, z0.b, #0
diff --git a/llvm/test/tools/llvm-mca/AArch64/Neoverse/V2-sve-instructions.s b/llvm/test/tools/llvm-mca/AArch64/Neoverse/V2-sve-instructions.s
index 4d6ce70..acd3556 100644
--- a/llvm/test/tools/llvm-mca/AArch64/Neoverse/V2-sve-instructions.s
+++ b/llvm/test/tools/llvm-mca/AArch64/Neoverse/V2-sve-instructions.s
@@ -4278,9 +4278,9 @@ zip2 z31.s, z31.s, z31.s
# CHECK-NEXT: 1 3 0.25 fscale z0.d, p7/m, z0.d, z31.d
# CHECK-NEXT: 1 3 0.25 fscale z0.h, p7/m, z0.h, z31.h
# CHECK-NEXT: 1 3 0.25 fscale z0.s, p7/m, z0.s, z31.s
-# CHECK-NEXT: 1 16 14.00 fsqrt z31.d, p7/m, z31.d
-# CHECK-NEXT: 1 13 12.00 fsqrt z31.h, p7/m, z31.h
-# CHECK-NEXT: 1 10 9.00 fsqrt z31.s, p7/m, z31.s
+# CHECK-NEXT: 1 16 7.00 fsqrt z31.d, p7/m, z31.d
+# CHECK-NEXT: 1 13 6.00 fsqrt z31.h, p7/m, z31.h
+# CHECK-NEXT: 1 10 4.50 fsqrt z31.s, p7/m, z31.s
# CHECK-NEXT: 1 2 0.25 fsub z0.d, p0/m, z0.d, #0.5
# CHECK-NEXT: 1 2 0.25 fsub z0.d, p7/m, z0.d, z31.d
# CHECK-NEXT: 1 2 0.25 fsub z0.d, z1.d, z31.d
@@ -6861,7 +6861,7 @@ zip2 z31.s, z31.s, z31.s
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13]
-# CHECK-NEXT: - - - - 245.00 651.00 651.00 570.50 272.50 83.75 83.75 81.75 81.75 1554.25 1281.75 776.75 748.25
+# CHECK-NEXT: - - - - 245.00 651.00 651.00 570.50 272.50 83.75 83.75 81.75 81.75 1536.75 1281.75 794.25 748.25
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13] Instructions:
@@ -7718,9 +7718,9 @@ zip2 z31.s, z31.s, z31.s
# CHECK-NEXT: - - - - - - - - - - - - - 0.25 0.25 0.25 0.25 fscale z0.d, p7/m, z0.d, z31.d
# CHECK-NEXT: - - - - - - - - - - - - - 0.25 0.25 0.25 0.25 fscale z0.h, p7/m, z0.h, z31.h
# CHECK-NEXT: - - - - - - - - - - - - - 0.25 0.25 0.25 0.25 fscale z0.s, p7/m, z0.s, z31.s
-# CHECK-NEXT: - - - - - - - - - - - - - 14.00 - - - fsqrt z31.d, p7/m, z31.d
-# CHECK-NEXT: - - - - - - - - - - - - - 12.00 - - - fsqrt z31.h, p7/m, z31.h
-# CHECK-NEXT: - - - - - - - - - - - - - 9.00 - - - fsqrt z31.s, p7/m, z31.s
+# CHECK-NEXT: - - - - - - - - - - - - - 7.00 - 7.00 - fsqrt z31.d, p7/m, z31.d
+# CHECK-NEXT: - - - - - - - - - - - - - 6.00 - 6.00 - fsqrt z31.h, p7/m, z31.h
+# CHECK-NEXT: - - - - - - - - - - - - - 4.50 - 4.50 - fsqrt z31.s, p7/m, z31.s
# CHECK-NEXT: - - - - - - - - - - - - - 0.25 0.25 0.25 0.25 fsub z0.d, p0/m, z0.d, #0.5
# CHECK-NEXT: - - - - - - - - - - - - - 0.25 0.25 0.25 0.25 fsub z0.d, p7/m, z0.d, z31.d
# CHECK-NEXT: - - - - - - - - - - - - - 0.25 0.25 0.25 0.25 fsub z0.d, z1.d, z31.d
diff --git a/llvm/test/tools/llvm-mca/ARM/cortex-a57-basic-instructions.s b/llvm/test/tools/llvm-mca/ARM/cortex-a57-basic-instructions.s
index 2257e45..d686293 100644
--- a/llvm/test/tools/llvm-mca/ARM/cortex-a57-basic-instructions.s
+++ b/llvm/test/tools/llvm-mca/ARM/cortex-a57-basic-instructions.s
@@ -1023,7 +1023,7 @@
# CHECK-NEXT: 2 2 1.00 blxne r2
# CHECK-NEXT: 2 1 1.00 U blx #32424576
# CHECK-NEXT: 2 1 1.00 U blx #16212288
-# CHECK-NEXT: 1 1 1.00 U bx r2
+# CHECK-NEXT: 1 1 1.00 bx r2
# CHECK-NEXT: 1 1 1.00 U bxne r2
# CHECK-NEXT: 1 1 1.00 U bxj r2
# CHECK-NEXT: 1 1 1.00 U bxjne r2
diff --git a/llvm/test/tools/llvm-mca/RISCV/SiFive7/gpr-bypass.s b/llvm/test/tools/llvm-mca/RISCV/SiFive7/gpr-bypass.s
index 892a5d1..03f7de2 100644
--- a/llvm/test/tools/llvm-mca/RISCV/SiFive7/gpr-bypass.s
+++ b/llvm/test/tools/llvm-mca/RISCV/SiFive7/gpr-bypass.s
@@ -180,10 +180,10 @@ jr a0
# CHECK-NEXT: 1 3 0.50 sext.b a0, a0
# CHECK-NEXT: 1 3 0.50 sext.h a0, a0
# CHECK-NEXT: 1 3 0.50 zext.h a0, a0
-# CHECK-NEXT: 1 3 0.50 min a0, a0, a0
-# CHECK-NEXT: 1 3 0.50 minu a0, a0, a0
-# CHECK-NEXT: 1 3 0.50 max a0, a0, a0
-# CHECK-NEXT: 1 3 0.50 maxu a0, a0, a0
+# CHECK-NEXT: 1 3 1.00 min a0, a0, a0
+# CHECK-NEXT: 1 3 1.00 minu a0, a0, a0
+# CHECK-NEXT: 1 3 1.00 max a0, a0, a0
+# CHECK-NEXT: 1 3 1.00 maxu a0, a0, a0
# CHECK-NEXT: 1 3 1.00 rol a0, a0, a0
# CHECK-NEXT: 1 3 1.00 ror a0, a0, a0
# CHECK-NEXT: 1 3 1.00 rori a0, a0, 1
@@ -225,7 +225,7 @@ jr a0
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7]
-# CHECK-NEXT: - - 39.00 52.00 - - - -
+# CHECK-NEXT: - - 37.00 54.00 - - - -
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] Instructions:
@@ -289,9 +289,9 @@ jr a0
# CHECK-NEXT: - - - 1.00 - - - - sext.h a0, a0
# CHECK-NEXT: - - 1.00 - - - - - zext.h a0, a0
# CHECK-NEXT: - - - 1.00 - - - - min a0, a0, a0
-# CHECK-NEXT: - - 1.00 - - - - - minu a0, a0, a0
+# CHECK-NEXT: - - - 1.00 - - - - minu a0, a0, a0
# CHECK-NEXT: - - - 1.00 - - - - max a0, a0, a0
-# CHECK-NEXT: - - 1.00 - - - - - maxu a0, a0, a0
+# CHECK-NEXT: - - - 1.00 - - - - maxu a0, a0, a0
# CHECK-NEXT: - - - 1.00 - - - - rol a0, a0, a0
# CHECK-NEXT: - - - 1.00 - - - - ror a0, a0, a0
# CHECK-NEXT: - - - 1.00 - - - - rori a0, a0, 1
diff --git a/llvm/test/tools/llvm-mca/RISCV/SiFive7/vector-integer-arithmetic.s b/llvm/test/tools/llvm-mca/RISCV/SiFive7/vector-integer-arithmetic.s
index 21459bc..3b6fd7e 100644
--- a/llvm/test/tools/llvm-mca/RISCV/SiFive7/vector-integer-arithmetic.s
+++ b/llvm/test/tools/llvm-mca/RISCV/SiFive7/vector-integer-arithmetic.s
@@ -399,6 +399,26 @@ vmseq.vv v4, v8, v12
vsetvli zero, zero, e64, m8, tu, mu
vmseq.vx v4, v8, x10
+# Pseudo instructions
+vsetvli zero, zero, e8, mf8, tu, mu
+vmslt.vi v4, v8, 1
+vsetvli zero, zero, e8, mf4, tu, mu
+vmsltu.vi v4, v8, 1
+vsetvli zero, zero, e8, mf2, tu, mu
+vmsltu.vi v4, v8, 0
+vsetvli zero, zero, e8, m1, tu, mu
+vmsgeu.vi v4, v8, 1
+vsetvli zero, zero, e8, m2, tu, mu
+vmsge.vi v4, v8, 1
+vsetvli zero, zero, e8, m4, tu, mu
+vmsgeu.vi v4, v8, 0
+vsetvli zero, zero, e16, mf4, tu, mu
+vmsge.vi v4, v8, 0
+vsetvli zero, zero, e16, mf2, tu, mu
+vmsge.vx v4, v8, x10
+vsetvli zero, zero, e16, m1, tu, mu
+vmsgeu.vx v4, v8, x11
+
# Vector Integer Min/Max Instructions
vsetvli zero, zero, e8, mf8, tu, mu
vminu.vv v4, v8, v12
@@ -754,14 +774,14 @@ vsetvli zero, zero, e64, m8, tu, mu
vmv.v.v v4, v12
# CHECK: Iterations: 1
-# CHECK-NEXT: Instructions: 707
-# CHECK-NEXT: Total Cycles: 11962
-# CHECK-NEXT: Total uOps: 707
+# CHECK-NEXT: Instructions: 727
+# CHECK-NEXT: Total Cycles: 12018
+# CHECK-NEXT: Total uOps: 727
# CHECK: Dispatch Width: 2
# CHECK-NEXT: uOps Per Cycle: 0.06
# CHECK-NEXT: IPC: 0.06
-# CHECK-NEXT: Block RThroughput: 11549.0
+# CHECK-NEXT: Block RThroughput: 11583.0
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
@@ -1144,6 +1164,26 @@ vmv.v.v v4, v12
# CHECK-NEXT: 1 3 1.00 U vsetvli zero, zero, e64, m8, tu, mu
# CHECK-NEXT: 1 19 17.00 vmseq.vx v4, v8, a0
# CHECK-NEXT: 1 3 1.00 U vsetvli zero, zero, e8, mf8, tu, mu
+# CHECK-NEXT: 1 4 2.00 vmsle.vi v4, v8, 0
+# CHECK-NEXT: 1 3 1.00 U vsetvli zero, zero, e8, mf4, tu, mu
+# CHECK-NEXT: 1 4 2.00 vmsleu.vi v4, v8, 0
+# CHECK-NEXT: 1 3 1.00 U vsetvli zero, zero, e8, mf2, tu, mu
+# CHECK-NEXT: 1 4 2.00 vmsne.vv v4, v8, v8
+# CHECK-NEXT: 1 3 1.00 U vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: 1 5 3.00 vmsgtu.vi v4, v8, 0
+# CHECK-NEXT: 1 3 1.00 U vsetvli zero, zero, e8, m2, tu, mu
+# CHECK-NEXT: 1 7 5.00 vmsgt.vi v4, v8, 0
+# CHECK-NEXT: 1 3 1.00 U vsetvli zero, zero, e8, m4, tu, mu
+# CHECK-NEXT: 1 11 9.00 vmseq.vv v4, v8, v8
+# CHECK-NEXT: 1 3 1.00 U vsetvli zero, zero, e16, mf4, tu, mu
+# CHECK-NEXT: 1 4 2.00 vmsgt.vi v4, v8, -1
+# CHECK-NEXT: 1 3 1.00 U vsetvli zero, zero, e16, mf2, tu, mu
+# CHECK-NEXT: 1 4 2.00 vmslt.vx v4, v8, a0
+# CHECK-NEXT: 1 4 2.00 vmnot.m v4, v4
+# CHECK-NEXT: 1 3 1.00 U vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: 1 5 3.00 vmsltu.vx v4, v8, a1
+# CHECK-NEXT: 1 4 2.00 vmnot.m v4, v4
+# CHECK-NEXT: 1 3 1.00 U vsetvli zero, zero, e8, mf8, tu, mu
# CHECK-NEXT: 1 4 2.00 vminu.vv v4, v8, v12
# CHECK-NEXT: 1 3 1.00 U vsetvli zero, zero, e8, mf4, tu, mu
# CHECK-NEXT: 1 4 2.00 vminu.vx v4, v8, a0
@@ -1492,7 +1532,7 @@ vmv.v.v v4, v12
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7]
-# CHECK-NEXT: - - 333.00 - 11549.00 374.00 - -
+# CHECK-NEXT: - - 342.00 - 11583.00 385.00 - -
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] Instructions:
@@ -1868,6 +1908,26 @@ vmv.v.v v4, v12
# CHECK-NEXT: - - 1.00 - - - - - vsetvli zero, zero, e64, m8, tu, mu
# CHECK-NEXT: - - - - 17.00 1.00 - - vmseq.vx v4, v8, a0
# CHECK-NEXT: - - 1.00 - - - - - vsetvli zero, zero, e8, mf8, tu, mu
+# CHECK-NEXT: - - - - 2.00 1.00 - - vmsle.vi v4, v8, 0
+# CHECK-NEXT: - - 1.00 - - - - - vsetvli zero, zero, e8, mf4, tu, mu
+# CHECK-NEXT: - - - - 2.00 1.00 - - vmsleu.vi v4, v8, 0
+# CHECK-NEXT: - - 1.00 - - - - - vsetvli zero, zero, e8, mf2, tu, mu
+# CHECK-NEXT: - - - - 2.00 1.00 - - vmsne.vv v4, v8, v8
+# CHECK-NEXT: - - 1.00 - - - - - vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: - - - - 3.00 1.00 - - vmsgtu.vi v4, v8, 0
+# CHECK-NEXT: - - 1.00 - - - - - vsetvli zero, zero, e8, m2, tu, mu
+# CHECK-NEXT: - - - - 5.00 1.00 - - vmsgt.vi v4, v8, 0
+# CHECK-NEXT: - - 1.00 - - - - - vsetvli zero, zero, e8, m4, tu, mu
+# CHECK-NEXT: - - - - 9.00 1.00 - - vmseq.vv v4, v8, v8
+# CHECK-NEXT: - - 1.00 - - - - - vsetvli zero, zero, e16, mf4, tu, mu
+# CHECK-NEXT: - - - - 2.00 1.00 - - vmsgt.vi v4, v8, -1
+# CHECK-NEXT: - - 1.00 - - - - - vsetvli zero, zero, e16, mf2, tu, mu
+# CHECK-NEXT: - - - - 2.00 1.00 - - vmslt.vx v4, v8, a0
+# CHECK-NEXT: - - - - 2.00 1.00 - - vmnot.m v4, v4
+# CHECK-NEXT: - - 1.00 - - - - - vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: - - - - 3.00 1.00 - - vmsltu.vx v4, v8, a1
+# CHECK-NEXT: - - - - 2.00 1.00 - - vmnot.m v4, v4
+# CHECK-NEXT: - - 1.00 - - - - - vsetvli zero, zero, e8, mf8, tu, mu
# CHECK-NEXT: - - - - 2.00 1.00 - - vminu.vv v4, v8, v12
# CHECK-NEXT: - - 1.00 - - - - - vsetvli zero, zero, e8, mf4, tu, mu
# CHECK-NEXT: - - - - 2.00 1.00 - - vminu.vx v4, v8, a0
diff --git a/llvm/test/tools/llvm-mca/X86/Broadwell/resources-avx1.s b/llvm/test/tools/llvm-mca/X86/Broadwell/resources-avx1.s
index c33cc79..98b8619 100644
--- a/llvm/test/tools/llvm-mca/X86/Broadwell/resources-avx1.s
+++ b/llvm/test/tools/llvm-mca/X86/Broadwell/resources-avx1.s
@@ -1564,30 +1564,30 @@ vzeroupper
# CHECK-NEXT: 2 6 0.50 * vpsignw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpslld $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpslld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpslld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpslld (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpslldq $1, %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsllq $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsllq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpsllq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpsllq (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsllw $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsllw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpsllw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpsllw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrad $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrad %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpsrad (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpsrad (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsraw $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsraw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpsraw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpsraw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrld $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpsrld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpsrld (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrldq $1, %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrlq $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrlq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpsrlq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpsrlq (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrlw $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrlw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpsrlw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpsrlw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsubb %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 6 0.50 * vpsubb (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsubd %xmm0, %xmm1, %xmm2
@@ -1736,7 +1736,7 @@ vzeroupper
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9]
-# CHECK-NEXT: - 257.00 215.25 235.25 176.17 176.17 38.00 432.25 2.25 12.67
+# CHECK-NEXT: - 257.00 215.25 235.25 176.17 176.17 38.00 424.25 2.25 12.67
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] Instructions:
@@ -2274,30 +2274,30 @@ vzeroupper
# CHECK-NEXT: - - - 0.50 0.50 0.50 - 0.50 - - vpsignw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpslld $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpslld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpslld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpslld (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - vpslldq $1, %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsllq $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsllq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsllq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsllq (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsllw $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsllw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsllw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsllw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrad $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrad %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrad (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrad (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsraw $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsraw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsraw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsraw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrld $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrld (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - vpsrldq $1, %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrlq $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrlq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrlq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrlq (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrlw $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrlw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrlw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrlw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - vpsubb %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 0.50 0.50 - 0.50 - - vpsubb (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - vpsubd %xmm0, %xmm1, %xmm2
diff --git a/llvm/test/tools/llvm-mca/X86/Broadwell/resources-sse2.s b/llvm/test/tools/llvm-mca/X86/Broadwell/resources-sse2.s
index 5bed312..e76d905 100644
--- a/llvm/test/tools/llvm-mca/X86/Broadwell/resources-sse2.s
+++ b/llvm/test/tools/llvm-mca/X86/Broadwell/resources-sse2.s
@@ -596,30 +596,30 @@ xorpd (%rax), %xmm2
# CHECK-NEXT: 2 6 1.00 * pshuflw $1, (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 pslld $1, %xmm2
# CHECK-NEXT: 2 2 1.00 pslld %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * pslld (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * pslld (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 pslldq $1, %xmm2
# CHECK-NEXT: 1 1 1.00 psllq $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psllq %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * psllq (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * psllq (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psllw $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psllw %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * psllw (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * psllw (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrad $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrad %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * psrad (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * psrad (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psraw $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psraw %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * psraw (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * psraw (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrld $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrld %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * psrld (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * psrld (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrldq $1, %xmm2
# CHECK-NEXT: 1 1 1.00 psrlq $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrlq %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * psrlq (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * psrlq (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrlw $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrlw %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * psrlw (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * psrlw (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psubb %xmm0, %xmm2
# CHECK-NEXT: 2 6 0.50 * psubb (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psubd %xmm0, %xmm2
@@ -689,7 +689,7 @@ xorpd (%rax), %xmm2
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9]
-# CHECK-NEXT: - 78.00 70.75 95.75 63.17 63.17 14.00 127.25 2.25 4.67
+# CHECK-NEXT: - 78.00 70.75 95.75 63.17 63.17 14.00 119.25 2.25 4.67
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] Instructions:
@@ -882,30 +882,30 @@ xorpd (%rax), %xmm2
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - pshuflw $1, (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - pslld $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - pslld %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - pslld (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - pslld (%rax), %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - pslldq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psllq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psllq %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psllq (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psllq (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psllw $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psllw %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psllw (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psllw (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrad $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrad %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrad (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrad (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psraw $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psraw %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psraw (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psraw (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrld $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrld %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrld (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrld (%rax), %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - psrldq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrlq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrlq %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrlq (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrlq (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrlw $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrlw %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrlw (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrlw (%rax), %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - psubb %xmm0, %xmm2
# CHECK-NEXT: - - - 0.50 0.50 0.50 - 0.50 - - psubb (%rax), %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - psubd %xmm0, %xmm2
diff --git a/llvm/test/tools/llvm-mca/X86/Haswell/resources-avx1.s b/llvm/test/tools/llvm-mca/X86/Haswell/resources-avx1.s
index 3da547d..376070d 100644
--- a/llvm/test/tools/llvm-mca/X86/Haswell/resources-avx1.s
+++ b/llvm/test/tools/llvm-mca/X86/Haswell/resources-avx1.s
@@ -1564,30 +1564,30 @@ vzeroupper
# CHECK-NEXT: 2 7 0.50 * vpsignw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpslld $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpslld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpslld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpslld (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpslldq $1, %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsllq $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsllq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpsllq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpsllq (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsllw $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsllw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpsllw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpsllw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrad $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrad %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpsrad (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpsrad (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsraw $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsraw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpsraw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpsraw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrld $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpsrld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpsrld (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrldq $1, %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrlq $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrlq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpsrlq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpsrlq (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrlw $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrlw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpsrlw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpsrlw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsubb %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsubb (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsubd %xmm0, %xmm1, %xmm2
@@ -1736,7 +1736,7 @@ vzeroupper
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9]
-# CHECK-NEXT: - 336.00 214.58 236.58 176.17 176.17 38.00 435.58 2.25 12.67
+# CHECK-NEXT: - 336.00 214.58 236.58 176.17 176.17 38.00 427.58 2.25 12.67
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] Instructions:
@@ -2274,30 +2274,30 @@ vzeroupper
# CHECK-NEXT: - - - 0.50 0.50 0.50 - 0.50 - - vpsignw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpslld $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpslld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpslld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpslld (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - vpslldq $1, %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsllq $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsllq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsllq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsllq (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsllw $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsllw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsllw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsllw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrad $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrad %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrad (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrad (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsraw $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsraw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsraw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsraw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrld $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrld (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - vpsrldq $1, %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrlq $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrlq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrlq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrlq (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrlw $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrlw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrlw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrlw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - vpsubb %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 0.50 0.50 - 0.50 - - vpsubb (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - vpsubd %xmm0, %xmm1, %xmm2
diff --git a/llvm/test/tools/llvm-mca/X86/Haswell/resources-sse2.s b/llvm/test/tools/llvm-mca/X86/Haswell/resources-sse2.s
index 3813ef4..3b4aeb3 100644
--- a/llvm/test/tools/llvm-mca/X86/Haswell/resources-sse2.s
+++ b/llvm/test/tools/llvm-mca/X86/Haswell/resources-sse2.s
@@ -596,30 +596,30 @@ xorpd (%rax), %xmm2
# CHECK-NEXT: 2 7 1.00 * pshuflw $1, (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 pslld $1, %xmm2
# CHECK-NEXT: 2 2 1.00 pslld %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * pslld (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * pslld (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 pslldq $1, %xmm2
# CHECK-NEXT: 1 1 1.00 psllq $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psllq %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * psllq (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * psllq (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psllw $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psllw %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * psllw (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * psllw (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrad $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrad %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * psrad (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * psrad (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psraw $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psraw %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * psraw (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * psraw (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrld $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrld %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * psrld (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * psrld (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrldq $1, %xmm2
# CHECK-NEXT: 1 1 1.00 psrlq $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrlq %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * psrlq (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * psrlq (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrlw $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrlw %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * psrlw (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * psrlw (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psubb %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psubb (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psubd %xmm0, %xmm2
@@ -689,7 +689,7 @@ xorpd (%rax), %xmm2
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9]
-# CHECK-NEXT: - 112.00 70.75 95.75 63.17 63.17 14.00 127.25 2.25 4.67
+# CHECK-NEXT: - 112.00 70.75 95.75 63.17 63.17 14.00 119.25 2.25 4.67
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] Instructions:
@@ -882,30 +882,30 @@ xorpd (%rax), %xmm2
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - pshuflw $1, (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - pslld $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - pslld %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - pslld (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - pslld (%rax), %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - pslldq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psllq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psllq %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psllq (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psllq (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psllw $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psllw %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psllw (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psllw (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrad $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrad %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrad (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrad (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psraw $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psraw %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psraw (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psraw (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrld $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrld %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrld (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrld (%rax), %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - psrldq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrlq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrlq %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrlq (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrlq (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrlw $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrlw %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrlw (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrlw (%rax), %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - psubb %xmm0, %xmm2
# CHECK-NEXT: - - - 0.50 0.50 0.50 - 0.50 - - psubb (%rax), %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - psubd %xmm0, %xmm2
diff --git a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx1.s b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx1.s
index f184d55..bd7a489 100644
--- a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx1.s
+++ b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx1.s
@@ -1563,30 +1563,30 @@ vzeroupper
# CHECK-NEXT: 1 1 0.50 vpsignw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsignw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpslld $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpslld %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpslld %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpslld (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpslldq $1, %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsllq $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpsllq %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpsllq %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsllq (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsllw $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpsllw %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpsllw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsllw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsrad $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpsrad %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpsrad %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsrad (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsraw $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpsraw %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpsraw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsraw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsrld $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpsrld %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpsrld %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsrld (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsrldq $1, %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsrlq $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpsrlq %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpsrlq %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsrlq (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsrlw $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpsrlw %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpsrlw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsrlw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.33 vpsubb %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsubb (%rax), %xmm1, %xmm2
@@ -1738,7 +1738,7 @@ vzeroupper
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11]
-# CHECK-NEXT: - 126.00 322.92 233.92 160.50 160.50 19.00 295.92 6.25 19.00 19.00 19.00
+# CHECK-NEXT: - 126.00 322.92 237.92 160.50 160.50 19.00 291.92 6.25 19.00 19.00 19.00
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] Instructions:
@@ -2275,30 +2275,30 @@ vzeroupper
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsignw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsignw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpslld $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpslld %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpslld %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpslld (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - - - vpslldq $1, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsllq $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsllq %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsllq %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllq (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsllw $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsllw %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsllw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsrad $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrad %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsrad %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrad (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsraw $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsraw %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsraw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsraw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsrld $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrld %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsrld %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrld (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - - - vpsrldq $1, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsrlq $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrlq %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsrlq %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlq (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsrlw $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrlw %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsrlw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 0.33 0.33 - - - 0.33 - - - - vpsubb %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.33 0.33 0.50 0.50 - 0.33 - - - - vpsubb (%rax), %xmm1, %xmm2
diff --git a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512bwvl.s b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512bwvl.s
index a0715ce..ee095f0 100644
--- a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512bwvl.s
+++ b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512bwvl.s
@@ -1768,11 +1768,11 @@ vpunpcklwd (%rax), %ymm17, %ymm19 {z}{k1}
# CHECK-NEXT: 2 8 0.50 * vpsllw $0, (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 1 0.50 vpsllw $0, %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 2 8 0.50 * vpsllw $0, (%rax), %ymm19 {%k1} {z}
-# CHECK-NEXT: 2 2 1.00 vpsllw %xmm16, %xmm17, %xmm19
+# CHECK-NEXT: 2 2 0.67 vpsllw %xmm16, %xmm17, %xmm19
# CHECK-NEXT: 2 7 0.50 * vpsllw (%rax), %xmm17, %xmm19
-# CHECK-NEXT: 2 2 1.00 vpsllw %xmm16, %xmm17, %xmm19 {%k1}
+# CHECK-NEXT: 2 2 0.67 vpsllw %xmm16, %xmm17, %xmm19 {%k1}
# CHECK-NEXT: 2 7 0.50 * vpsllw (%rax), %xmm17, %xmm19 {%k1}
-# CHECK-NEXT: 2 2 1.00 vpsllw %xmm16, %xmm17, %xmm19 {%k1} {z}
+# CHECK-NEXT: 2 2 0.67 vpsllw %xmm16, %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 7 0.50 * vpsllw (%rax), %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 4 1.00 vpsllw %xmm16, %ymm17, %ymm19
# CHECK-NEXT: 2 8 0.50 * vpsllw (%rax), %ymm17, %ymm19
@@ -1804,11 +1804,11 @@ vpunpcklwd (%rax), %ymm17, %ymm19 {z}{k1}
# CHECK-NEXT: 2 8 0.50 * vpsraw $0, (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 1 0.50 vpsraw $0, %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 2 8 0.50 * vpsraw $0, (%rax), %ymm19 {%k1} {z}
-# CHECK-NEXT: 2 2 1.00 vpsraw %xmm16, %xmm17, %xmm19
+# CHECK-NEXT: 2 2 0.67 vpsraw %xmm16, %xmm17, %xmm19
# CHECK-NEXT: 2 7 0.50 * vpsraw (%rax), %xmm17, %xmm19
-# CHECK-NEXT: 2 2 1.00 vpsraw %xmm16, %xmm17, %xmm19 {%k1}
+# CHECK-NEXT: 2 2 0.67 vpsraw %xmm16, %xmm17, %xmm19 {%k1}
# CHECK-NEXT: 2 7 0.50 * vpsraw (%rax), %xmm17, %xmm19 {%k1}
-# CHECK-NEXT: 2 2 1.00 vpsraw %xmm16, %xmm17, %xmm19 {%k1} {z}
+# CHECK-NEXT: 2 2 0.67 vpsraw %xmm16, %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 7 0.50 * vpsraw (%rax), %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 4 1.00 vpsraw %xmm16, %ymm17, %ymm19
# CHECK-NEXT: 2 8 0.50 * vpsraw (%rax), %ymm17, %ymm19
@@ -1844,11 +1844,11 @@ vpunpcklwd (%rax), %ymm17, %ymm19 {z}{k1}
# CHECK-NEXT: 2 8 0.50 * vpsrlw $0, (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 1 0.50 vpsrlw $0, %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 2 8 0.50 * vpsrlw $0, (%rax), %ymm19 {%k1} {z}
-# CHECK-NEXT: 2 2 1.00 vpsrlw %xmm16, %xmm17, %xmm19
+# CHECK-NEXT: 2 2 0.67 vpsrlw %xmm16, %xmm17, %xmm19
# CHECK-NEXT: 2 7 0.50 * vpsrlw (%rax), %xmm17, %xmm19
-# CHECK-NEXT: 2 2 1.00 vpsrlw %xmm16, %xmm17, %xmm19 {%k1}
+# CHECK-NEXT: 2 2 0.67 vpsrlw %xmm16, %xmm17, %xmm19 {%k1}
# CHECK-NEXT: 2 7 0.50 * vpsrlw (%rax), %xmm17, %xmm19 {%k1}
-# CHECK-NEXT: 2 2 1.00 vpsrlw %xmm16, %xmm17, %xmm19 {%k1} {z}
+# CHECK-NEXT: 2 2 0.67 vpsrlw %xmm16, %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 7 0.50 * vpsrlw (%rax), %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 4 1.00 vpsrlw %xmm16, %ymm17, %ymm19
# CHECK-NEXT: 2 8 0.50 * vpsrlw (%rax), %ymm17, %ymm19
@@ -2025,7 +2025,7 @@ vpunpcklwd (%rax), %ymm17, %ymm19 {z}{k1}
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11]
-# CHECK-NEXT: - - 255.33 303.33 216.00 216.00 10.00 451.33 - 10.00 10.00 10.00
+# CHECK-NEXT: - - 255.33 307.83 216.00 216.00 10.00 446.83 - 10.00 10.00 10.00
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] Instructions:
@@ -2705,11 +2705,11 @@ vpunpcklwd (%rax), %ymm17, %ymm19 {z}{k1}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllw $0, (%rax), %ymm19 {%k1}
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsllw $0, %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllw $0, (%rax), %ymm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsllw %xmm16, %xmm17, %xmm19
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsllw %xmm16, %xmm17, %xmm19
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllw (%rax), %xmm17, %xmm19
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsllw %xmm16, %xmm17, %xmm19 {%k1}
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsllw %xmm16, %xmm17, %xmm19 {%k1}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllw (%rax), %xmm17, %xmm19 {%k1}
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsllw %xmm16, %xmm17, %xmm19 {%k1} {z}
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsllw %xmm16, %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllw (%rax), %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsllw %xmm16, %ymm17, %ymm19
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllw (%rax), %ymm17, %ymm19
@@ -2741,11 +2741,11 @@ vpunpcklwd (%rax), %ymm17, %ymm19 {z}{k1}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsraw $0, (%rax), %ymm19 {%k1}
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsraw $0, %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsraw $0, (%rax), %ymm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsraw %xmm16, %xmm17, %xmm19
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsraw %xmm16, %xmm17, %xmm19
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsraw (%rax), %xmm17, %xmm19
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsraw %xmm16, %xmm17, %xmm19 {%k1}
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsraw %xmm16, %xmm17, %xmm19 {%k1}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsraw (%rax), %xmm17, %xmm19 {%k1}
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsraw %xmm16, %xmm17, %xmm19 {%k1} {z}
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsraw %xmm16, %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsraw (%rax), %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsraw %xmm16, %ymm17, %ymm19
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsraw (%rax), %ymm17, %ymm19
@@ -2781,11 +2781,11 @@ vpunpcklwd (%rax), %ymm17, %ymm19 {z}{k1}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlw $0, (%rax), %ymm19 {%k1}
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsrlw $0, %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlw $0, (%rax), %ymm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrlw %xmm16, %xmm17, %xmm19
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsrlw %xmm16, %xmm17, %xmm19
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlw (%rax), %xmm17, %xmm19
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrlw %xmm16, %xmm17, %xmm19 {%k1}
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsrlw %xmm16, %xmm17, %xmm19 {%k1}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlw (%rax), %xmm17, %xmm19 {%k1}
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrlw %xmm16, %xmm17, %xmm19 {%k1} {z}
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsrlw %xmm16, %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlw (%rax), %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrlw %xmm16, %ymm17, %ymm19
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlw (%rax), %ymm17, %ymm19
diff --git a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-sse2.s b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-sse2.s
index a4a5128..a1bf6c1 100644
--- a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-sse2.s
+++ b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-sse2.s
@@ -595,30 +595,30 @@ xorpd (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 pshuflw $1, %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * pshuflw $1, (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 pslld $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 pslld %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 pslld %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * pslld (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 pslldq $1, %xmm2
# CHECK-NEXT: 1 1 0.50 psllq $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 psllq %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 psllq %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psllq (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psllw $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 psllw %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 psllw %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psllw (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psrad $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 psrad %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 psrad %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psrad (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psraw $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 psraw %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 psraw %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psraw (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psrld $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 psrld %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 psrld %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psrld (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psrldq $1, %xmm2
# CHECK-NEXT: 1 1 0.50 psrlq $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 psrlq %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 psrlq %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psrlq (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psrlw $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 psrlw %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 psrlw %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psrlw (%rax), %xmm2
# CHECK-NEXT: 1 1 0.33 psubb %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psubb (%rax), %xmm2
@@ -691,7 +691,7 @@ xorpd (%rax), %xmm2
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11]
-# CHECK-NEXT: - 40.00 106.92 98.92 58.50 58.50 7.50 76.42 1.75 8.00 8.00 7.50
+# CHECK-NEXT: - 40.00 106.92 102.92 58.50 58.50 7.50 72.42 1.75 8.00 8.00 7.50
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] Instructions:
@@ -883,30 +883,30 @@ xorpd (%rax), %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - - - pshuflw $1, %xmm0, %xmm2
# CHECK-NEXT: - - - 0.50 0.50 0.50 - 0.50 - - - - pshuflw $1, (%rax), %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - pslld $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - pslld %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - pslld %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - pslld (%rax), %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - - - pslldq $1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - psllq $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - psllq %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - psllq %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - psllq (%rax), %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - psllw $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - psllw %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - psllw %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - psllw (%rax), %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - psrad $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - psrad %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - psrad %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - psrad (%rax), %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - psraw $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - psraw %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - psraw %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - psraw (%rax), %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - psrld $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - psrld %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - psrld %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - psrld (%rax), %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - - - psrldq $1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - psrlq $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - psrlq %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - psrlq %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - psrlq (%rax), %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - psrlw $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - psrlw %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - psrlw %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - psrlw (%rax), %xmm2
# CHECK-NEXT: - - 0.33 0.33 - - - 0.33 - - - - psubb %xmm0, %xmm2
# CHECK-NEXT: - - 0.33 0.33 0.50 0.50 - 0.33 - - - - psubb (%rax), %xmm2
diff --git a/llvm/test/tools/llvm-objcopy/ELF/Inputs/compress-debug-sections.yaml b/llvm/test/tools/llvm-objcopy/ELF/Inputs/compress-debug-sections.yaml
index 67d8435..e2dfee9 100644
--- a/llvm/test/tools/llvm-objcopy/ELF/Inputs/compress-debug-sections.yaml
+++ b/llvm/test/tools/llvm-objcopy/ELF/Inputs/compress-debug-sections.yaml
@@ -43,6 +43,10 @@ Sections:
Type: SHT_PROGBITS
Flags: [ SHF_GROUP ]
Content: '00'
+ - Name: .debug_alloc
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC ]
+ Content: 000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f
Symbols:
- Type: STT_SECTION
Section: .debug_foo
diff --git a/llvm/test/tools/llvm-objcopy/ELF/compress-debug-sections-zlib.test b/llvm/test/tools/llvm-objcopy/ELF/compress-debug-sections-zlib.test
index e1ebeed..056ae84 100644
--- a/llvm/test/tools/llvm-objcopy/ELF/compress-debug-sections-zlib.test
+++ b/llvm/test/tools/llvm-objcopy/ELF/compress-debug-sections-zlib.test
@@ -12,8 +12,10 @@
# CHECK: Name Type Address Off Size ES Flg Lk Inf Al
# COMPRESSED: .debug_foo PROGBITS 0000000000000000 000040 {{.*}} 00 C 0 0 8
# COMPRESSED-NEXT: .notdebug_foo PROGBITS 0000000000000000 {{.*}} 000008 00 0 0 0
+# COMPRESSED: .debug_alloc PROGBITS 0000000000000000 {{.*}} 000040 00 A 0 0 0
# UNCOMPRESSED: .debug_foo PROGBITS 0000000000000000 000040 000008 00 0 0 0
# UNCOMPRESSED-NEXT: .notdebug_foo PROGBITS 0000000000000000 {{.*}} 000008 00 0 0 0
+# UNCOMPRESSED: .debug_alloc PROGBITS 0000000000000000 {{.*}} 000040 00 A 0 0 0
## Relocations do not change.
# CHECK: Relocation section '.rela.debug_foo' at offset {{.*}} contains 2 entries:
diff --git a/llvm/test/tools/llvm-objcopy/ELF/compress-debug-sections-zstd.test b/llvm/test/tools/llvm-objcopy/ELF/compress-debug-sections-zstd.test
index d763131..bde1c2f 100644
--- a/llvm/test/tools/llvm-objcopy/ELF/compress-debug-sections-zstd.test
+++ b/llvm/test/tools/llvm-objcopy/ELF/compress-debug-sections-zstd.test
@@ -12,8 +12,10 @@
# CHECK: Name Type Address Off Size ES Flg Lk Inf Al
# COMPRESSED: .debug_foo PROGBITS 0000000000000000 000040 {{.*}} 00 C 0 0 8
# COMPRESSED-NEXT: .notdebug_foo PROGBITS 0000000000000000 {{.*}} 000008 00 0 0 0
+# COMPRESSED: .debug_alloc PROGBITS 0000000000000000 {{.*}} 000040 00 A 0 0 0
# DECOMPRESSED: .debug_foo PROGBITS 0000000000000000 000040 000008 00 0 0 0
# DECOMPRESSED-NEXT: .notdebug_foo PROGBITS 0000000000000000 {{.*}} 000008 00 0 0 0
+# DECOMPRESSED: .debug_alloc PROGBITS 0000000000000000 {{.*}} 000040 00 A 0 0 0
## Relocations do not change.
# CHECK: Relocation section '.rela.debug_foo' at offset {{.*}} contains 2 entries:
diff --git a/llvm/test/tools/llvm-objcopy/ELF/decompress-sections.test b/llvm/test/tools/llvm-objcopy/ELF/decompress-sections.test
new file mode 100644
index 0000000..4258ddb
--- /dev/null
+++ b/llvm/test/tools/llvm-objcopy/ELF/decompress-sections.test
@@ -0,0 +1,36 @@
+# REQUIRES: zlib
+## Test decompression for different sections.
+
+# RUN: yaml2obj %s -o %t
+# RUN: llvm-objcopy --decompress-debug-sections %t %t.de
+# RUN: llvm-readelf -S %t.de | FileCheck %s
+
+# CHECK: Name Type Address Off Size ES Flg Lk Inf Al
+# CHECK: .debug_alloc PROGBITS 0000000000000000 [[#%x,]] [[#%x,]] 00 AC 0 0 0
+# CHECK-NEXT: .debug_nonalloc PROGBITS 0000000000000000 [[#%x,]] [[#%x,]] 00 0 0 1
+# CHECK-NEXT: .debugx PROGBITS 0000000000000000 [[#%x,]] [[#%x,]] 00 0 0 1
+# CHECK-NEXT: nodebug PROGBITS 0000000000000000 [[#%x,]] [[#%x,]] 00 C 0 0 0
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_X86_64
+Sections:
+ - Name: .debug_alloc
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_COMPRESSED ]
+ Content: 010000000000000040000000000000000100000000000000789cd36280002d3269002f800151
+ - Name: .debug_nonalloc
+ Type: SHT_PROGBITS
+ Flags: [ SHF_COMPRESSED ]
+ Content: 010000000000000040000000000000000100000000000000789cd36280002d3269002f800151
+ - Name: .debugx
+ Type: SHT_PROGBITS
+ Flags: [ SHF_COMPRESSED ]
+ Content: 010000000000000040000000000000000100000000000000789cd36280002d3269002f800151
+ - Name: nodebug
+ Type: SHT_PROGBITS
+ Flags: [ SHF_COMPRESSED ]
+ Content: 010000000000000040000000000000000100000000000000789cd36280002d3269002f800151
diff --git a/llvm/test/tools/llvm-objcopy/ELF/discard-locals-rel.test b/llvm/test/tools/llvm-objcopy/ELF/discard-locals-rel.test
index 3658eb3..00bb8fc 100644
--- a/llvm/test/tools/llvm-objcopy/ELF/discard-locals-rel.test
+++ b/llvm/test/tools/llvm-objcopy/ELF/discard-locals-rel.test
@@ -1,5 +1,5 @@
# RUN: yaml2obj %s -o %t
-# RUN: not llvm-objcopy --discard-locals %t %t2 2>&1 | FileCheck %s
+# RUN: not llvm-objcopy --discard-locals %t %t2 2>&1 | FileCheck %s -DFILE=%t
!ELF
FileHeader:
@@ -23,4 +23,4 @@ Symbols:
Type: STT_FUNC
Section: .text
-# CHECK: not stripping symbol '.L.rel' because it is named in a relocation
+# CHECK: error: '[[FILE]]': not stripping symbol '.L.rel' because it is named in a relocation
diff --git a/llvm/test/tools/llvm-objcopy/ELF/skip-symbol.test b/llvm/test/tools/llvm-objcopy/ELF/skip-symbol.test
new file mode 100644
index 0000000..0f3ab80
--- /dev/null
+++ b/llvm/test/tools/llvm-objcopy/ELF/skip-symbol.test
@@ -0,0 +1,100 @@
+## This test checks the functionality of options --skip-symbol and --skip-symbols.
+# RUN: yaml2obj %s -o %t.o
+# RUN: echo 'foo[2-3]' > %t.skip.regex
+
+## Check --skip-symbol functionality when changing symbol bindings.
+# RUN: llvm-objcopy %t.o %t2.o --localize-hidden --skip-symbol=foo3
+# RUN: llvm-readelf -s %t2.o | FileCheck %s --check-prefix=LH-SYM
+# LH-SYM-DAG: LOCAL HIDDEN 1 foo1
+# LH-SYM-DAG: LOCAL HIDDEN 1 foo2
+# LH-SYM-DAG: GLOBAL HIDDEN 1 foo3
+# LH-SYM-DAG: LOCAL HIDDEN 1 foo4
+# LH-SYM-DAG: LOCAL HIDDEN 1 foo5
+
+## Check --skip-symbols functionality when changing symbol bindings.
+# RUN: llvm-objcopy %t.o %t1.o --localize-hidden --skip-symbols=%t.skip.regex --regex
+# RUN: llvm-readelf -s %t1.o | FileCheck %s --check-prefix=LH-SYMS
+# LH-SYMS-DAG: LOCAL HIDDEN 1 foo1
+# LH-SYMS-DAG: GLOBAL HIDDEN 1 foo2
+# LH-SYMS-DAG: GLOBAL HIDDEN 1 foo3
+# LH-SYMS-DAG: LOCAL HIDDEN 1 foo4
+# LH-SYMS-DAG: LOCAL HIDDEN 1 foo5
+
+## Check --skip-symbol functionality when changing symbol names.
+# RUN: echo -e "foo1 bar1\nfoo2 bar2" > %t.renames.list
+# RUN: llvm-objcopy %t.o %t4.o --redefine-syms=%t.renames.list \
+# RUN: --skip-symbol='fo*' --wildcard
+# RUN: llvm-readelf -s %t4.o | FileCheck %s --check-prefix=RS-SYM
+# RS-SYM-DAG: foo1
+# RS-SYM-DAG: foo2
+# RS-SYM-DAG: foo3
+# RS-SYM-DAG: foo4
+# RS-SYM-DAG: foo5
+
+## Check --skip-symbols functionality when changing symbol names.
+# RUN: llvm-objcopy %t.o %t3.o --redefine-syms=%t.renames.list \
+# RUN: --skip-symbols=%t.skip.regex --regex
+# RUN: llvm-readelf -s %t3.o | FileCheck %s --check-prefix=RS-SYMS
+# RS-SYMS-DAG: bar1
+# RS-SYMS-DAG: foo2
+# RS-SYMS-DAG: foo3
+# RS-SYMS-DAG: foo4
+# RS-SYMS-DAG: foo5
+
+## Check the functionality when using skip options multiple times.
+# RUN: echo "foo3" > %t.symbol0.list
+# RUN: echo "foo4" > %t.symbol1.list
+# RUN: llvm-objcopy %t.o %t5.o --set-symbol-visibility='foo*'=internal --wildcard \
+# RUN: --skip-symbol=foo1 --skip-symbol=foo2 \
+# RUN: --skip-symbols=%t.symbol0.list --skip-symbols=%t.symbol1.list
+# RUN: llvm-readelf -s %t5.o | FileCheck %s --check-prefix=BOTH
+# BOTH-DAG: GLOBAL HIDDEN 1 foo1
+# BOTH-DAG: GLOBAL HIDDEN 1 foo2
+# BOTH-DAG: GLOBAL HIDDEN 1 foo3
+# BOTH-DAG: GLOBAL HIDDEN 1 foo4
+## Only foo5 is not skipped.
+# BOTH-DAG: GLOBAL INTERNAL 1 foo5
+
+## Check that using an invalid symbol name regex generates an error.
+# RUN: echo '*.' > %t.symbols.regex
+# RUN: not llvm-objcopy %t.o --skip-symbols=%t.symbols.regex --regex 2>&1 | \
+# RUN: FileCheck %s --check-prefix=SYMBOL
+# RUN: not llvm-objcopy %t.o --skip-symbol='*.' --regex 2>&1 | \
+# RUN: FileCheck %s --check-prefix=SYMBOL
+# SYMBOL: error: cannot compile regular expression '*.': repetition-operator operand invalid
+
+## Check passing an invalid filename generates an error.
+# RUN: not llvm-objcopy %t.o --skip-symbols=no_file 2>&1 | \
+# RUN: FileCheck %s --check-prefix=FILE -DMSG=%errc_ENOENT
+# FILE: error: 'no_file': [[MSG]]
+
+!ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_X86_64
+Sections:
+ - Name: .text
+ Type: SHT_PROGBITS
+Symbols:
+ - Name: foo1
+ Section: .text
+ Binding: STB_GLOBAL
+ Other: [ STV_HIDDEN ]
+ - Name: foo2
+ Section: .text
+ Binding: STB_GLOBAL
+ Other: [ STV_HIDDEN ]
+ - Name: foo3
+ Section: .text
+ Binding: STB_GLOBAL
+ Other: [ STV_HIDDEN ]
+ - Name: foo4
+ Section: .text
+ Binding: STB_GLOBAL
+ Other: [ STV_HIDDEN ]
+ - Name: foo5
+ Section: .text
+ Binding: STB_GLOBAL
+ Other: [ STV_HIDDEN ]
diff --git a/llvm/test/tools/llvm-objcopy/ELF/strip-reloc-symbol.test b/llvm/test/tools/llvm-objcopy/ELF/strip-reloc-symbol.test
index 63c9e12..941dacce 100644
--- a/llvm/test/tools/llvm-objcopy/ELF/strip-reloc-symbol.test
+++ b/llvm/test/tools/llvm-objcopy/ELF/strip-reloc-symbol.test
@@ -1,5 +1,5 @@
# RUN: yaml2obj %s -o %t
-# RUN: not llvm-objcopy -N foo %t %t2 2>&1 | FileCheck %s
+# RUN: not llvm-objcopy -N foo %t %t2 2>&1 | FileCheck %s -DFILE=%t
!ELF
FileHeader:
@@ -28,4 +28,4 @@ Symbols:
Value: 0x1000
Size: 8
-# CHECK: not stripping symbol 'foo' because it is named in a relocation
+# CHECK: error: '[[FILE]]': not stripping symbol 'foo' because it is named in a relocation
diff --git a/llvm/test/tools/llvm-objdump/MachO/AArch64/Inputs/rel-method-lists-arm64.dylib b/llvm/test/tools/llvm-objdump/MachO/AArch64/Inputs/rel-method-lists-arm64.dylib
new file mode 100755
index 0000000..051e28f
--- /dev/null
+++ b/llvm/test/tools/llvm-objdump/MachO/AArch64/Inputs/rel-method-lists-arm64.dylib
Binary files differ
diff --git a/llvm/test/tools/llvm-objdump/MachO/AArch64/Inputs/rel-method-lists-arm64_32.dylib b/llvm/test/tools/llvm-objdump/MachO/AArch64/Inputs/rel-method-lists-arm64_32.dylib
new file mode 100755
index 0000000..d3a3390
--- /dev/null
+++ b/llvm/test/tools/llvm-objdump/MachO/AArch64/Inputs/rel-method-lists-arm64_32.dylib
Binary files differ
diff --git a/llvm/test/tools/llvm-objdump/MachO/AArch64/macho-relative-method-lists.test b/llvm/test/tools/llvm-objdump/MachO/AArch64/macho-relative-method-lists.test
new file mode 100644
index 0000000..b1b96a4
--- /dev/null
+++ b/llvm/test/tools/llvm-objdump/MachO/AArch64/macho-relative-method-lists.test
@@ -0,0 +1,86 @@
+RUN: llvm-objdump --macho --objc-meta-data %p/Inputs/rel-method-lists-arm64_32.dylib | FileCheck %s --check-prefix=CHK32
+RUN: llvm-otool -ov %p/Inputs/rel-method-lists-arm64_32.dylib | FileCheck %s --check-prefix=CHK32
+
+RUN: llvm-objdump --macho --objc-meta-data %p/Inputs/rel-method-lists-arm64.dylib | FileCheck %s --check-prefix=CHK64
+RUN: llvm-otool -ov %p/Inputs/rel-method-lists-arm64.dylib | FileCheck %s --check-prefix=CHK64
+
+CHK32: baseMethods 0x660 (struct method_list_t *)
+CHK32-NEXT: entsize 12 (relative)
+CHK32-NEXT: count 3
+CHK32-NEXT: name 0x144 (0x{{[0-9a-f]*}}) instance_method_00
+CHK32-NEXT: types 0x91 (0x{{[0-9a-f]*}}) v8@0:4
+CHK32-NEXT: imp 0xffffff18 (0x{{[0-9a-f]*}}) -[MyClass instance_method_00]
+CHK32-NEXT: name 0x13c (0x{{[0-9a-f]*}}) instance_method_01
+CHK32-NEXT: types 0x85 (0x{{[0-9a-f]*}}) v8@0:4
+CHK32-NEXT: imp 0xffffff28 (0x{{[0-9a-f]*}}) -[MyClass instance_method_01]
+CHK32-NEXT: name 0x134 (0x{{[0-9a-f]*}}) instance_method_02
+CHK32-NEXT: types 0x79 (0x{{[0-9a-f]*}}) v8@0:4
+CHK32-NEXT: imp 0xffffff38 (0x{{[0-9a-f]*}}) -[MyClass instance_method_02]
+
+CHK32: baseMethods 0x630 (struct method_list_t *)
+CHK32-NEXT: entsize 12 (relative)
+CHK32-NEXT: count 3
+CHK32-NEXT: name 0x180 (0x{{[0-9a-f]*}}) class_method_00
+CHK32-NEXT: types 0xc1 (0x{{[0-9a-f]*}}) v8@0:4
+CHK32-NEXT: imp 0xffffff9c (0x{{[0-9a-f]*}}) +[MyClass class_method_00]
+CHK32-NEXT: name 0x178 (0x{{[0-9a-f]*}}) class_method_01
+CHK32-NEXT: types 0xb5 (0x{{[0-9a-f]*}}) v8@0:4
+CHK32-NEXT: imp 0xffffffac (0x{{[0-9a-f]*}}) +[MyClass class_method_01]
+CHK32-NEXT: name 0x170 (0x{{[0-9a-f]*}}) class_method_02
+CHK32-NEXT: types 0xa9 (0x{{[0-9a-f]*}}) v8@0:4
+CHK32-NEXT: imp 0xffffffbc (0x{{[0-9a-f]*}}) +[MyClass class_method_02]
+
+CHK64: baseMethods 0x6e0 (struct method_list_t *)
+CHK64-NEXT: entsize 12 (relative)
+CHK64-NEXT: count 3
+CHK64-NEXT: name 0x188 (0x{{[0-9a-f]*}}) instance_method_00
+CHK64-NEXT: types 0x91 (0x{{[0-9a-f]*}}) v16@0:8
+CHK64-NEXT: imp 0xffffffa8 (0x{{[0-9a-f]*}}) -[MyClass instance_method_00]
+CHK64-NEXT: name 0x184 (0x{{[0-9a-f]*}}) instance_method_01
+CHK64-NEXT: types 0x85 (0x{{[0-9a-f]*}}) v16@0:8
+CHK64-NEXT: imp 0xffffffa0 (0x{{[0-9a-f]*}}) -[MyClass instance_method_01]
+CHK64-NEXT: name 0x180 (0x{{[0-9a-f]*}}) instance_method_02
+CHK64-NEXT: types 0x79 (0x{{[0-9a-f]*}}) v16@0:8
+CHK64-NEXT: imp 0xffffff98 (0x{{[0-9a-f]*}}) -[MyClass instance_method_02]
+
+CHK64: baseMethods 0x6b0 (struct method_list_t *)
+CHK64-NEXT: entsize 12 (relative)
+CHK64-NEXT: count 3
+CHK64-NEXT: name 0x1d0 (0x{{[0-9a-f]*}}) class_method_00
+CHK64-NEXT: types 0xc1 (0x{{[0-9a-f]*}}) v16@0:8
+CHK64-NEXT: imp 0xffffffe4 (0x{{[0-9a-f]*}}) +[MyClass class_method_00]
+CHK64-NEXT: name 0x1cc (0x{{[0-9a-f]*}}) class_method_01
+CHK64-NEXT: types 0xb5 (0x{{[0-9a-f]*}}) v16@0:8
+CHK64-NEXT: imp 0xffffffdc (0x{{[0-9a-f]*}}) +[MyClass class_method_01]
+CHK64-NEXT: name 0x1c8 (0x{{[0-9a-f]*}}) class_method_02
+CHK64-NEXT: types 0xa9 (0x{{[0-9a-f]*}}) v16@0:8
+CHK64-NEXT: imp 0xffffffd4 (0x{{[0-9a-f]*}}) +[MyClass class_method_02]
+
+######## Generate rel-method-lists-arm64.dylib ########
+// clang -c main.mm -o main.o -target arm64-apple-macos -arch arm64
+// ld64.ld64 -dylib -demangle -dynamic main.o -o rel-method-lists-arm64.dylib -syslibroot MacOSX14.2.sdk -segalign 0x10 -objc_relative_method_lists
+
+######## Generate rel-method-lists-arm64_32.dylib ########
+// clang -c main.mm -o main.o -target arm64_32-apple-watchos -arch arm64_32
+// ld64.ld64 -dylib -demangle -dynamic main.o -o rel-method-lists-arm64_32.dylib -syslibroot WatchOS.sdk -segalign 0x10 -objc_relative_method_lists
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~ main.mm ~~~~~~~~~~~~~~~~~~~~~~~~~
+__attribute__((objc_root_class))
+@interface MyClass
+- (void)instance_method_00;
+- (void)instance_method_01;
+- (void)instance_method_02;
++ (void)class_method_00;
++ (void)class_method_01;
++ (void)class_method_02;
+@end
+@implementation MyClass
+- (void)instance_method_00 {}
+- (void)instance_method_01 {}
+- (void)instance_method_02 {}
++ (void)class_method_00 {}
++ (void)class_method_01 {}
++ (void)class_method_02 {}
+@end
+void *_objc_empty_cache;
+void *_objc_empty_vtable;
diff --git a/llvm/test/tools/llvm-profdata/Inputs/vtable-value-prof.proftext b/llvm/test/tools/llvm-profdata/Inputs/vtable-value-prof.proftext
new file mode 100644
index 0000000..372f9f97
--- /dev/null
+++ b/llvm/test/tools/llvm-profdata/Inputs/vtable-value-prof.proftext
@@ -0,0 +1,74 @@
+# IR level Instrumentation Flag
+:ir
+_Z10createTypei
+# Func Hash:
+146835647075900052
+# Num Counters:
+2
+# Counter Values:
+750
+250
+
+_ZN8Derived15func1Eii
+# Func Hash:
+742261418966908927
+# Num Counters:
+1
+# Counter Values:
+250
+
+_ZN8Derived15func2Eii
+# Func Hash:
+742261418966908927
+# Num Counters:
+1
+# Counter Values:
+250
+
+main
+# Func Hash:
+1124236338992350536
+# Num Counters:
+2
+# Counter Values:
+1000
+1
+# Num Value Kinds:
+2
+# ValueKind = IPVK_IndirectCallTarget:
+0
+# NumValueSites:
+2
+2
+vtable_prof.cc;_ZN12_GLOBAL__N_18Derived25func1Eii:750
+_ZN8Derived15func1Eii:250
+2
+vtable_prof.cc;_ZN12_GLOBAL__N_18Derived25func2Eii:750
+_ZN8Derived15func2Eii:250
+# ValueKind = IPVK_VTableTarget:
+2
+# NumValueSites:
+2
+2
+vtable_prof.cc;_ZTVN12_GLOBAL__N_18Derived2E:750
+_ZTV8Derived1:250
+2
+vtable_prof.cc;_ZTVN12_GLOBAL__N_18Derived2E:750
+_ZTV8Derived1:250
+
+vtable_prof.cc;_ZN12_GLOBAL__N_18Derived25func1Eii
+# Func Hash:
+742261418966908927
+# Num Counters:
+1
+# Counter Values:
+750
+
+vtable_prof.cc;_ZN12_GLOBAL__N_18Derived25func2Eii
+# Func Hash:
+742261418966908927
+# Num Counters:
+1
+# Counter Values:
+750
+
diff --git a/llvm/test/tools/llvm-profdata/vtable-value-prof.test b/llvm/test/tools/llvm-profdata/vtable-value-prof.test
new file mode 100644
index 0000000..378c2e1
--- /dev/null
+++ b/llvm/test/tools/llvm-profdata/vtable-value-prof.test
@@ -0,0 +1,83 @@
+; RUN: rm -rf %t && mkdir %t && cd %t
+
+; Generate indexed profiles from text profiles
+RUN: llvm-profdata merge %S/Inputs/vtable-value-prof.proftext -o indexed.profdata
+
+; Show indexed profiles
+RUN: llvm-profdata show --function=main --ic-targets --show-vtables indexed.profdata | FileCheck %s --check-prefix=INDEXED
+
+; Show text profiles
+RUN: llvm-profdata show --function=main --ic-targets --show-vtables --text %S/Inputs/vtable-value-prof.proftext | FileCheck %s --check-prefix=ICTEXT
+
+; Convert indexed profiles to its textual output and show it.
+RUN: llvm-profdata merge --text -o text-from-indexed.proftext indexed.profdata
+RUN: llvm-profdata show --function=main --ic-targets --show-vtables text-from-indexed.proftext | FileCheck %s --check-prefix=INDEXED
+RUN: llvm-profdata show --function=main --ic-targets --show-vtables --text text-from-indexed.proftext | FileCheck %s --check-prefix=ICTEXT
+
+INDEXED: Counters:
+INDEXED-NEXT: main:
+INDEXED-NEXT: Hash: 0x0f9a16fe6d398548
+INDEXED-NEXT: Counters: 2
+INDEXED-NEXT: Indirect Call Site Count: 2
+INDEXED-NEXT: Number of instrumented vtables: 2
+INDEXED-NEXT: Indirect Target Results:
+INDEXED-NEXT: [ 0, {{.*}}vtable_prof.cc;_ZN12_GLOBAL__N_18Derived25func1Eii, 750 ] (75.00%)
+INDEXED-NEXT: [ 0, _ZN8Derived15func1Eii, 250 ] (25.00%)
+INDEXED-NEXT: [ 1, {{.*}}vtable_prof.cc;_ZN12_GLOBAL__N_18Derived25func2Eii, 750 ] (75.00%)
+INDEXED-NEXT: [ 1, _ZN8Derived15func2Eii, 250 ] (25.00%)
+INDEXED-NEXT: VTable Results:
+INDEXED-NEXT: [ 0, {{.*}}vtable_prof.cc;_ZTVN12_GLOBAL__N_18Derived2E, 750 ] (75.00%)
+INDEXED-NEXT: [ 0, _ZTV8Derived1, 250 ] (25.00%)
+INDEXED-NEXT: [ 1, {{.*}}vtable_prof.cc;_ZTVN12_GLOBAL__N_18Derived2E, 750 ] (75.00%)
+INDEXED-NEXT: [ 1, _ZTV8Derived1, 250 ] (25.00%)
+INDEXED-NEXT: Instrumentation level: IR entry_first = 0
+INDEXED-NEXT: Functions shown: 1
+INDEXED-NEXT: Total functions: 6
+INDEXED-NEXT: Maximum function count: 1000
+INDEXED-NEXT: Maximum internal block count: 250
+INDEXED-NEXT: Statistics for indirect call sites profile:
+INDEXED-NEXT: Total number of sites: 2
+INDEXED-NEXT: Total number of sites with values: 2
+INDEXED-NEXT: Total number of profiled values: 4
+INDEXED-NEXT: Value sites histogram:
+INDEXED-NEXT: NumTargets, SiteCount
+INDEXED-NEXT: 2, 2
+INDEXED-NEXT: Statistics for vtable profile:
+INDEXED-NEXT: Total number of sites: 2
+INDEXED-NEXT: Total number of sites with values: 2
+INDEXED-NEXT: Total number of profiled values: 4
+INDEXED-NEXT: Value sites histogram:
+INDEXED-NEXT: NumTargets, SiteCount
+INDEXED-NEXT: 2, 2
+
+ICTEXT: :ir
+ICTEXT: main
+ICTEXT: # Func Hash:
+ICTEXT: 1124236338992350536
+ICTEXT: # Num Counters:
+ICTEXT: 2
+ICTEXT: # Counter Values:
+ICTEXT: 1000
+ICTEXT: 1
+ICTEXT: # Num Value Kinds:
+ICTEXT: 2
+ICTEXT: # ValueKind = IPVK_IndirectCallTarget:
+ICTEXT: 0
+ICTEXT: # NumValueSites:
+ICTEXT: 2
+ICTEXT: 2
+ICTEXT: {{.*}}vtable_prof.cc;_ZN12_GLOBAL__N_18Derived25func1Eii:750
+ICTEXT: _ZN8Derived15func1Eii:250
+ICTEXT: 2
+ICTEXT: {{.*}}vtable_prof.cc;_ZN12_GLOBAL__N_18Derived25func2Eii:750
+ICTEXT: _ZN8Derived15func2Eii:250
+ICTEXT: # ValueKind = IPVK_VTableTarget:
+ICTEXT: 2
+ICTEXT: # NumValueSites:
+ICTEXT: 2
+ICTEXT: 2
+ICTEXT: {{.*}}vtable_prof.cc;_ZTVN12_GLOBAL__N_18Derived2E:750
+ICTEXT: _ZTV8Derived1:250
+ICTEXT: 2
+ICTEXT: {{.*}}vtable_prof.cc;_ZTVN12_GLOBAL__N_18Derived2E:750
+ICTEXT: _ZTV8Derived1:250
diff --git a/llvm/test/tools/llvm-profgen/Inputs/coff-profile.exe b/llvm/test/tools/llvm-profgen/Inputs/coff-profile.exe
new file mode 100644
index 0000000..309476a
--- /dev/null
+++ b/llvm/test/tools/llvm-profgen/Inputs/coff-profile.exe
Binary files differ
diff --git a/llvm/test/tools/llvm-profgen/Inputs/coff-profile.perfscript b/llvm/test/tools/llvm-profgen/Inputs/coff-profile.perfscript
new file mode 100644
index 0000000..96eb878c
--- /dev/null
+++ b/llvm/test/tools/llvm-profgen/Inputs/coff-profile.perfscript
@@ -0,0 +1,13 @@
+PERF_RECORD_MMAP2 5752/0: [0x7ff70a1b0000(0x640000) @ 0x1000 00:00 0 0]: r-xp c:\Users\haohaiwe\Desktop\coff-profile.exe
+ 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0
+ 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0
+ 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0
+ 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0
+ 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0
+ 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0
+ 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0
+ 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0
+ 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0
+ 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/P/X/A/0 0x7ff70a1b1415/0x7ff70a1b13b0/M/X/A/0
+ 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/-/X/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0
+ 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0 0x7ff70a1b1482/0x7ff70a1b1430/P/-/A/0
diff --git a/llvm/test/tools/llvm-profgen/coff-profile.test b/llvm/test/tools/llvm-profgen/coff-profile.test
new file mode 100644
index 0000000..5578f73
--- /dev/null
+++ b/llvm/test/tools/llvm-profgen/coff-profile.test
@@ -0,0 +1,79 @@
+; RUN: llvm-profgen --format=text --perfscript=%S/Inputs/coff-profile.perfscript --binary=%S/Inputs/coff-profile.exe --output=%t
+; RUN: FileCheck %s --input-file %t
+
+CHECK: main:31837:0
+CHECK-NEXT: 0: 0
+CHECK-NEXT: 3.1: 0
+CHECK-NEXT: 3.2: 0
+CHECK-NEXT: 8: 0
+CHECK-NEXT: 65501: 0
+CHECK-NEXT: 1: ??$init@HG@MyNameSpace2@@YAXHPEAG@Z:0
+CHECK-NEXT: 1: 0
+CHECK-NEXT: 1.1: 0
+CHECK-NEXT: 1.2: 0
+CHECK-NEXT: 2: 0
+CHECK-NEXT: 65514: 0
+CHECK-NEXT: 4: ?work1@?$MyClass@GH@MyNameSpace1@@QEAAXQEAGH@Z:3193
+CHECK-NEXT: 0: ?work@?$MyClass@GH@MyNameSpace1@@AEAAXQEAGHH@Z:3193
+CHECK-NEXT: 1.1: 31
+CHECK-NEXT: 1.2: 31
+CHECK-NEXT: 2: 31
+CHECK-NEXT: 3: 31
+CHECK-NEXT: 65530: 0
+CHECK-NEXT: 5: ?work2@?$MyClass@GH@MyNameSpace1@@QEAAXQEAGH@Z:28644
+CHECK-NEXT: 0: ?work@?$MyClass@GH@MyNameSpace1@@AEAAXQEAGHH@Z:28644
+CHECK-NEXT: 1.1: 341
+CHECK-NEXT: 1.2: 341
+CHECK-NEXT: 2: 341
+CHECK-NEXT: 3: 341
+CHECK-NEXT: 65530: 0
+CHECK-NEXT: 7: ?print@MyNameSpace2@@YAXPEAGH@Z:0
+CHECK-NEXT: 1: 0
+
+; Original code
+; clang-cl.exe -O2 -gdwarf -gline-tables-only coff-profile.cpp -fuse-ld=lld -Xclang -fdebug-info-for-profiling -link -debug:dwarf
+
+#include <stdio.h>
+
+namespace MyNameSpace1 {
+
+template <typename T1, typename T2> class MyClass {
+ void work(T1 map[], T2 n, T2 m) {
+ for (int i = 1; i < n; i++) {
+ map[i] = map[i - 1] * map[i - 1];
+ map[i] += (i * map[i - 1]) / m + i % m;
+ }
+ }
+
+public:
+ void work1(T1 map[], T2 n) { work(map, n, 7); }
+ void work2(T1 map[], T2 n) { work(map, n, 3); }
+};
+
+} // namespace MyNameSpace1
+
+namespace MyNameSpace2 {
+
+template <typename T1, typename T2> void init(T1 c, T2 *p) {
+ for (int i = 0; i < c * 1000000; i++) {
+ p[i] = i / 3 + (i * i) % 3;
+ }
+}
+
+void print(unsigned short *p, int i) {
+ printf("%d %d %d\n", p[i * i * 100], p[i * i * 100 + 1], p[i * i * 100 + 2]);
+}
+
+} // namespace MyNameSpace2
+
+unsigned short M[3000000];
+int main(int argc, char *argv[]) {
+ MyNameSpace2::init(argc, M);
+ MyNameSpace1::MyClass<unsigned short, int> Obj;
+ for (int i = 0; i <= argc * 10; i++) {
+ Obj.work1(&M[argc], argc * 100000);
+ Obj.work2(&M[argc * argc], argc * 1000000);
+ }
+ MyNameSpace2::print(M, argc);
+ return 0;
+}
diff --git a/llvm/test/tools/llvm-readobj/ELF/decompress-zlib-unsupported.test b/llvm/test/tools/llvm-readobj/ELF/decompress-zlib-unsupported.test
index f4c73de..083c296 100644
--- a/llvm/test/tools/llvm-readobj/ELF/decompress-zlib-unsupported.test
+++ b/llvm/test/tools/llvm-readobj/ELF/decompress-zlib-unsupported.test
@@ -10,6 +10,7 @@
# CHECK-NEXT: [ 18] x.c.
# CHECK-NEXT: [ 1e] .
# CHECK-NEXT: [ 20] .
+# CHECK-EMPTY:
# CHECK-NEXT: Hex dump of section '.b':
# CHECK-NEXT: warning: '[[FILE]]': LLVM was not built with LLVM_ENABLE_ZLIB or did not find zlib at build time
# CHECK-NEXT: 0x00000000 01000000 00000000 01000000 00000000 ................
diff --git a/llvm/test/tools/llvm-readobj/ELF/decompress-zlib.test b/llvm/test/tools/llvm-readobj/ELF/decompress-zlib.test
index ea7a885..c1d12a6 100644
--- a/llvm/test/tools/llvm-readobj/ELF/decompress-zlib.test
+++ b/llvm/test/tools/llvm-readobj/ELF/decompress-zlib.test
@@ -28,6 +28,7 @@
# COMPRESSED: String dump of section '.not_null_terminated':
# COMPRESSED-NEXT: [ 0] no
# COMPRESSED-NEXT: [ 3] null
+# COMPRESSED-EMPTY:
# COMPRESSED-NEXT: Hex dump of section '.strings':
# COMPRESSED-NEXT: 0x00000000 01000000 00000000 16000000 00000000 ................
# COMPRESSED-NEXT: 0x00000010 00000000 00000000 789ccb48 2d4a6548 ........x..H-JeH
@@ -39,6 +40,7 @@
# INVALID: String dump of section '.invalid1':
# INVALID-NEXT: warning: '[[FILE]]': corrupted compressed section header
# INVALID-NEXT: [ 0] .
+# INVALID-EMPTY:
# INVALID-NEXT: Hex dump of section '.invalid2':
# INVALID-NEXT: warning: '[[FILE]]': zlib error: Z_DATA_ERROR
# INVALID-NEXT: 0x00000000 01000000 00000000 16000000 00000000 ................
diff --git a/llvm/test/tools/llvm-readobj/ELF/decompress-zstd-unsupported.test b/llvm/test/tools/llvm-readobj/ELF/decompress-zstd-unsupported.test
index 65da952..98c7cb0 100644
--- a/llvm/test/tools/llvm-readobj/ELF/decompress-zstd-unsupported.test
+++ b/llvm/test/tools/llvm-readobj/ELF/decompress-zstd-unsupported.test
@@ -9,6 +9,7 @@
# CHECK-NEXT: [ 10] .
# CHECK-NEXT: [ 18] (./. ..
# CHECK-NEXT: [ 21] .
+# CHECK-EMPTY:
# CHECK-NEXT: Hex dump of section '.b':
# CHECK-NEXT: warning: '[[FILE]]': LLVM was not built with LLVM_ENABLE_ZSTD or did not find zstd at build time
# CHECK-NEXT: 0x00000000 02000000 00000000 01000000 00000000 ................
diff --git a/llvm/test/tools/llvm-readobj/ELF/hex-dump-multi.s b/llvm/test/tools/llvm-readobj/ELF/hex-dump-multi.s
index 33ef534..942bfc4 100644
--- a/llvm/test/tools/llvm-readobj/ELF/hex-dump-multi.s
+++ b/llvm/test/tools/llvm-readobj/ELF/hex-dump-multi.s
@@ -1,10 +1,12 @@
# REQUIRES: x86-registered-target
# RUN: llvm-mc -filetype=obj -triple x86_64 %s -o %t.o
-# RUN: llvm-readobj -x .a -x .b %t.o | FileCheck %s
+# RUN: llvm-readobj -x .a -x .b %t.o | FileCheck %s --check-prefixes=HEADER,CHECK
# RUN: llvm-readelf -x .a -x .b %t.o | FileCheck %s
-# CHECK: Hex dump of section '.a':
+# HEADER: LoadName:
+# CHECK: {{^$}}
+# CHECK-NEXT: Hex dump of section '.a':
# CHECK-NEXT: 0x00000000 00
# CHECK-EMPTY:
# CHECK-NEXT: Hex dump of section '.b':
diff --git a/llvm/test/tools/llvm-readobj/ELF/hex-dump.test b/llvm/test/tools/llvm-readobj/ELF/hex-dump.test
index 7829944..71212de 100644
--- a/llvm/test/tools/llvm-readobj/ELF/hex-dump.test
+++ b/llvm/test/tools/llvm-readobj/ELF/hex-dump.test
@@ -46,7 +46,8 @@ FileHeader:
# RUN: llvm-readelf --hex-dump=.sec %t2.out1 | \
# RUN: FileCheck %s --match-full-lines --strict-whitespace --check-prefix=SPACES1
-# SPACES1:Hex dump of section '.sec':
+# SPACES1:{{^$}}
+# SPACES1-NEXT:Hex dump of section '.sec':
# SPACES1-NEXT:0x00000000 00000000 00000000 00000000 00000000 ................
# SPACES1-NEXT:0x00000010 0000 ..
@@ -55,7 +56,8 @@ FileHeader:
# RUN: llvm-readelf --hex-dump=.sec %t2.out2 | \
# RUN: FileCheck %s --match-full-lines --strict-whitespace --check-prefix=SPACES2
-# SPACES2:Hex dump of section '.sec':
+# SPACES2:{{^$}}
+# SPACES2-NEXT:Hex dump of section '.sec':
# SPACES2-NEXT:0x00000000 00000000 00000000 00000000 00000000 ................
# SPACES2-NEXT:0x00000010 00000000 00000000 00000000 0000 ..............
@@ -64,7 +66,8 @@ FileHeader:
# RUN: llvm-readelf --hex-dump=.sec %t2.out3 | \
# RUN: FileCheck %s --match-full-lines --strict-whitespace --check-prefix=SPACES3
-# SPACES3:Hex dump of section '.sec':
+# SPACES3:{{^$}}
+# SPACES3-NEXT:Hex dump of section '.sec':
# SPACES3-NEXT:0x00000000 00000000 00000000 00000000 00000000 ................
# SPACES3-NEXT:0x00000010 00000000 00000000 00000000 ............
diff --git a/llvm/test/tools/llvm-readobj/ELF/machine-specific-section-types.test b/llvm/test/tools/llvm-readobj/ELF/machine-specific-section-types.test
index f952438..f65793c 100644
--- a/llvm/test/tools/llvm-readobj/ELF/machine-specific-section-types.test
+++ b/llvm/test/tools/llvm-readobj/ELF/machine-specific-section-types.test
@@ -17,6 +17,10 @@
# RUN: llvm-readobj --section-headers %t-aarch64.o | FileCheck %s --check-prefix=AARCH64-LLVM
# RUN: llvm-readelf --section-headers %t-aarch64.o | FileCheck %s --check-prefix=AARCH64-GNU
+# RUN: yaml2obj %s --docnum=5 -o %t-hexagon.o
+# RUN: llvm-readobj --section-headers %t-hexagon.o | FileCheck %s --check-prefix=HEXAGON-LLVM
+# RUN: llvm-readelf --section-headers %t-hexagon.o | FileCheck %s --check-prefix=HEXAGON-GNU
+
# ARM-LLVM: Name: exidx
# ARM-LLVM: Type: SHT_ARM_EXIDX
# ARM-LLVM: Name: preemptmap
@@ -64,6 +68,13 @@
# AARCH64-GNU: .memtag.globals.dynamic AARCH64_MEMTAG_GLOBALS_DYNAMIC
# AARCH64-GNU: .memtag.globals.static AARCH64_MEMTAG_GLOBALS_STATIC
+# HEXAGON-LLVM: Name: hexagon_ordered
+# HEXAGON-LLVM: Type: SHT_HEX_ORDERED
+# HEXAGON-LLVM: Name: .hexagon.attributes
+# HEXAGON-LLVM: Type: SHT_HEXAGON_ATTRIBUTES
+
+# HEXAGON-GNU: hexagon_ordered HEX_ORDERED
+
--- !ELF
FileHeader:
Class: ELFCLASS64
@@ -122,3 +133,15 @@ Sections:
Type: SHT_AARCH64_MEMTAG_GLOBALS_DYNAMIC
- Name: .memtag.globals.static
Type: SHT_AARCH64_MEMTAG_GLOBALS_STATIC
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS32
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_HEXAGON
+Sections:
+ - Name: hexagon_ordered
+ Type: SHT_HEX_ORDERED
+ - Name: .hexagon.attributes
+ Type: SHT_HEXAGON_ATTRIBUTES
diff --git a/llvm/test/tools/llvm-readobj/ELF/reloc-types-aarch64.test b/llvm/test/tools/llvm-readobj/ELF/reloc-types-aarch64.test
index 7291282..cf95b51 100644
--- a/llvm/test/tools/llvm-readobj/ELF/reloc-types-aarch64.test
+++ b/llvm/test/tools/llvm-readobj/ELF/reloc-types-aarch64.test
@@ -119,6 +119,7 @@
# CHECK: Type: R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC (571)
# CHECK: Type: R_AARCH64_TLSLD_LDST128_DTPREL_LO12 (572)
# CHECK: Type: R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC (573)
+# CHECK: Type: R_AARCH64_AUTH_ABS64 (580)
# CHECK: Type: R_AARCH64_COPY (1024)
# CHECK: Type: R_AARCH64_GLOB_DAT (1025)
# CHECK: Type: R_AARCH64_JUMP_SLOT (1026)
@@ -128,6 +129,7 @@
# CHECK: Type: R_AARCH64_TLS_TPREL64 (1030)
# CHECK: Type: R_AARCH64_TLSDESC (1031)
# CHECK: Type: R_AARCH64_IRELATIVE (1032)
+# CHECK: Type: R_AARCH64_AUTH_RELATIVE (1041)
--- !ELF
FileHeader:
@@ -254,6 +256,7 @@ Sections:
- Type: R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC
- Type: R_AARCH64_TLSLD_LDST128_DTPREL_LO12
- Type: R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC
+ - Type: R_AARCH64_AUTH_ABS64
- Type: R_AARCH64_COPY
- Type: R_AARCH64_GLOB_DAT
- Type: R_AARCH64_JUMP_SLOT
@@ -263,3 +266,4 @@ Sections:
- Type: R_AARCH64_TLS_TPREL64
- Type: R_AARCH64_TLSDESC
- Type: R_AARCH64_IRELATIVE
+ - Type: R_AARCH64_AUTH_RELATIVE
diff --git a/llvm/test/tools/llvm-readobj/ELF/string-dump-multi.s b/llvm/test/tools/llvm-readobj/ELF/string-dump-multi.s
index 29d7ef0..36a115b 100644
--- a/llvm/test/tools/llvm-readobj/ELF/string-dump-multi.s
+++ b/llvm/test/tools/llvm-readobj/ELF/string-dump-multi.s
@@ -1,10 +1,12 @@
# REQUIRES: x86-registered-target
# RUN: llvm-mc -filetype=obj -triple x86_64 %s -o %t.o
-# RUN: llvm-readobj -p .a -p .b %t.o | FileCheck %s
+# RUN: llvm-readobj -p .a -p .b %t.o | FileCheck %s --check-prefixes=HEADER,CHECK
# RUN: llvm-readelf -p .a -p .b %t.o | FileCheck %s
-# CHECK: String dump of section '.a':
+# HEADER: LoadName:
+# CHECK: {{^$}}
+# CHECK-NEXT: String dump of section '.a':
# CHECK-NEXT: [ 0] 0
# CHECK-EMPTY:
# CHECK-NEXT: String dump of section '.b':
diff --git a/llvm/test/tools/llvm-readobj/ELF/string-dump.test b/llvm/test/tools/llvm-readobj/ELF/string-dump.test
index c06b274..1d7a177 100644
--- a/llvm/test/tools/llvm-readobj/ELF/string-dump.test
+++ b/llvm/test/tools/llvm-readobj/ELF/string-dump.test
@@ -3,7 +3,7 @@
# RUN: llvm-readobj --string-dump=.strings \
# RUN: --string-dump=.not_null_terminated %t > %t.readobj.out
-# RUN: FileCheck %s --input-file=%t.readobj.out
+# RUN: FileCheck %s --input-file=%t.readobj.out --check-prefixes=HEADER,CHECK
# Also test the different ways --string-dump can be specified, i.e. as a short
# flag (-p), with different prefix modes (-p .foo, -p=.foo, -p.foo), and with
@@ -23,7 +23,9 @@
# RUN: llvm-readelf -hp1 -p2 %t | cmp %t.readelf.out -
# RUN: llvm-readelf -hp 1 -p.not_null_terminated %t | cmp %t.readelf.out -
-# CHECK: String dump of section '.strings':
+# HEADER: LoadName:
+# CHECK: {{^$}}
+# CHECK-NEXT: String dump of section '.strings':
# CHECK-NEXT: [ 0] here
# CHECK-NEXT: [ 5] are
# CHECK-NEXT: [ 9] some
diff --git a/llvm/test/tools/llvm-reduce/remove-dp-values.ll b/llvm/test/tools/llvm-reduce/remove-dp-values.ll
index 40ff9f3..d137b279 100644
--- a/llvm/test/tools/llvm-reduce/remove-dp-values.ll
+++ b/llvm/test/tools/llvm-reduce/remove-dp-values.ll
@@ -1,7 +1,7 @@
; RUN: llvm-reduce --abort-on-invalid-reduction --test FileCheck --test-arg --check-prefixes=CHECK-INTERESTINGNESS --test-arg %s --test-arg --input-file %s -o %t --try-experimental-debuginfo-iterators
; RUN: FileCheck --check-prefixes=CHECK-FINAL --input-file=%t %s --implicit-check-not=dbg.value
-; Test that we can, in RemoveDIs mode / DPValues mode (where variable location
+; Test that we can, in RemoveDIs mode / DbgVariableRecords mode (where variable location
; information isn't an instruction), remove one variable location assignment
; but not another.